repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
OMFGBKANG/nk2 | arch/mips/math-emu/ieee754dp.c | 895 | 5403 | /* IEEE754 floating point arithmetic
* double precision: common utilities
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
* http://www.algor.co.uk
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754dp.h"
/* Return the IEEE754_CLASS_* code (zero/norm/dnorm/inf/NaN) of x.
 * COMPXDP declares the decomposition locals (sign, exponent, mantissa,
 * class); EXPLODEXDP fills them from x.  Only the class, xc, is used.
 */
int ieee754dp_class(ieee754dp x)
{
COMPXDP;
EXPLODEXDP;
return xc;
}
/*
 * Test whether x is any NaN (signalling or quiet).  Relies on the
 * class-code ordering: the NaN classes are the largest IEEE754_CLASS_*
 * values, starting at IEEE754_CLASS_SNAN.
 */
int ieee754dp_isnan(ieee754dp x)
{
	int cls = ieee754dp_class(x);

	return cls >= IEEE754_CLASS_SNAN;
}
/*
 * Test whether a NaN is signalling.  In this encoding the top mantissa
 * bit set marks a signalling NaN.  Caller must pass a NaN (asserted).
 */
int ieee754dp_issnan(ieee754dp x)
{
	const u64 snan_bit = DP_MBIT(DP_MBITS - 1);

	assert(ieee754dp_isnan(x));
	return (DPMANT(x) & snan_bit) == snan_bit;
}
/* Post-process a double-precision result: if any enabled exception
 * condition is pending (TSTX), hand the result to the common exception
 * dispatcher, which may rewrite it; otherwise return r unchanged.
 * op names the offending operation; the trailing varargs are its
 * operands, consumed by the handler via ax.ap.
 */
ieee754dp ieee754dp_xcpt(ieee754dp r, const char *op, ...)
{
struct ieee754xctx ax;
if (!TSTX())
return r;
ax.op = op;
ax.rt = IEEE754_RT_DP; /* result type: double precision */
ax.rv.dp = r;
va_start(ax.ap, op);
ieee754_xcpt(&ax);
va_end(ax.ap);
return ax.rv.dp;
}
/* Handle a NaN result r (asserted).  A quiet NaN passes through
 * untouched.  A signalling NaN raises INVALID_OPERATION: if the trap is
 * not enabled it is quieted by clearing the top mantissa bit (falling
 * back to the default indefinite NaN if that clears the whole payload);
 * if the trap is enabled, the exception dispatcher decides the result.
 */
ieee754dp ieee754dp_nanxcpt(ieee754dp r, const char *op, ...)
{
struct ieee754xctx ax;
assert(ieee754dp_isnan(r));
if (!ieee754dp_issnan(r))	/* QNAN does not cause invalid op !! */
return r;
if (!SETANDTESTCX(IEEE754_INVALID_OPERATION)) {
/* not enabled convert to a quiet NaN */
DPMANT(r) &= (~DP_MBIT(DP_MBITS-1));
/* clearing the sNaN bit may have produced infinity; fall back */
if (ieee754dp_isnan(r))
return r;
else
return ieee754dp_indef();
}
ax.op = op;
ax.rt = 0;
ax.rv.dp = r;
va_start(ax.ap, op);
ieee754_xcpt(&ax);
va_end(ax.ap);
return ax.rv.dp;
}
/*
 * Given two NaN operands (asserted), propagate the one with the larger
 * mantissa payload.
 */
ieee754dp ieee754dp_bestnan(ieee754dp x, ieee754dp y)
{
	assert(ieee754dp_isnan(x));
	assert(ieee754dp_isnan(y));

	return (DPMANT(x) > DPMANT(y)) ? x : y;
}
/*
 * Apply the current rounding mode to a mantissa xm that carries 3
 * guard/round/sticky bits in its low bits.  Returns xm possibly
 * incremented; the grs bits themselves are NOT stripped here.
 * sn is the sign, used by the directed (RU/RD) modes.
 */
static u64 get_rounding(int sn, u64 xm)
{
	/* exact value: no grs bits set, nothing to round */
	if ((xm & (DP_MBIT(3) - 1)) == 0)
		return xm;

	switch (ieee754_csr.rm) {
	case IEEE754_RZ:	/* truncate: drop the grs bits later */
		break;
	case IEEE754_RN:	/* nearest, ties to even */
		xm += 0x3 + ((xm >> 3) & 1);
		break;
	case IEEE754_RU:	/* toward +Infinity */
		if (!sn)
			xm += 0x8;
		break;
	case IEEE754_RD:	/* toward -Infinity */
		if (sn)
			xm += 0x8;
		break;
	}
	return xm;
}
/* Assemble a double from sign/exponent/mantissa, handling rounding,
 * overflow, underflow and denormal generation.
 * sn is the sign (0 positive, non-zero negative)
 * xe is an unbiased exponent
 * xm is the mantissa extended by 3 guard/round/sticky bits, with the
 *    hidden bit set (asserted below); an exact zero is never passed in.
 */
ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
{
assert(xm); /* we don't gen exact zeros (probably should) */
assert((xm >> (DP_MBITS + 1 + 3)) == 0); /* no excess */
assert(xm & (DP_HIDDEN_BIT << 3));
if (xe < DP_EMIN) {
/* result is tiny: strip lower bits */
int es = DP_EMIN - xe;
if (ieee754_csr.nod) {
/* flush-to-zero (no-denormals) mode: pick zero or the
 * smallest normal according to the rounding direction */
SETCX(IEEE754_UNDERFLOW);
SETCX(IEEE754_INEXACT);
switch(ieee754_csr.rm) {
case IEEE754_RN:
case IEEE754_RZ:
return ieee754dp_zero(sn);
case IEEE754_RU: /* toward +Infinity */
if(sn == 0)
return ieee754dp_min(0);
else
return ieee754dp_zero(1);
case IEEE754_RD: /* toward -Infinity */
if(sn == 0)
return ieee754dp_zero(0);
else
return ieee754dp_min(1);
}
}
if (xe == DP_EMIN - 1
&& get_rounding(sn, xm) >> (DP_MBITS + 1 + 3))
{
/* Not tiny after rounding: rounding carried into the
 * hidden-bit position, so the result is normal */
SETCX(IEEE754_INEXACT);
xm = get_rounding(sn, xm);
xm >>= 1;
/* Clear grs bits */
xm &= ~(DP_MBIT(3) - 1);
xe++;
}
else {
/* sticky right shift es bits: low bits are ORed into the
 * sticky bit so later rounding still sees them
 */
xm = XDPSRS(xm, es);
xe += es;
assert((xm & (DP_HIDDEN_BIT << 3)) == 0);
assert(xe == DP_EMIN);
}
}
if (xm & (DP_MBIT(3) - 1)) {
SETCX(IEEE754_INEXACT);
/* denormal result (hidden bit lost) underflows */
if ((xm & (DP_HIDDEN_BIT << 3)) == 0) {
SETCX(IEEE754_UNDERFLOW);
}
/* inexact must round of 3 bits
 */
xm = get_rounding(sn, xm);
/* adjust exponent for rounding add overflowing
 */
if (xm >> (DP_MBITS + 3 + 1)) {
/* add causes mantissa overflow */
xm >>= 1;
xe++;
}
}
/* strip grs bits */
xm >>= 3;
assert((xm >> (DP_MBITS + 1)) == 0); /* no excess */
assert(xe >= DP_EMIN);
if (xe > DP_EMAX) {
/* overflow: deliver infinity or the largest finite value
 * depending on rounding direction and sign */
SETCX(IEEE754_OVERFLOW);
SETCX(IEEE754_INEXACT);
/* -O can be table indexed by (rm,sn) */
switch (ieee754_csr.rm) {
case IEEE754_RN:
return ieee754dp_inf(sn);
case IEEE754_RZ:
return ieee754dp_max(sn);
case IEEE754_RU: /* toward +Infinity */
if (sn == 0)
return ieee754dp_inf(0);
else
return ieee754dp_max(1);
case IEEE754_RD: /* toward -Infinity */
if (sn == 0)
return ieee754dp_max(0);
else
return ieee754dp_inf(1);
}
}
/* gen norm/denorm/zero */
if ((xm & DP_HIDDEN_BIT) == 0) {
/* we underflow (tiny/zero): denormals are encoded with the
 * biased exponent field below DP_EMIN */
assert(xe == DP_EMIN);
if (ieee754_csr.mx & IEEE754_UNDERFLOW)
SETCX(IEEE754_UNDERFLOW);
return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm);
} else {
assert((xm >> (DP_MBITS + 1)) == 0); /* no excess */
assert(xm & DP_HIDDEN_BIT);
/* normal number: hidden bit is implicit in the encoding */
return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
}
}
| gpl-2.0 |
joshfire/chromiumos-third_party-kernel | drivers/net/yellowfin.c | 2687 | 45987 | /* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
/*
Written 1997-2001 by Donald Becker.
This software may be used and distributed according to the terms of
the GNU General Public License (GPL), incorporated herein by reference.
Drivers based on or derived from this code fall under the GPL and must
retain the authorship, copyright and license notice. This file is not
a complete program and may only be used when the entire operating
system is licensed under the GPL.
This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
It also supports the Symbios Logic version of the same chip core.
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation
410 Severn Ave., Suite 210
Annapolis MD 21403
Support and updates available at
http://www.scyld.com/network/yellowfin.html
[link no longer provides useful info -jgarzik]
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "yellowfin"
#define DRV_VERSION "2.1"
#define DRV_RELDATE "Sep 11, 2006"
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
static int mtu;
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
/* System-wide count of bogus-rx frames. */
static int bogus_rx;
static int dma_ctrl = 0x004A0263; /* Constrained by errata */
static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
#elif defined(YF_NEW) /* A future perfect board :->. */
static int dma_ctrl = 0x00CAC277; /* Override when loading module! */
static int fifo_cfg = 0x0028;
#else
static const int dma_ctrl = 0x004A0263; /* Constrained by errata */
static const int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */
#endif
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
Setting to > 1514 effectively disables this feature. */
static int rx_copybreak;
/* Used to pass the media type, etc.
No media types are currently defined. These exist for driver
interoperability.
*/
#define MAX_UNITS 8 /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Do ugly workaround for GX server chipset errata. */
static int gx_fix;
/* Operational parameters that are set at compile time. */
/* Keep the ring sizes a power of two for efficiency.
Making the Tx ring too long decreases the effectiveness of channel
bonding and packet priority.
There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE 16
#define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
#define RX_RING_SIZE 64
#define STATUS_TOTAL_SIZE TX_RING_SIZE*sizeof(struct tx_status_words)
#define TX_TOTAL_SIZE 2*TX_RING_SIZE*sizeof(struct yellowfin_desc)
#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct yellowfin_desc)
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (2*HZ)
#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
#define yellowfin_debug debug
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/processor.h> /* Processor type for cache alignment. */
#include <asm/unaligned.h>
#include <asm/io.h>
/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n"
" (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n";
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
MODULE_LICENSE("GPL");
module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(gx_fix, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)");
MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)");
MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)");
MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)");
/*
Theory of Operation
I. Board Compatibility
This device driver is designed for the Packet Engines "Yellowfin" Gigabit
Ethernet adapter. The G-NIC 64-bit PCI card is supported, as well as the
Symbios 53C885E dual function chip.
II. Board-specific settings
PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board. The system BIOS preferably should assign the
PCI INTA signal to an otherwise unused system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.
III. Driver operation
IIIa. Ring buffers
The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple.
This is a descriptor list scheme similar to that used by the EEPro100 and
Tulip. This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the Yellowfin as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack and replaced by a newly allocated skbuff.
The RX_COPYBREAK value is chosen to trade-off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. For small frames the copying cost is negligible (esp. considering
that we are pre-loading the cache with immediately useful header
information). For large frames the copying cost is non-trivial, and the
larger copy might flush the cache of useful data.
IIIC. Synchronization
The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and other software.
The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished otherwise it sets
the 'yp->tx_full' flag.
The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
IV. Notes
Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
and an AlphaStation to verify the Alpha port!
IVb. References
Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
Data Manual v3.0
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
IVc. Errata
See Packet Engines confidential appendix (prototype chips only).
*/
/* Per-board capability and errata flags, stored in pci_id_info.drv_flags
 * and copied to yellowfin_private.drv_flags at probe time. */
enum capability_flags {
HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
HasMACAddrBug=32, /* Only on early revs. */
DontUseEeprom=64, /* Don't read the MAC from the EEPROM. */
};
/* The PCI I/O space extent. */
enum {
YELLOWFIN_SIZE = 0x100,
};
struct pci_id_info {
const char *name;
struct match_info {
int pci, pci_mask, subsystem, subsystem_mask;
int revision, revision_mask; /* Only 8 bits. */
} id;
int drv_flags; /* Driver use, intended as capability flags. */
};
static const struct pci_id_info pci_id_tbl[] = {
{"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom},
{"Symbios SYM83C885", { 0x07011000, 0xffffffff},
HasMII | DontUseEeprom },
{ }
};
static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ }
};
MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl);
/* Offsets to the Yellowfin registers. Various sizes and alignments. */
enum yellowfin_offsets {
TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C,
TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18,
RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
MII_Status=0xAE,
RxDepth=0xB8, FlowCtrl=0xBC,
AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
EEFeature=0xF5,
};
/* The Yellowfin Rx and Tx buffer descriptors.
   Elements are written as 32 bit for endian portability.
   Shared with the chip's DBDMA engine, hence the __le32 fields. */
struct yellowfin_desc {
__le32 dbdma_cmd; /* CMD_* opcode | flag bits | byte count */
__le32 addr; /* bus address of the data buffer */
__le32 branch_addr; /* bus address of the next descriptor */
__le32 result_status; /* written back by the chip on completion */
};
/* Tx completion status block written back by the chip (one per Tx
 * slot; see tx_status in yellowfin_private).  The 16-bit fields are
 * swapped per endianness — presumably so the chip's 32-bit status
 * writes land on the right halves; TODO confirm against the G-NIC
 * data sheet. */
struct tx_status_words {
#ifdef __BIG_ENDIAN
u16 tx_errs;
u16 tx_cnt;
u16 paused;
u16 total_tx_cnt;
#else /* Little endian chips. */
u16 tx_cnt;
u16 tx_errs;
u16 total_tx_cnt;
u16 paused;
#endif /* __BIG_ENDIAN */
};
/* Bits in yellowfin_desc.cmd */
enum desc_cmd_bits {
CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
CMD_NOP=0x60000000, CMD_STOP=0x70000000,
BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
BRANCH_IFTRUE=0x040000,
};
/* Bits in yellowfin_desc.status */
enum desc_status_bits { RX_EOP=0x0040, };
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08,
IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
IntrEarlyRx=0x100, IntrWakeup=0x200, };
#define PRIV_ALIGN 31 /* Required alignment mask */
#define MII_CNT 4
/* Per-device driver state, allocated with alloc_etherdev(). */
struct yellowfin_private {
/* Descriptor rings first for alignment.
Tx requires a second descriptor for status. */
struct yellowfin_desc *rx_ring; /* coherent DMA; RX_RING_SIZE entries */
struct yellowfin_desc *tx_ring; /* coherent DMA; up to 2*TX_RING_SIZE entries */
struct sk_buff* rx_skbuff[RX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
dma_addr_t rx_ring_dma; /* bus address of rx_ring */
dma_addr_t tx_ring_dma; /* bus address of tx_ring */
struct tx_status_words *tx_status; /* chip-written Tx status blocks */
dma_addr_t tx_status_dma;
struct timer_list timer; /* Media selection timer. */
/* Frequently used and paired value: keep adjacent for cache effect. */
int chip_id, drv_flags;
struct pci_dev *pci_dev;
unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
unsigned int rx_buf_sz; /* Based on MTU+slack. */
struct tx_status_words *tx_tail_desc;
unsigned int cur_tx, dirty_tx; /* Tx producer/consumer indices */
int tx_threshold;
unsigned int tx_full:1; /* The Tx queue is full. */
unsigned int full_duplex:1; /* Full-duplex operation requested. */
unsigned int duplex_lock:1;
unsigned int medialock:1; /* Do not sense media. */
unsigned int default_port:4; /* Last dev->if_port value. */
/* MII transceiver section. */
int mii_cnt; /* MII device addresses. */
u16 advertising; /* NWay media advertisement */
unsigned char phys[MII_CNT]; /* MII device addresses, only first one used */
spinlock_t lock; /* protects ring state; taken in the IRQ handler */
void __iomem *base; /* mapped chip registers */
};
static int read_eeprom(void __iomem *ioaddr, int location);
static int mdio_read(void __iomem *ioaddr, int phy_id, int location);
static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int yellowfin_open(struct net_device *dev);
static void yellowfin_timer(unsigned long data);
static void yellowfin_tx_timeout(struct net_device *dev);
static int yellowfin_init_ring(struct net_device *dev);
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
static int yellowfin_rx(struct net_device *dev);
static void yellowfin_error(struct net_device *dev, int intr_status);
static int yellowfin_close(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static const struct ethtool_ops ethtool_ops;
/* Standard net_device callbacks; generic eth_* helpers are used where
 * no chip-specific handling is needed. */
static const struct net_device_ops netdev_ops = {
.ndo_open = yellowfin_open,
.ndo_stop = yellowfin_close,
.ndo_start_xmit = yellowfin_start_xmit,
.ndo_set_multicast_list = set_rx_mode,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_do_ioctl = netdev_ioctl,
.ndo_tx_timeout = yellowfin_tx_timeout,
};
/* PCI probe: enable the device, map its registers, read the MAC
 * address (from chip registers or the EEPROM depending on drv_flags),
 * allocate the coherent Tx/Rx/status rings, apply module options and
 * register the net device.  Returns 0 on success; unwinds all
 * allocations via the err_out_* labels on failure.
 *
 * Fix: the ethtool-ops line contained HTML-entity corruption
 * ("ðtool_ops" — "&eth" rendered as the &eth; entity); restored to
 * SET_ETHTOOL_OPS(dev, &ethtool_ops).
 */
static int __devinit yellowfin_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *dev;
struct yellowfin_private *np;
int irq;
int chip_idx = ent->driver_data;
static int find_cnt;
void __iomem *ioaddr;
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
int drv_flags = pci_id_tbl[chip_idx].drv_flags;
void *ring_space;
dma_addr_t ring_dma;
#ifdef USE_IO_OPS
int bar = 0;
#else
int bar = 1;
#endif
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
static int printed_version;
if (!printed_version++)
printk(version);
#endif
i = pci_enable_device(pdev);
if (i) return i;
dev = alloc_etherdev(sizeof(*np));
if (!dev) {
pr_err("cannot allocate ethernet device\n");
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
np = netdev_priv(dev);
if (pci_request_regions(pdev, DRV_NAME))
goto err_out_free_netdev;
pci_set_master (pdev);
ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE);
if (!ioaddr)
goto err_out_free_res;
irq = pdev->irq;
/* MAC address: station-address registers, or EEPROM (with the
 * second-half offset quirk) when the board trusts its EEPROM */
if (drv_flags & DontUseEeprom)
for (i = 0; i < 6; i++)
dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
else {
int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
for (i = 0; i < 6; i++)
dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
}
/* Reset the chip. */
iowrite32(0x80000000, ioaddr + DMACtrl);
dev->base_addr = (unsigned long)ioaddr;
dev->irq = irq;
pci_set_drvdata(pdev, dev);
spin_lock_init(&np->lock);
np->pci_dev = pdev;
np->chip_id = chip_idx;
np->drv_flags = drv_flags;
np->base = ioaddr;
ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_cleardev;
np->tx_ring = (struct yellowfin_desc *)ring_space;
np->tx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_tx;
np->rx_ring = (struct yellowfin_desc *)ring_space;
np->rx_ring_dma = ring_dma;
ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
if (!ring_space)
goto err_out_unmap_rx;
np->tx_status = (struct tx_status_words *)ring_space;
np->tx_status_dma = ring_dma;
if (dev->mem_start)
option = dev->mem_start;
/* The lower four bits are the media type. */
if (option > 0) {
if (option & 0x200)
np->full_duplex = 1;
np->default_port = option & 15;
if (np->default_port)
np->medialock = 1;
}
if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
np->full_duplex = 1;
if (np->full_duplex)
np->duplex_lock = 1;
/* The Yellowfin-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
SET_ETHTOOL_OPS(dev, &ethtool_ops);
dev->watchdog_timeo = TX_TIMEOUT;
if (mtu)
dev->mtu = mtu;
i = register_netdev(dev);
if (i)
goto err_out_unmap_status;
netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
pci_id_tbl[chip_idx].name,
ioread32(ioaddr + ChipRev), ioaddr,
dev->dev_addr, irq);
/* Scan the MII bus for PHYs on boards that have one */
if (np->drv_flags & HasMII) {
int phy, phy_idx = 0;
for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
int mii_status = mdio_read(ioaddr, phy, 1);
if (mii_status != 0xffff && mii_status != 0x0000) {
np->phys[phy_idx++] = phy;
np->advertising = mdio_read(ioaddr, phy, 4);
netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
phy, mii_status, np->advertising);
}
}
np->mii_cnt = phy_idx;
}
find_cnt++;
return 0;
err_out_unmap_status:
pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
np->tx_status_dma);
err_out_unmap_rx:
pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_cleardev:
pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
err_out_free_netdev:
free_netdev (dev);
/* NOTE(review): the register_netdev() error code is discarded here
 * and -ENODEV returned for every failure path; acceptable for probe,
 * but a propagated code would be more informative. */
return -ENODEV;
}
/* Read one byte from the serial EEPROM at @location.
 * Polls the EEStatus busy bit (0x80) with a bounded spin so a dead
 * part cannot hang the probe. */
static int __devinit read_eeprom(void __iomem *ioaddr, int location)
{
	int spins = 10000;	/* Typical 33Mhz: 1050 ticks */

	iowrite8(location, ioaddr + EEAddr);
	iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
	while ((ioread8(ioaddr + EEStatus) & 0x80) && --spins > 0)
		;
	return ioread8(ioaddr + EERead);
}
/* MII Management Data I/O accesses.
These routines assume the MDIO controller is idle, and do not exit until
the command is finished. */
/* Read one MII register: latch phy/register into MII_Addr, issue the
 * read command, then poll MII_Status busy (bit 0) with a bounded spin
 * before fetching the data word. */
static int mdio_read(void __iomem *ioaddr, int phy_id, int location)
{
	int tries = 10000;

	iowrite16((phy_id << 8) + location, ioaddr + MII_Addr);
	iowrite16(1, ioaddr + MII_Cmd);
	while (tries-- >= 0) {
		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
			break;
	}
	return ioread16(ioaddr + MII_Rd_Data);
}
/* Write one MII register: latch phy/register into MII_Addr, store the
 * value, then poll MII_Status busy (bit 0) with a bounded spin until
 * the controller finishes. */
static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value)
{
	int tries = 10000;

	iowrite16((phy_id << 8) + location, ioaddr + MII_Addr);
	iowrite16(value, ioaddr + MII_Wr_Data);
	/* Wait for the command to finish. */
	while (tries-- >= 0) {
		if ((ioread16(ioaddr + MII_Status) & 1) == 0)
			break;
	}
}
/* ndo_open: reset the chip, claim the (shared) IRQ, build the rings,
 * program descriptor pointers and station address, configure DMA/FIFO/
 * flow control, start the Rx/Tx channels and arm the link-beat timer.
 * Returns 0, or a negative errno from request_irq()/ring setup. */
static int yellowfin_open(struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
int i, ret;
/* Reset the chip. */
iowrite32(0x80000000, ioaddr + DMACtrl);
ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
if (ret)
return ret;
if (yellowfin_debug > 1)
netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
__func__, dev->irq);
ret = yellowfin_init_ring(dev);
if (ret) {
free_irq(dev->irq, dev);
return ret;
}
iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
for (i = 0; i < 6; i++)
iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i);
/* Set up various condition 'select' registers.
There are no options here. */
iowrite32(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
iowrite32(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */
iowrite32(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */
iowrite32(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */
iowrite32(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */
iowrite32(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */
/* Initialize other registers: with so many, this will eventually be
converted to an offset/value list. */
iowrite32(dma_ctrl, ioaddr + DMACtrl);
iowrite16(fifo_cfg, ioaddr + FIFOcfg);
/* Enable automatic generation of flow control frames, period 0xffff. */
iowrite32(0x0030FFFF, ioaddr + FlowCtrl);
yp->tx_threshold = 32;
iowrite32(yp->tx_threshold, ioaddr + TxThreshold);
if (dev->if_port == 0)
dev->if_port = yp->default_port;
netif_start_queue(dev);
/* Setting the Rx mode will start the Rx process. */
if (yp->drv_flags & IsGigabit) {
/* We are always in full-duplex mode with gigabit! */
yp->full_duplex = 1;
iowrite16(0x01CF, ioaddr + Cnfg);
} else {
iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
iowrite16(0x1018, ioaddr + FrameGap1);
iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
}
set_rx_mode(dev);
/* Enable interrupts by setting the interrupt mask. */
iowrite16(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
iowrite16(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
iowrite32(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
iowrite32(0x80008000, ioaddr + TxCtrl);
if (yellowfin_debug > 2) {
netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
}
/* Set the timer to check for link beat. */
init_timer(&yp->timer);
yp->timer.expires = jiffies + 3*HZ;
yp->timer.data = (unsigned long)dev;
yp->timer.function = yellowfin_timer; /* timer handler */
add_timer(&yp->timer);
return 0;
}
/* Periodic media timer: when an MII PHY is present, re-read link and
 * partner-ability registers, update the duplex setting in Cnfg, and
 * re-arm sooner (3s) while the link is down, slower (60s) when up. */
static void yellowfin_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
int next_tick = 60*HZ;
if (yellowfin_debug > 3) {
netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
ioread16(ioaddr + IntrStatus));
}
if (yp->mii_cnt) {
int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR);
int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
int negotiated = lpa & yp->advertising;
if (yellowfin_debug > 1)
netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
yp->phys[0], bmsr, lpa);
yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
if (bmsr & BMSR_LSTATUS)
next_tick = 60*HZ;
else
next_tick = 3*HZ; /* poll faster until the link comes up */
}
yp->timer.expires = jiffies + next_tick;
add_timer(&yp->timer);
}
/* ndo_tx_timeout: log diagnostic ring state, kick the Tx channel
 * awake, and re-enable the queue if there is room.  Does not perform a
 * full chip reset (see the comment below). */
static void yellowfin_tx_timeout(struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
yp->cur_tx, yp->dirty_tx,
ioread32(ioaddr + TxStatus),
ioread32(ioaddr + RxStatus));
/* Note: these should be KERN_DEBUG. */
if (yellowfin_debug) {
int i;
pr_warning(" Rx ring %p: ", yp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
pr_cont(" %08x", yp->rx_ring[i].result_status);
pr_cont("\n");
pr_warning(" Tx ring %p: ", yp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
pr_cont(" %04x /%08x",
yp->tx_status[i].tx_errs,
yp->tx_ring[i].result_status);
pr_cont("\n");
}
/* If the hardware is found to hang regularly, we will update the code
to reinitialize the chip here. */
dev->if_port = 0;
/* Wake the potentially-idle transmit channel. */
iowrite32(0x10001000, yp->base + TxCtrl);
if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
netif_wake_queue (dev); /* Typical path */
dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
/* Initialize the Rx and Tx rings, along with various 'dev' bits.
 * Links each descriptor's branch_addr to the next (mod ring size),
 * allocates and DMA-maps one full-size skb per Rx slot, and primes the
 * Tx ring with CMD_STOP entries.  Returns 0, or -ENOMEM if any Rx skb
 * allocation fails (already-allocated skbs are freed). */
static int yellowfin_init_ring(struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
int i, j;
yp->tx_full = 0;
yp->cur_rx = yp->cur_tx = 0;
yp->dirty_tx = 0;
yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
for (i = 0; i < RX_RING_SIZE; i++) {
yp->rx_ring[i].dbdma_cmd =
cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma +
((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc));
}
for (i = 0; i < RX_RING_SIZE; i++) {
struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
yp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
skb->dev = dev; /* Mark as being used by this device. */
skb_reserve(skb, 2); /* 16 byte align the IP header. */
yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
}
if (i != RX_RING_SIZE) {
/* NOTE(review): skbs mapped above are freed without a matching
 * pci_unmap_single() — looks like a DMA-mapping leak on this
 * (rare) failure path; confirm against the DMA API rules. */
for (j = 0; j < i; j++)
dev_kfree_skb(yp->rx_skbuff[j]);
return -ENOMEM;
}
yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
#define NO_TXSTATS
#ifdef NO_TXSTATS
/* In this mode the Tx ring needs only a single descriptor. */
for (i = 0; i < TX_RING_SIZE; i++) {
yp->tx_skbuff[i] = NULL;
yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma +
((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc));
}
/* Wrap ring */
yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
#else
/* NOTE(review): this branch is dead code (NO_TXSTATS is defined just
 * above) and appears bit-rotted: 'yp->flags' and the descriptor
 * field 'request_cnt' do not exist in the structs visible in this
 * file — it would not compile if enabled.  Verify before reviving. */
{
/* Tx ring needs a pair of descriptors, the second for the status. */
for (i = 0; i < TX_RING_SIZE; i++) {
j = 2*i;
yp->tx_skbuff[i] = 0;
/* Branch on Tx error. */
yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
(j+1)*sizeof(struct yellowfin_desc));
j++;
if (yp->flags & FullTxStatus) {
yp->tx_ring[j].dbdma_cmd =
cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
i*sizeof(struct tx_status_words));
} else {
/* Symbios chips write only tx_errs word. */
yp->tx_ring[j].dbdma_cmd =
cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
yp->tx_ring[j].request_cnt = 2;
/* Om pade ummmmm... */
yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
i*sizeof(struct tx_status_words) +
&(yp->tx_status[0].tx_errs) -
&(yp->tx_status[0]));
}
yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc));
}
/* Wrap ring */
yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
}
#endif
yp->tx_tail_desc = &yp->tx_status[0];
return 0;
}
/* ndo_start_xmit: queue one skb on the Tx ring.  The queue is stopped
 * on entry and restarted at the end if there is still room, so the
 * ring-full window is closed.  The descriptor hand-off pattern writes
 * a fresh CMD_STOP beyond the new packet before overwriting the old
 * stop with the live transmit command, keeping the DBDMA engine from
 * running past valid entries. */
static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct yellowfin_private *yp = netdev_priv(dev);
unsigned entry;
int len = skb->len;
netif_stop_queue (dev);
/* Note: Ordering is important here, set the field with the
"ownership" bit last, and only then increment cur_tx. */
/* Calculate the next Tx descriptor entry. */
entry = yp->cur_tx % TX_RING_SIZE;
if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */
int cacheline_end = ((unsigned long)skb->data + skb->len) % 32;
/* Fix GX chipset errata. */
if (cacheline_end > 24 || cacheline_end == 0) {
/* pad the frame so it does not end near a cacheline boundary */
len = skb->len + 32 - cacheline_end + 1;
if (skb_padto(skb, len)) {
/* skb was freed by skb_padto(); drop silently */
yp->tx_skbuff[entry] = NULL;
netif_wake_queue(dev);
return NETDEV_TX_OK;
}
}
}
yp->tx_skbuff[entry] = skb;
#ifdef NO_TXSTATS
yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
skb->data, len, PCI_DMA_TODEVICE));
yp->tx_ring[entry].result_status = 0;
if (entry >= TX_RING_SIZE-1) {
/* New stop command. */
yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len);
} else {
yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
yp->tx_ring[entry].dbdma_cmd =
cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len);
}
yp->cur_tx++;
#else
yp->tx_ring[entry<<1].request_cnt = len;
yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
skb->data, len, PCI_DMA_TODEVICE));
/* The input_last (status-write) command is constant, but we must
rewrite the subsequent 'stop' command. */
yp->cur_tx++;
{
unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
}
/* Final step -- overwrite the old 'stop' command. */
yp->tx_ring[entry<<1].dbdma_cmd =
cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
CMD_TX_PKT | BRANCH_IFTRUE) | len);
#endif
/* Non-x86 Todo: explicitly flush cache lines here. */
/* Wake the potentially-idle transmit channel. */
iowrite32(0x10001000, yp->base + TxCtrl);
if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
netif_start_queue (dev); /* Typical path */
else
yp->tx_full = 1;
if (yellowfin_debug > 4) {
netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
yp->cur_tx, entry);
}
return NETDEV_TX_OK;
}
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.
   Fix vs. original: the Tx-status branch unmapped the DMA buffer with
   the raw little-endian descriptor address; it now uses le32_to_cpu(),
   matching the NO_TXSTATS branch (the address is stored with
   cpu_to_le32() in yellowfin_start_xmit).  Also uses NULL instead of 0
   for the cleared skb pointer. */
static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct yellowfin_private *yp;
	void __iomem *ioaddr;
	int boguscnt = max_interrupt_work;	/* bound work done per IRQ */
	unsigned int handled = 0;

	yp = netdev_priv(dev);
	ioaddr = yp->base;

	spin_lock (&yp->lock);

	do {
		/* Reading IntrClear both fetches and acknowledges events. */
		u16 intr_status = ioread16(ioaddr + IntrClear);

		if (yellowfin_debug > 4)
			netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
				      intr_status);

		if (intr_status == 0)
			break;
		handled = 1;

		if (intr_status & (IntrRxDone | IntrEarlyRx)) {
			yellowfin_rx(dev);
			iowrite32(0x10001000, ioaddr + RxCtrl);	/* Wake Rx engine. */
		}

#ifdef NO_TXSTATS
		/* Reclaim completed Tx descriptors in ring order. */
		for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
			int entry = yp->dirty_tx % TX_RING_SIZE;
			struct sk_buff *skb;

			if (yp->tx_ring[entry].result_status == 0)
				break;
			skb = yp->tx_skbuff[entry];
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += skb->len;
			/* Free the original skb. */
			pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
				skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb_irq(skb);
			yp->tx_skbuff[entry] = NULL;
		}
		if (yp->tx_full &&
		    yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
			/* The ring is no longer full, clear tbusy. */
			yp->tx_full = 0;
			netif_wake_queue(dev);
		}
#else
		if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) {
			unsigned dirty_tx = yp->dirty_tx;

			for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
			     dirty_tx++) {
				/* Todo: optimize this. */
				int entry = dirty_tx % TX_RING_SIZE;
				u16 tx_errs = yp->tx_status[entry].tx_errs;
				struct sk_buff *skb;

#ifndef final_version
				if (yellowfin_debug > 5)
					netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
						      entry,
						      yp->tx_status[entry].tx_cnt,
						      yp->tx_status[entry].tx_errs,
						      yp->tx_status[entry].total_tx_cnt,
						      yp->tx_status[entry].paused);
#endif
				if (tx_errs == 0)
					break;	/* It still hasn't been Txed */
				skb = yp->tx_skbuff[entry];
				if (tx_errs & 0xF810) {
					/* There was an major error, log it. */
#ifndef final_version
					if (yellowfin_debug > 1)
						netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
							      tx_errs);
#endif
					dev->stats.tx_errors++;
					if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
					if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++;
					if (tx_errs & 0x2000) dev->stats.tx_window_errors++;
					if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++;
				} else {
#ifndef final_version
					if (yellowfin_debug > 4)
						netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
							      tx_errs);
#endif
					dev->stats.tx_bytes += skb->len;
					dev->stats.collisions += tx_errs & 15;
					dev->stats.tx_packets++;
				}
				/* Free the original skb.  Convert the descriptor
				   address back from little-endian for the unmap. */
				pci_unmap_single(yp->pci_dev,
					le32_to_cpu(yp->tx_ring[entry<<1].addr),
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);
				yp->tx_skbuff[entry] = NULL;
				/* Mark status as empty. */
				yp->tx_status[entry].tx_errs = 0;
			}

#ifndef final_version
			if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
				netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
					   dirty_tx, yp->cur_tx, yp->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (yp->tx_full &&
			    yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
				/* The ring is no longer full, clear tbusy. */
				yp->tx_full = 0;
				netif_wake_queue(dev);
			}

			yp->dirty_tx = dirty_tx;
			yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
		}
#endif

		/* Log errors and other uncommon events. */
		if (intr_status & 0x2ee)	/* Abnormal error summary. */
			yellowfin_error(dev, intr_status);

		if (--boguscnt < 0) {
			netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
				    intr_status);
			break;
		}
	} while (1);

	if (yellowfin_debug > 3)
		netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
			      ioread16(ioaddr + IntrStatus));

	spin_unlock (&yp->lock);
	return IRQ_RETVAL(handled);
}
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation.
   Drains completed Rx descriptors, pushes good frames up the stack, and
   refills the ring.  Always returns 0. */
static int yellowfin_rx(struct net_device *dev)
{
	struct yellowfin_private *yp = netdev_priv(dev);
	int entry = yp->cur_rx % RX_RING_SIZE;
	/* Number of not-yet-refilled slots: upper bound on work here. */
	int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;

	if (yellowfin_debug > 4) {
		printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
		       entry, yp->rx_ring[entry].result_status);
		printk(KERN_DEBUG " #%d desc. %08x %08x %08x\n",
		       entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
		       yp->rx_ring[entry].result_status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (1) {
		struct yellowfin_desc *desc = &yp->rx_ring[entry];
		struct sk_buff *rx_skb = yp->rx_skbuff[entry];
		s16 frame_status;
		u16 desc_status;
		int data_size;
		u8 *buf_addr;

		/* Zero result_status means the chip has not completed
		   this descriptor yet -- stop here. */
		if(!desc->result_status)
			break;
		pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
			yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		desc_status = le32_to_cpu(desc->result_status) >> 16;
		buf_addr = rx_skb->data;
		/* Received length = requested count minus DBDMA residual. */
		data_size = (le32_to_cpu(desc->dbdma_cmd) -
			le32_to_cpu(desc->result_status)) & 0xffff;
		/* The chip appends a status word after the frame data.
		   NOTE(review): data_size is not checked to be >= 2 before
		   this access -- a zero-length completion would read before
		   the buffer; confirm against hardware behavior. */
		frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
		if (yellowfin_debug > 4)
			printk(KERN_DEBUG " %s() status was %04x\n",
			       __func__, frame_status);
		if (--boguscnt < 0)
			break;
		if ( ! (desc_status & RX_EOP)) {
			/* Frame spilled past one buffer -- count and drop. */
			if (data_size != 0)
				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
					    desc_status, data_size);
			dev->stats.rx_length_errors++;
		} else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
			/* There was a error. */
			if (yellowfin_debug > 3)
				printk(KERN_DEBUG " %s() Rx error was %04x\n",
				       __func__, frame_status);
			dev->stats.rx_errors++;
			if (frame_status & 0x0060) dev->stats.rx_length_errors++;
			if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
			if (frame_status & 0x0010) dev->stats.rx_crc_errors++;
			if (frame_status < 0) dev->stats.rx_dropped++;
		} else if ( !(yp->drv_flags & IsGigabit) &&
			   ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
			/* Non-gigabit chips report status in the last two
			   trailer bytes instead. */
			u8 status1 = buf_addr[data_size-2];
			u8 status2 = buf_addr[data_size-1];
			dev->stats.rx_errors++;
			if (status1 & 0xC0) dev->stats.rx_length_errors++;
			if (status2 & 0x03) dev->stats.rx_frame_errors++;
			if (status2 & 0x04) dev->stats.rx_crc_errors++;
			if (status2 & 0x80) dev->stats.rx_dropped++;
#ifdef YF_PROTOTYPE		/* Support for prototype hardware errata. */
		} else if ((yp->flags & HasMACAddrBug) &&
			   memcmp(le32_to_cpu(yp->rx_ring_dma +
					      entry*sizeof(struct yellowfin_desc)),
				  dev->dev_addr, 6) != 0 &&
			   memcmp(le32_to_cpu(yp->rx_ring_dma +
					      entry*sizeof(struct yellowfin_desc)),
				  "\377\377\377\377\377\377", 6) != 0) {
			/* NOTE(review): this compares against a DMA *address*,
			   not a virtual pointer -- looks broken, but the code
			   is guarded by YF_PROTOTYPE and normally compiled out. */
			if (bogus_rx++ == 0)
				netdev_warn(dev, "Bad frame to %pM\n",
					    buf_addr);
#endif
		} else {
			/* Good frame. */
			struct sk_buff *skb;
			int pkt_len = data_size -
				(yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
			/* To verify: Yellowfin Length should omit the CRC! */

#ifndef final_version
			if (yellowfin_debug > 4)
				printk(KERN_DEBUG " %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
				       __func__, pkt_len, data_size, boguscnt);
#endif
			/* Check if the packet is long enough to just pass up the skbuff
			   without copying to a properly sized skbuff. */
			if (pkt_len > rx_copybreak) {
				/* Hand the ring skb up and unmap it; a fresh
				   one is attached in the refill loop below. */
				skb_put(skb = rx_skb, pkt_len);
				pci_unmap_single(yp->pci_dev,
					le32_to_cpu(yp->rx_ring[entry].addr),
					yp->rx_buf_sz,
					PCI_DMA_FROMDEVICE);
				yp->rx_skbuff[entry] = NULL;
			} else {
				/* Small packet: copy into a fresh skb and
				   recycle the ring buffer. */
				skb = dev_alloc_skb(pkt_len + 2);
				if (skb == NULL)
					break;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(yp->pci_dev,
							       le32_to_cpu(desc->addr),
							       yp->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		entry = (++yp->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
		entry = yp->dirty_rx % RX_RING_SIZE;
		if (yp->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
			if (skb == NULL)
				break;			/* Better luck next round. */
			yp->rx_skbuff[entry] = skb;
			skb->dev = dev;	/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
				skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
		}
		/* The just-refilled slot becomes the new stop point; the
		   previous slot is rewritten to chain into it. */
		yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->rx_ring[entry].result_status = 0;	/* Clear complete bit. */
		if (entry != 0)
			yp->rx_ring[entry - 1].dbdma_cmd =
				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
		else
			yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
				cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
					    | yp->rx_buf_sz);
	}

	return 0;
}
/* Account abnormal interrupt causes (PCI faults) in the interface
 * statistics.  No recovery action is known for these events. */
static void yellowfin_error(struct net_device *dev, int intr_status)
{
	const int tx_fault_mask = IntrTxPCIErr | IntrTxPCIFault;
	const int rx_fault_mask = IntrRxPCIErr | IntrRxPCIFault;

	netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);

	/* Hmmmmm, it's not clear what to do here. */
	if (intr_status & tx_fault_mask)
		dev->stats.tx_errors++;
	if (intr_status & rx_fault_mask)
		dev->stats.rx_errors++;
}
/*
 * Bring the interface down: quiesce the chip (mask interrupts, stop the
 * Tx/Rx engines), kill the timer, release the IRQ and free all ring
 * buffers.  Always returns 0.
 */
static int yellowfin_close(struct net_device *dev)
{
	struct yellowfin_private *yp = netdev_priv(dev);
	void __iomem *ioaddr = yp->base;
	int i;

	netif_stop_queue (dev);

	if (yellowfin_debug > 1) {
		netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
			      ioread16(ioaddr + TxStatus),
			      ioread16(ioaddr + RxStatus),
			      ioread16(ioaddr + IntrStatus));
		netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
			      yp->cur_tx, yp->dirty_tx,
			      yp->cur_rx, yp->dirty_rx);
	}

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite16(0x0000, ioaddr + IntrEnb);

	/* Stop the chip's Tx and Rx processes. */
	iowrite32(0x80000000, ioaddr + RxCtrl);
	iowrite32(0x80000000, ioaddr + TxCtrl);

	del_timer(&yp->timer);

#if defined(__i386__)
	if (yellowfin_debug > 2) {
		/* Dump both rings for debugging.
		   NOTE(review): comparing TxPtr/RxPtr (bus addresses) with
		   (long)&yp->tx_ring[i] (virtual addresses) only "works" on
		   platforms where they coincide -- hence the __i386__ guard. */
		printk(KERN_DEBUG " Tx ring at %08llx:\n",
		       (unsigned long long)yp->tx_ring_dma);
		for (i = 0; i < TX_RING_SIZE*2; i++)
			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
			       ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
			       i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
			       yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
		printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d status %04x %04x %04x %04x\n",
			       i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
			       yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
		printk(KERN_DEBUG " Rx ring %08llx:\n",
		       (unsigned long long)yp->rx_ring_dma);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
			       ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
			       i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
			       yp->rx_ring[i].result_status);
			if (yellowfin_debug > 6) {
				if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
					int j;

					printk(KERN_DEBUG);
					for (j = 0; j < 0x50; j++)
						pr_cont(" %04x",
							get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
					pr_cont("\n");
				}
			}
		}
	}
#endif /* __i386__ debugging only */

	free_irq(dev->irq, dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
		yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
		if (yp->rx_skbuff[i]) {
			dev_kfree_skb(yp->rx_skbuff[i]);
		}
		yp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (yp->tx_skbuff[i])
			dev_kfree_skb(yp->tx_skbuff[i]);
		yp->tx_skbuff[i] = NULL;
	}

#ifdef YF_PROTOTYPE			/* Support for prototype hardware errata. */
	if (yellowfin_debug > 0) {
		netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
			      bogus_rx);
	}
#endif

	return 0;
}
/* Set or clear the multicast filter for this adaptor.
 *
 * The 64-bit multicast hash is held in four 16-bit words; "bit" below is
 * a 0..63 index whose top two bits select the word (bit >> 4) and whose
 * low four bits select the bit inside it (bit & 15).  The original code
 * shifted by the full 0..63 index, so every hash bit >= 16 was lost on
 * truncation to u16 (and shifts >= 32 were undefined behavior). */
static void set_rx_mode(struct net_device *dev)
{
	struct yellowfin_private *yp = netdev_priv(dev);
	void __iomem *ioaddr = yp->base;
	u16 cfg_value = ioread16(ioaddr + Cnfg);

	/* Stop the Rx process to change any value. */
	iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		iowrite16(0x000F, ioaddr + AddrMode);
	} else if ((netdev_mc_count(dev) > 64) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well, or accept all multicasts. */
		iowrite16(0x000B, ioaddr + AddrMode);
	} else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
		struct netdev_hw_addr *ha;
		u16 hash_table[4];
		int i;

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, dev) {
			unsigned int bit;

			/* Due to a bug in the early chip versions, multiple filter
			   slots must be set for each address. */
			if (yp->drv_flags & HasMulticastBug) {
				bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= (1 << (bit & 15));
				bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= (1 << (bit & 15));
				bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
				hash_table[bit >> 4] |= (1 << (bit & 15));
			}
			bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
			hash_table[bit >> 4] |= (1 << (bit & 15));
		}
		/* Copy the hash table to the chip. */
		for (i = 0; i < 4; i++)
			iowrite16(hash_table[i], ioaddr + HashTbl + i*2);
		iowrite16(0x0003, ioaddr + AddrMode);
	} else {			/* Normal, unicast/broadcast-only mode. */
		iowrite16(0x0001, ioaddr + AddrMode);
	}
	/* Restart the Rx process. */
	iowrite16(cfg_value | 0x1000, ioaddr + Cnfg);
}
/* ethtool get_drvinfo: report driver name, version and PCI bus location.
 * strlcpy() bounds each copy to the fixed-size ethtool_drvinfo fields,
 * where the original strcpy() could overrun them. */
static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct yellowfin_private *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
/* Only get_drvinfo is implemented; all other ethtool operations fall
 * back to the kernel defaults. */
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = yellowfin_get_drvinfo
};
/*
 * MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * Writes to PHY registers 0 and 4 of the primary PHY are shadowed in
 * the private state so the driver tracks duplex/autonegotiation.
 * Returns 0 on success, -EOPNOTSUPP for unknown commands.
 */
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct yellowfin_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	struct mii_ioctl_data *data = if_mii(rq);

	switch(cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phys[0] & 0x1f;
		/* Fall Through -- GMIIPHY also returns the register value. */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (data->phy_id == np->phys[0]) {
			u16 value = data->val_in;
			switch (data->reg_num) {
			case 0:
				/* Check for autonegotiation on or reset. */
				np->medialock = (value & 0x9000) ? 0 : 1;
				if (np->medialock)
					np->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4: np->advertising = value; break;
			}
			/* Perhaps check_duplex(dev), depending on chip semantics. */
		}
		mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
/*
 * PCI remove: tear down one device instance.
 * The netdev is unregistered FIRST so the network stack can no longer
 * reach the device while its DMA rings and MMIO mapping are being
 * released (the original code freed the rings before unregistering,
 * leaving a window for use-after-free).
 */
static void __devexit yellowfin_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct yellowfin_private *np;

	BUG_ON(!dev);
	np = netdev_priv(dev);

	unregister_netdev (dev);

	pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
		np->tx_status_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
	pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);

	pci_iounmap(pdev, np->base);

	pci_release_regions (pdev);

	free_netdev (dev);
	pci_set_drvdata(pdev, NULL);
}
/* PCI driver glue: device id table plus probe/remove entry points. */
static struct pci_driver yellowfin_driver = {
	.name		= DRV_NAME,
	.id_table	= yellowfin_pci_tbl,
	.probe		= yellowfin_init_one,
	.remove		= __devexit_p(yellowfin_remove_one),
};
/* Module entry point: print the version banner (modular builds only)
 * and register the PCI driver. */
static int __init yellowfin_init (void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	printk(version);
#endif
	return pci_register_driver(&yellowfin_driver);
}
/* Module exit point: unregister the PCI driver (which triggers
 * yellowfin_remove_one for each bound device). */
static void __exit yellowfin_cleanup (void)
{
	pci_unregister_driver (&yellowfin_driver);
}
module_init(yellowfin_init);
module_exit(yellowfin_cleanup);
| gpl-2.0 |
Alonso1398/muZic_kernel_ivoryss | drivers/media/rc/keymaps/rc-cinergy.c | 2943 | 1902 | /* cinergy.h - Keytable for cinergy Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
/* Scancode -> Linux keycode table for the Cinergy remote; entries are
 * kept in ascending scancode order (0x00 - 0x23). */
static struct rc_map_table cinergy[] = {
	{ 0x00, KEY_0 },
	{ 0x01, KEY_1 },
	{ 0x02, KEY_2 },
	{ 0x03, KEY_3 },
	{ 0x04, KEY_4 },
	{ 0x05, KEY_5 },
	{ 0x06, KEY_6 },
	{ 0x07, KEY_7 },
	{ 0x08, KEY_8 },
	{ 0x09, KEY_9 },

	{ 0x0a, KEY_POWER },
	{ 0x0b, KEY_MEDIA },		/* app */
	{ 0x0c, KEY_ZOOM },		/* zoom/fullscreen */
	{ 0x0d, KEY_CHANNELUP },	/* channel */
	{ 0x0e, KEY_CHANNELDOWN },	/* channel- */
	{ 0x0f, KEY_VOLUMEUP },
	{ 0x10, KEY_VOLUMEDOWN },
	{ 0x11, KEY_TUNER },		/* AV */
	{ 0x12, KEY_NUMLOCK },		/* -/-- */
	{ 0x13, KEY_AUDIO },		/* audio */
	{ 0x14, KEY_MUTE },
	{ 0x15, KEY_UP },
	{ 0x16, KEY_DOWN },
	{ 0x17, KEY_LEFT },
	{ 0x18, KEY_RIGHT },
	{ 0x19, BTN_LEFT, },
	{ 0x1a, BTN_RIGHT, },
	{ 0x1b, KEY_WWW },		/* text */
	{ 0x1c, KEY_REWIND },
	{ 0x1d, KEY_FORWARD },
	{ 0x1e, KEY_RECORD },
	{ 0x1f, KEY_PLAY },
	{ 0x20, KEY_PREVIOUSSONG },
	{ 0x21, KEY_NEXTSONG },
	{ 0x22, KEY_PAUSE },
	{ 0x23, KEY_STOP },
};
/* rc-core descriptor wrapping the table above; registered under the
 * RC_MAP_CINERGY name at module load. */
static struct rc_map_list cinergy_map = {
	.map = {
		.scan    = cinergy,
		.size    = ARRAY_SIZE(cinergy),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_CINERGY,
	}
};
/* Register the Cinergy keymap with rc-core at module init. */
static int __init init_rc_map_cinergy(void)
{
	return rc_map_register(&cinergy_map);
}
/* Remove the Cinergy keymap from rc-core at module exit. */
static void __exit exit_rc_map_cinergy(void)
{
	rc_map_unregister(&cinergy_map);
}
module_init(init_rc_map_cinergy)
module_exit(exit_rc_map_cinergy)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
ColDReaVeR/android_kernel_samsung_coriplus | drivers/media/rc/keymaps/rc-flydvb.c | 2943 | 2051 | /* flydvb.h - Keytable for flydvb Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
/* Scancode -> Linux keycode table for the FlyDVB remote; entries are
 * grouped by function rather than scancode order. */
static struct rc_map_table flydvb[] = {
	{ 0x01, KEY_ZOOM },		/* Full Screen */
	{ 0x00, KEY_POWER },		/* Power */

	{ 0x03, KEY_1 },
	{ 0x04, KEY_2 },
	{ 0x05, KEY_3 },
	{ 0x07, KEY_4 },
	{ 0x08, KEY_5 },
	{ 0x09, KEY_6 },
	{ 0x0b, KEY_7 },
	{ 0x0c, KEY_8 },
	{ 0x0d, KEY_9 },
	{ 0x06, KEY_AGAIN },		/* Recall */
	{ 0x0f, KEY_0 },
	{ 0x10, KEY_MUTE },		/* Mute */
	{ 0x02, KEY_RADIO },		/* TV/Radio */
	{ 0x1b, KEY_LANGUAGE },		/* SAP (Second Audio Program) */

	{ 0x14, KEY_VOLUMEUP },		/* VOL+ */
	{ 0x17, KEY_VOLUMEDOWN },	/* VOL- */
	{ 0x12, KEY_CHANNELUP },	/* CH+ */
	{ 0x13, KEY_CHANNELDOWN },	/* CH- */
	{ 0x1d, KEY_ENTER },		/* Enter */

	{ 0x1a, KEY_TV2 },		/* PIP */
	{ 0x18, KEY_VIDEO },		/* Source */

	{ 0x1e, KEY_RECORD },		/* Record/Pause */
	{ 0x15, KEY_ANGLE },		/* Swap (no label on key) */
	{ 0x1c, KEY_PAUSE },		/* Timeshift/Pause */
	{ 0x19, KEY_BACK },		/* Rewind << */
	{ 0x0a, KEY_PLAYPAUSE },	/* Play/Pause */
	{ 0x1f, KEY_FORWARD },		/* Forward >> */
	{ 0x16, KEY_PREVIOUS },		/* Back |<< */
	{ 0x11, KEY_STOP },		/* Stop */
	{ 0x0e, KEY_NEXT },		/* End >>| */
};
/* rc-core descriptor wrapping the table above; registered under the
 * RC_MAP_FLYDVB name at module load. */
static struct rc_map_list flydvb_map = {
	.map = {
		.scan    = flydvb,
		.size    = ARRAY_SIZE(flydvb),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_FLYDVB,
	}
};
/* Register the FlyDVB keymap with rc-core at module init. */
static int __init init_rc_map_flydvb(void)
{
	return rc_map_register(&flydvb_map);
}
/* Remove the FlyDVB keymap from rc-core at module exit. */
static void __exit exit_rc_map_flydvb(void)
{
	rc_map_unregister(&flydvb_map);
}
module_init(init_rc_map_flydvb)
module_exit(exit_rc_map_flydvb)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
jackyh/qt210_ics_kernel3.0.8 | net/9p/util.c | 2943 | 3340 | /*
* net/9p/util.c
*
* This file contains some helper functions
*
* Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net>
* Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to:
* Free Software Foundation
* 51 Franklin Street, Fifth Floor
* Boston, MA 02111-1301 USA
*
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/parser.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
/**
 * struct p9_idpool - per-connection accounting for tag idpool
 * @lock: protects @pool against concurrent allocate/release
 * @pool: idr the tag ids are allocated from
 */

struct p9_idpool {
	spinlock_t lock;
	struct idr pool;
};
/**
* p9_idpool_create - create a new per-connection id pool
*
*/
struct p9_idpool *p9_idpool_create(void)
{
struct p9_idpool *p;
p = kmalloc(sizeof(struct p9_idpool), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
spin_lock_init(&p->lock);
idr_init(&p->pool);
return p;
}
EXPORT_SYMBOL(p9_idpool_create);
/**
 * p9_idpool_destroy - destroy a per-connection id pool
 * @p: idpool to destroy
 *
 * Releases the idr bookkeeping and frees the pool structure itself.
 */
void p9_idpool_destroy(struct p9_idpool *p)
{
	idr_destroy(&p->pool);
	kfree(p);
}
EXPORT_SYMBOL(p9_idpool_destroy);
/**
 * p9_idpool_get - allocate numeric id from pool
 * @p: pool to allocate from
 *
 * Returns the new id, or -1 on failure.  Uses the legacy two-step idr
 * API: idr_pre_get() preallocates outside the spinlock (it may sleep),
 * idr_get_new() inserts under it; -EAGAIN means another CPU consumed
 * the preallocated node, so preallocate again and retry.
 *
 * Bugs: This seems to be an awful generic function, should it be in idr.c with
 *	the lock included in struct idr?
 */

int p9_idpool_get(struct p9_idpool *p)
{
	int i = 0;
	int error;
	unsigned long flags;

retry:
	if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
		return -1;

	spin_lock_irqsave(&p->lock, flags);

	/* no need to store exactly p, we just need something non-null */
	error = idr_get_new(&p->pool, p, &i);
	spin_unlock_irqrestore(&p->lock, flags);

	if (error == -EAGAIN)
		goto retry;
	else if (error)
		return -1;

	P9_DPRINTK(P9_DEBUG_MUX, " id %d pool %p\n", i, p);
	return i;
}
EXPORT_SYMBOL(p9_idpool_get);
/**
 * p9_idpool_put - return a previously allocated id to the pool
 * @id: numeric id which is being released
 * @p: pool to release the id into
 */
void p9_idpool_put(int id, struct p9_idpool *p)
{
	unsigned long irq_flags;

	P9_DPRINTK(P9_DEBUG_MUX, " id %d pool %p\n", id, p);

	spin_lock_irqsave(&p->lock, irq_flags);
	idr_remove(&p->pool, id);
	spin_unlock_irqrestore(&p->lock, irq_flags);
}
EXPORT_SYMBOL(p9_idpool_put);
/**
 * p9_idpool_check - test whether an id is currently allocated
 * @id: id to check
 * @p: pool to check
 *
 * Returns non-zero when @id is in use.
 */
int p9_idpool_check(int id, struct p9_idpool *p)
{
	void *entry = idr_find(&p->pool, id);

	return entry != NULL;
}
EXPORT_SYMBOL(p9_idpool_check);
| gpl-2.0 |
Ordenkrieger/android_kernel_cyanogen_msm8974 | arch/arm/mach-msm/msm_cpr-debug.c | 3455 | 3859 | /*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/module.h>
/* Per-instance state for the CPR debugfs register-poke interface. */
struct msm_cpr_debug_device {
	struct mutex debug_mutex;	/* serializes "address"/"data" file ops */
	struct dentry *dir;		/* debugfs "cpr-debug" directory */
	int addr_offset;		/* selected register offset; -1 = none */
	void __iomem *base;		/* mapped CPR register block */
};
/* Write @value to the CPR register selected by cpr->addr_offset.
 * writel_relaxed: no implicit memory barrier. */
static inline
void write_reg(struct msm_cpr_debug_device *cpr, u32 value)
{
	writel_relaxed(value, cpr->base + cpr->addr_offset);
}
/* Read the CPR register selected by cpr->addr_offset (no barrier). */
static inline u32 read_reg(struct msm_cpr_debug_device *cpr)
{
	return readl_relaxed(cpr->base + cpr->addr_offset);
}
/* Accept only register offsets inside the CPR block [0x000, 0x15C];
 * log and reject anything else. */
static bool msm_cpr_debug_addr_is_valid(int addr)
{
	bool in_range = (addr >= 0 && addr <= 0x15C);

	if (!in_range)
		pr_err("CPR register address is invalid: %d\n", addr);

	return in_range;
}
/* debugfs "data" write: store @val into the currently selected CPR
 * register.  Returns -EINVAL when no valid register offset has been
 * selected (the original silently reported success in that case). */
static int msm_cpr_debug_data_set(void *data, u64 val)
{
	struct msm_cpr_debug_device *debugdev = data;
	uint32_t reg = val;
	int rc = -EINVAL;

	mutex_lock(&debugdev->debug_mutex);

	if (msm_cpr_debug_addr_is_valid(debugdev->addr_offset)) {
		write_reg(debugdev, reg);
		rc = 0;
	}

	mutex_unlock(&debugdev->debug_mutex);
	return rc;
}
/* debugfs "data" read: return the value of the currently selected CPR
 * register.  Returns -EINVAL when no valid offset is selected -- the
 * original left *val uninitialized then, exposing stack garbage. */
static int msm_cpr_debug_data_get(void *data, u64 *val)
{
	struct msm_cpr_debug_device *debugdev = data;
	uint32_t reg;
	int rc = -EINVAL;

	mutex_lock(&debugdev->debug_mutex);

	if (msm_cpr_debug_addr_is_valid(debugdev->addr_offset)) {
		reg = read_reg(debugdev);
		*val = reg;
		rc = 0;
	}

	mutex_unlock(&debugdev->debug_mutex);
	return rc;
}
DEFINE_SIMPLE_ATTRIBUTE(debug_data_fops, msm_cpr_debug_data_get,
msm_cpr_debug_data_set, "0x%02llX\n");
/* debugfs "address" write: select the register offset used by the
 * "data" file.  Returns -EINVAL for out-of-range offsets (the original
 * silently ignored them and reported success). */
static int msm_cpr_debug_addr_set(void *data, u64 val)
{
	struct msm_cpr_debug_device *debugdev = data;

	if (!msm_cpr_debug_addr_is_valid(val))
		return -EINVAL;

	mutex_lock(&debugdev->debug_mutex);
	debugdev->addr_offset = val;
	mutex_unlock(&debugdev->debug_mutex);

	return 0;
}
/* debugfs "address" read: report the currently selected register
 * offset.  Returns -EINVAL while none is selected (addr_offset starts
 * at -1) -- the original then left *val uninitialized, so the very
 * first read after init showed stack garbage. */
static int msm_cpr_debug_addr_get(void *data, u64 *val)
{
	struct msm_cpr_debug_device *debugdev = data;
	int rc = -EINVAL;

	mutex_lock(&debugdev->debug_mutex);

	if (msm_cpr_debug_addr_is_valid(debugdev->addr_offset)) {
		*val = debugdev->addr_offset;
		rc = 0;
	}

	mutex_unlock(&debugdev->debug_mutex);
	return rc;
}
DEFINE_SIMPLE_ATTRIBUTE(debug_addr_fops, msm_cpr_debug_addr_get,
msm_cpr_debug_addr_set, "0x%03llX\n");
/*
 * Create the "cpr-debug" debugfs directory with its "address" and
 * "data" files.  @data is the mapped CPR register base.
 *
 * Fixes vs. original:
 *  - debugfs_create_dir/_file may return NULL, and PTR_ERR(NULL) == 0,
 *    so a NULL failure used to make this function return "success"
 *    after freeing everything; map NULL to -ENOMEM instead.
 *  - The device state (including debug_mutex) is now fully initialized
 *    BEFORE the files that use it become visible to userspace.
 */
int msm_cpr_debug_init(void *data)
{
	char *name = "cpr-debug";
	struct msm_cpr_debug_device *debugdev;
	struct dentry *dir;
	struct dentry *temp;
	int rc;

	debugdev = kzalloc(sizeof(struct msm_cpr_debug_device), GFP_KERNEL);
	if (debugdev == NULL) {
		pr_err("kzalloc failed\n");
		return -ENOMEM;
	}

	/* Initialize everything the file operations touch first. */
	debugdev->base = data;
	debugdev->addr_offset = -1;
	mutex_init(&debugdev->debug_mutex);

	dir = debugfs_create_dir(name, NULL);
	if (dir == NULL || IS_ERR(dir)) {
		pr_err("debugfs_create_dir failed: rc=%ld\n", PTR_ERR(dir));
		rc = dir ? PTR_ERR(dir) : -ENOMEM;
		goto dir_error;
	}
	debugdev->dir = dir;

	temp = debugfs_create_file("address", S_IRUGO | S_IWUSR, dir, debugdev,
				   &debug_addr_fops);
	if (temp == NULL || IS_ERR(temp)) {
		pr_err("debugfs_create_file failed: rc=%ld\n", PTR_ERR(temp));
		rc = temp ? PTR_ERR(temp) : -ENOMEM;
		goto file_error;
	}

	temp = debugfs_create_file("data", S_IRUGO | S_IWUSR, dir, debugdev,
				   &debug_data_fops);
	if (temp == NULL || IS_ERR(temp)) {
		pr_err("debugfs_create_file failed: rc=%ld\n", PTR_ERR(temp));
		rc = temp ? PTR_ERR(temp) : -ENOMEM;
		goto file_error;
	}

	return 0;

file_error:
	debugfs_remove_recursive(dir);
dir_error:
	kfree(debugdev);
	return rc;
}
| gpl-2.0 |
MassStash/htc_m9_kernel_sense_5.0.2 | drivers/video/aty/radeon_monitor.c | 3455 | 30925 | #include "radeonfb.h"
#include <linux/slab.h>
#include "../edid.h"
/* Built-in 640x480, 8 bpp mode description.
 * NOTE(review): presumably used as a fallback when monitor probing
 * yields no usable mode -- confirm against the callers of this table. */
static struct fb_var_screeninfo radeonfb_default_var = {
	.xres		= 640,
	.yres		= 480,
	.xres_virtual	= 640,
	.yres_virtual	= 480,
	.bits_per_pixel = 8,
	.red		= { .length = 8 },
	.green		= { .length = 8 },
	.blue		= { .length = 8 },
	.activate	= FB_ACTIVATE_NOW,
	.height		= -1,		/* physical size unknown */
	.width		= -1,
	.pixclock	= 39721,
	.left_margin	= 40,
	.right_margin	= 24,
	.upper_margin	= 32,
	.lower_margin	= 11,
	.hsync_len	= 96,
	.vsync_len	= 2,
	.vmode		= FB_VMODE_NONINTERLACED
};
/* Map a monitor-type code (MT_*) to a short human-readable name.
 * Returns NULL for unrecognized codes, exactly like the original. */
static char *radeon_get_mon_name(int type)
{
	switch (type) {
	case MT_NONE:
		return "no";
	case MT_CRT:
		return "CRT";
	case MT_DFP:
		return "DFP";
	case MT_LCD:
		return "LCD";
	case MT_CTV:
		return "CTV";
	case MT_STV:
		return "STV";
	}

	return NULL;
}
#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
/*
* Try to find monitor informations & EDID data out of the Open Firmware
* device-tree. This also contains some "hacks" to work around a few machine
* models with broken OF probing by hard-coding known EDIDs for some Mac
* laptops internal LVDS panel. (XXX: not done yet)
*/
/*
 * Read the monitor type (and, when present, the EDID block) for one
 * display from an Open Firmware node.  @hdno selects EDID1/EDID2 on
 * dual-head cards; -1 means a single-head card.  On success *out_EDID
 * may be set to a kmalloc'ed EDID copy (caller owns/frees it).
 * Returns an MT_* code, MT_NONE when nothing usable is found.
 */
static int radeon_parse_montype_prop(struct device_node *dp, u8 **out_EDID,
				     int hdno)
{
	static char *propnames[] = { "DFP,EDID", "LCD,EDID", "EDID",
				     "EDID1", "EDID2", NULL };
	const u8 *pedid = NULL;
	const u8 *pmt = NULL;
	u8 *tmp;
	int i, mt = MT_NONE;

	pr_debug("analyzing OF properties...\n");
	pmt = of_get_property(dp, "display-type", NULL);
	if (!pmt)
		return MT_NONE;
	pr_debug("display-type: %s\n", pmt);
	/* OF says "LCD" for DFP as well, we discriminate from the caller of this
	 * function
	 */
	if (!strcmp(pmt, "LCD") || !strcmp(pmt, "DFP"))
		mt = MT_DFP;
	else if (!strcmp(pmt, "CRT"))
		mt = MT_CRT;
	else {
		if (strcmp(pmt, "NONE") != 0)
			printk(KERN_WARNING "radeonfb: Unknown OF display-type: %s\n",
			       pmt);
		return MT_NONE;
	}

	/* Try every known EDID property name on this node. */
	for (i = 0; propnames[i] != NULL; ++i) {
		pedid = of_get_property(dp, propnames[i], NULL);
		if (pedid != NULL)
			break;
	}
	/* We didn't find the EDID in the leaf node, some cards will actually
	 * put EDID1/EDID2 in the parent, look for these (typically M6 tipb).
	 * single-head cards have hdno == -1 and skip this step
	 */
	if (pedid == NULL && dp->parent && (hdno != -1))
		pedid = of_get_property(dp->parent,
				(hdno == 0) ? "EDID1" : "EDID2", NULL);
	if (pedid == NULL && dp->parent && (hdno == 0))
		pedid = of_get_property(dp->parent, "EDID", NULL);
	if (pedid == NULL)
		return mt;

	/* Hand back a private copy; OF property memory is not ours. */
	tmp = kmemdup(pedid, EDID_LENGTH, GFP_KERNEL);
	if (!tmp)
		return mt;
	*out_EDID = tmp;
	return mt;
}
/*
 * Determine the monitor type attached to CRTC @head_no by walking this
 * card's Open Firmware node: on dual-CRTC cards the child whose name
 * ends in 'A' describes head 0 and 'B' head 1.  *out_EDID may receive a
 * kmalloc'ed EDID copy (caller frees).  Returns an MT_* code.
 *
 * Fix vs. original: the NULL check on the OF node was written as
 * "while (dp == NULL) return MT_NONE;" -- a loop that can execute at
 * most once; it is a plain "if".
 */
static int radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_no,
				u8 **out_EDID)
{
	struct device_node *dp;

	pr_debug("radeon_probe_OF_head\n");

	dp = rinfo->of_node;
	if (dp == NULL)
		return MT_NONE;

	if (rinfo->has_CRTC2) {
		const char *pname;
		int len, second = 0;

		dp = dp->child;
		do {
			if (!dp)
				return MT_NONE;
			pname = of_get_property(dp, "name", NULL);
			if (!pname)
				return MT_NONE;
			len = strlen(pname);
			pr_debug("head: %s (letter: %c, head_no: %d)\n",
				 pname, pname[len-1], head_no);
			if (pname[len-1] == 'A' && head_no == 0) {
				int mt = radeon_parse_montype_prop(dp, out_EDID, 0);
				/* Maybe check for LVDS_GEN_CNTL here ? I need to check out
				 * what OF does when booting with lid closed
				 */
				if (mt == MT_DFP && rinfo->is_mobility)
					mt = MT_LCD;
				return mt;
			} else if (pname[len-1] == 'B' && head_no == 1)
				return radeon_parse_montype_prop(dp, out_EDID, 1);
			second = 1;
			dp = dp->sibling;
		} while(!second);
	} else {
		if (head_no > 0)
			return MT_NONE;
		return radeon_parse_montype_prop(dp, out_EDID, -1);
	}
	return MT_NONE;
}
#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
/*
 * Retrieve LVDS/DFP panel information (ID string, size, power delay,
 * PLL dividers and detailed timings) from the card's BIOS tables.
 *
 * Returns 1 when a timing entry matching the panel size was found and
 * panel_info was marked valid, 0 otherwise.
 */
static int radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
{
	unsigned long tmp, tmp0;
	char stmp[30];
	int i;

	if (!rinfo->bios_seg)
		return 0;

	/* Pointer to the panel info table; zero means no DFP info present */
	if (!(tmp = BIOS_IN16(rinfo->fp_bios_start + 0x40))) {
		printk(KERN_ERR "radeonfb: Failed to detect DFP panel info using BIOS\n");
		rinfo->panel_info.pwr_delay = 200;
		return 0;
	}

	/* A 24-character panel ID string follows the table base */
	for(i=0; i<24; i++)
		stmp[i] = BIOS_IN8(tmp+i+1);
	stmp[24] = 0;
	printk("radeonfb: panel ID string: %s\n", stmp);
	rinfo->panel_info.xres = BIOS_IN16(tmp + 25);
	rinfo->panel_info.yres = BIOS_IN16(tmp + 27);
	printk("radeonfb: detected LVDS panel size from BIOS: %dx%d\n",
	       rinfo->panel_info.xres, rinfo->panel_info.yres);

	rinfo->panel_info.pwr_delay = BIOS_IN16(tmp + 44);
	pr_debug("BIOS provided panel power delay: %d\n", rinfo->panel_info.pwr_delay);
	/* Clamp bogus power delay values to a safe 2 seconds */
	if (rinfo->panel_info.pwr_delay > 2000 || rinfo->panel_info.pwr_delay <= 0)
		rinfo->panel_info.pwr_delay = 2000;

	/*
	 * Some panels only work properly with some divider combinations
	 */
	rinfo->panel_info.ref_divider = BIOS_IN16(tmp + 46);
	rinfo->panel_info.post_divider = BIOS_IN8(tmp + 48);
	rinfo->panel_info.fbk_divider = BIOS_IN16(tmp + 49);
	if (rinfo->panel_info.ref_divider != 0 &&
	    rinfo->panel_info.fbk_divider > 3) {
		rinfo->panel_info.use_bios_dividers = 1;
		printk(KERN_INFO "radeonfb: BIOS provided dividers will be used\n");
		pr_debug("ref_divider = %x\n", rinfo->panel_info.ref_divider);
		pr_debug("post_divider = %x\n", rinfo->panel_info.post_divider);
		pr_debug("fbk_divider = %x\n", rinfo->panel_info.fbk_divider);
	}

	/* Walk the mode table (up to 32 entries) looking for a timing that
	 * matches the panel size detected above
	 */
	pr_debug("Scanning BIOS table ...\n");
	for(i=0; i<32; i++) {
		tmp0 = BIOS_IN16(tmp+64+i*2);
		if (tmp0 == 0)
			break;
		pr_debug(" %d x %d\n", BIOS_IN16(tmp0), BIOS_IN16(tmp0+2));
		if ((BIOS_IN16(tmp0) == rinfo->panel_info.xres) &&
		    (BIOS_IN16(tmp0+2) == rinfo->panel_info.yres)) {
			rinfo->panel_info.hblank = (BIOS_IN16(tmp0+17) - BIOS_IN16(tmp0+19)) * 8;
			rinfo->panel_info.hOver_plus = ((BIOS_IN16(tmp0+21) -
							 BIOS_IN16(tmp0+19) -1) * 8) & 0x7fff;
			rinfo->panel_info.hSync_width = BIOS_IN8(tmp0+23) * 8;
			rinfo->panel_info.vblank = BIOS_IN16(tmp0+24) - BIOS_IN16(tmp0+26);
			rinfo->panel_info.vOver_plus = (BIOS_IN16(tmp0+28) & 0x7ff) - BIOS_IN16(tmp0+26);
			rinfo->panel_info.vSync_width = (BIOS_IN16(tmp0+28) & 0xf800) >> 11;
			rinfo->panel_info.clock = BIOS_IN16(tmp0+9);
			/* Assume high active syncs for now until ATI tells me more... maybe we
			 * can probe register values here ?
			 */
			rinfo->panel_info.hAct_high = 1;
			rinfo->panel_info.vAct_high = 1;
			/* Mark panel infos valid */
			rinfo->panel_info.valid = 1;

			pr_debug("Found panel in BIOS table:\n");
			pr_debug("  hblank: %d\n", rinfo->panel_info.hblank);
			pr_debug("  hOver_plus: %d\n", rinfo->panel_info.hOver_plus);
			pr_debug("  hSync_width: %d\n", rinfo->panel_info.hSync_width);
			pr_debug("  vblank: %d\n", rinfo->panel_info.vblank);
			pr_debug("  vOver_plus: %d\n", rinfo->panel_info.vOver_plus);
			pr_debug("  vSync_width: %d\n", rinfo->panel_info.vSync_width);
			pr_debug("  clock: %d\n", rinfo->panel_info.clock);

			return 1;
		}
	}
	pr_debug("Didn't find panel in BIOS table !\n");

	return 0;
}
/* Try to extract the connector information from the BIOS. This
 * doesn't quite work yet, but its output is still useful for
 * debugging
 */
static void radeon_parse_connector_info(struct radeonfb_info *rinfo)
{
int offset, chips, connectors, tmp, i, conn, type;
static char* __conn_type_table[16] = {
"NONE", "Proprietary", "CRT", "DVI-I", "DVI-D", "Unknown", "Unknown",
"Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown",
"Unknown", "Unknown", "Unknown"
};
if (!rinfo->bios_seg)
return;
offset = BIOS_IN16(rinfo->fp_bios_start + 0x50);
if (offset == 0) {
printk(KERN_WARNING "radeonfb: No connector info table detected\n");
return;
}
/* Don't do much more at this point but displaying the data if
* DEBUG is enabled
*/
chips = BIOS_IN8(offset++) >> 4;
pr_debug("%d chips in connector info\n", chips);
for (i = 0; i < chips; i++) {
tmp = BIOS_IN8(offset++);
connectors = tmp & 0x0f;
pr_debug(" - chip %d has %d connectors\n", tmp >> 4, connectors);
for (conn = 0; ; conn++) {
tmp = BIOS_IN16(offset);
if (tmp == 0)
break;
offset += 2;
type = (tmp >> 12) & 0x0f;
pr_debug(" * connector %d of type %d (%s) : %04x\n",
conn, type, __conn_type_table[type], tmp);
}
}
}
/*
* Probe physical connection of a CRT. This code comes from XFree
* as well and currently is only implemented for the CRT DAC, the
* code for the TVDAC is commented out in XFree as "non working"
*/
/*
 * Probe whether a CRT is physically attached by forcing a fixed data
 * value out of the DAC and reading back the DAC comparator output.
 * Only the primary CRT DAC path is implemented (is_crt_dac != 0);
 * the TVDAC path returns "not connected".
 *
 * Returns MT_CRT when a monitor is detected, MT_NONE otherwise.
 */
static int radeon_crt_is_connected(struct radeonfb_info *rinfo, int is_crt_dac)
{
	int connected = 0;

	/* the monitor either wasn't connected or it is a non-DDC CRT.
	 * try to probe it
	 */
	if (is_crt_dac) {
		unsigned long ulOrigVCLK_ECP_CNTL;
		unsigned long ulOrigDAC_CNTL;
		unsigned long ulOrigDAC_EXT_CNTL;
		unsigned long ulOrigCRTC_EXT_CNTL;
		unsigned long ulData;
		unsigned long ulMask;

		/* Save the pixel clock control and clear the "always on"
		 * bits for PIXCLK and the DAC clock */
		ulOrigVCLK_ECP_CNTL = INPLL(VCLK_ECP_CNTL);

		ulData = ulOrigVCLK_ECP_CNTL;
		ulData &= ~(PIXCLK_ALWAYS_ONb
			    | PIXCLK_DAC_ALWAYS_ONb);
		ulMask = ~(PIXCLK_ALWAYS_ONb
			   | PIXCLK_DAC_ALWAYS_ONb);
		OUTPLLP(VCLK_ECP_CNTL, ulData, ulMask);

		/* Turn the CRT output on */
		ulOrigCRTC_EXT_CNTL = INREG(CRTC_EXT_CNTL);
		ulData = ulOrigCRTC_EXT_CNTL;
		ulData |= CRTC_CRT_ON;
		OUTREG(CRTC_EXT_CNTL, ulData);

		/* Force a known data value onto the DAC outputs (the
		 * constant differs for RV250/RV280) */
		ulOrigDAC_EXT_CNTL = INREG(DAC_EXT_CNTL);
		ulData = ulOrigDAC_EXT_CNTL;
		ulData &= ~DAC_FORCE_DATA_MASK;
		ulData |= (DAC_FORCE_BLANK_OFF_EN
			   |DAC_FORCE_DATA_EN
			   |DAC_FORCE_DATA_SEL_MASK);
		if ((rinfo->family == CHIP_FAMILY_RV250) ||
		    (rinfo->family == CHIP_FAMILY_RV280))
			ulData |= (0x01b6 << DAC_FORCE_DATA_SHIFT);
		else
			ulData |= (0x01ac << DAC_FORCE_DATA_SHIFT);
		OUTREG(DAC_EXT_CNTL, ulData);

		/* Power the DAC up and enable its comparator */
		ulOrigDAC_CNTL = INREG(DAC_CNTL);
		ulData = ulOrigDAC_CNTL;
		ulData |= DAC_CMP_EN;
		ulData &= ~(DAC_RANGE_CNTL_MASK
			    | DAC_PDWN);
		ulData |= 0x2;
		OUTREG(DAC_CNTL, ulData);

		/* Let the signal settle before sampling the comparator */
		mdelay(1);

		ulData = INREG(DAC_CNTL);
		connected = (DAC_CMP_OUTPUT & ulData) ? 1 : 0;

		/* Restore every register touched above, in reverse order */
		ulData = ulOrigVCLK_ECP_CNTL;
		ulMask = 0xFFFFFFFFL;
		OUTPLLP(VCLK_ECP_CNTL, ulData, ulMask);

		OUTREG(DAC_CNTL, ulOrigDAC_CNTL );
		OUTREG(DAC_EXT_CNTL, ulOrigDAC_EXT_CNTL );
		OUTREG(CRTC_EXT_CNTL, ulOrigCRTC_EXT_CNTL);
	}

	return connected ? MT_CRT : MT_NONE;
}
/*
* Parse the "monitor_layout" string if any. This code is mostly
* copied from XFree's radeon driver
*/
/*
 * Parse a "monitor_layout" option of the form "<type1>,<type2>" where
 * each type is CRT, TMDS, LVDS or NONE (spaces ignored), and set
 * mon1_type/mon2_type accordingly.  Unrecognized tokens (including
 * "NONE") leave the corresponding type untouched.
 *
 * Returns 0 when no layout string was given, 1 otherwise.
 */
static int radeon_parse_monitor_layout(struct radeonfb_info *rinfo,
				       const char *monitor_layout)
{
	/* s1/s2 hold the two tokens; 4 chars + NUL is enough for the
	 * longest valid token ("TMDS"/"LVDS") */
	char s1[5], s2[5];
	int i = 0, second = 0;
	const char *s;

	if (!monitor_layout)
		return 0;

	s = monitor_layout;
	do {
		switch(*s) {
		case ',':
			/* End of first token, start filling the second */
			s1[i] = '\0';
			i = 0;
			second = 1;
			break;
		case ' ':
		case '\0':
			break;
		default:
			/* Extra characters beyond 4 are dropped */
			if (i > 4)
				break;
			if (second)
				s2[i] = *s;
			else
				s1[i] = *s;
			i++;
		}
		/* Clamp the index so the NUL store below stays in bounds;
		 * a 5th character therefore overwrites the 4th slot */
		if (i > 4)
			i = 4;
	} while (*s++);

	/* Terminate whichever token we were filling last */
	if (second)
		s2[i] = 0;
	else {
		s1[i] = 0;
		s2[0] = 0;
	}

	if (strcmp(s1, "CRT") == 0)
		rinfo->mon1_type = MT_CRT;
	else if (strcmp(s1, "TMDS") == 0)
		rinfo->mon1_type = MT_DFP;
	else if (strcmp(s1, "LVDS") == 0)
		rinfo->mon1_type = MT_LCD;

	if (strcmp(s2, "CRT") == 0)
		rinfo->mon2_type = MT_CRT;
	else if (strcmp(s2, "TMDS") == 0)
		rinfo->mon2_type = MT_DFP;
	else if (strcmp(s2, "LVDS") == 0)
		rinfo->mon2_type = MT_LCD;

	return 1;
}
/*
* Probe display on both primary and secondary card's connector (if any)
* by various available techniques (i2c, OF device tree, BIOS, ...) and
* try to retrieve EDID. The algorithm here comes from XFree's radeon
* driver
*/
/*
 * Determine mon1_type/mon2_type (and EDID blocks) for the card.  Either
 * honours a user-supplied monitor_layout string, or auto-detects via
 * Open Firmware, DDC/I2C and DAC probing, then fixes up swapped heads
 * and reversed DAC/TMDS wiring.
 */
void radeon_probe_screens(struct radeonfb_info *rinfo,
			  const char *monitor_layout, int ignore_edid)
{
#ifdef CONFIG_FB_RADEON_I2C
	/* Set once the shared crt2 DDC bus has been consumed by head 1 */
	int ddc_crt2_used = 0;
#endif
	int tmp, i;

	radeon_parse_connector_info(rinfo);

	if (radeon_parse_monitor_layout(rinfo, monitor_layout)) {

		/*
		 * If user specified a monitor_layout option, use it instead
		 * of auto-detecting. Maybe we should only use this argument
		 * on the first radeon card probed or provide a way to specify
		 * a layout for each card ?
		 */
		/* NOTE(review): message below lacks a trailing \n */
		pr_debug("Using specified monitor layout: %s", monitor_layout);
#ifdef CONFIG_FB_RADEON_I2C
		/* Even with a forced layout, still try to fetch EDID so we
		 * can pick sane timings later (unless told to ignore it) */
		if (!ignore_edid) {
			if (rinfo->mon1_type != MT_NONE)
				if (!radeon_probe_i2c_connector(rinfo, ddc_dvi, &rinfo->mon1_EDID)) {
					radeon_probe_i2c_connector(rinfo, ddc_crt2, &rinfo->mon1_EDID);
					ddc_crt2_used = 1;
				}
			if (rinfo->mon2_type != MT_NONE)
				if (!radeon_probe_i2c_connector(rinfo, ddc_vga, &rinfo->mon2_EDID) &&
				    !ddc_crt2_used)
					radeon_probe_i2c_connector(rinfo, ddc_crt2, &rinfo->mon2_EDID);
		}
#endif /* CONFIG_FB_RADEON_I2C */
		/* If only head 2 is populated, promote it to head 1 */
		if (rinfo->mon1_type == MT_NONE) {
			if (rinfo->mon2_type != MT_NONE) {
				rinfo->mon1_type = rinfo->mon2_type;
				rinfo->mon1_EDID = rinfo->mon2_EDID;
			} else {
				rinfo->mon1_type = MT_CRT;
				printk(KERN_INFO "radeonfb: No valid monitor, assuming CRT on first port\n");
			}
			rinfo->mon2_type = MT_NONE;
			rinfo->mon2_EDID = NULL;
		}
	} else {
		/*
		 * Auto-detecting display type (well... trying to ...)
		 */
		pr_debug("Starting monitor auto detection...\n");

#if defined(DEBUG) && defined(CONFIG_FB_RADEON_I2C)
		/* Debug aid: probe all four DDC buses and log the results */
		{
			u8 *EDIDs[4] = { NULL, NULL, NULL, NULL };
			int mon_types[4] = {MT_NONE, MT_NONE, MT_NONE, MT_NONE};
			int i;

			for (i = 0; i < 4; i++)
				mon_types[i] = radeon_probe_i2c_connector(rinfo,
									  i+1, &EDIDs[i]);
		}
#endif /* DEBUG */
		/*
		 * Old single head cards
		 */
		if (!rinfo->has_CRTC2) {
#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
			if (rinfo->mon1_type == MT_NONE)
				rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0,
									&rinfo->mon1_EDID);
#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
#ifdef CONFIG_FB_RADEON_I2C
			/* Try each DDC bus in turn until something answers */
			if (rinfo->mon1_type == MT_NONE)
				rinfo->mon1_type =
					radeon_probe_i2c_connector(rinfo, ddc_dvi,
								   &rinfo->mon1_EDID);
			if (rinfo->mon1_type == MT_NONE)
				rinfo->mon1_type =
					radeon_probe_i2c_connector(rinfo, ddc_vga,
								   &rinfo->mon1_EDID);
			if (rinfo->mon1_type == MT_NONE)
				rinfo->mon1_type =
					radeon_probe_i2c_connector(rinfo, ddc_crt2,
								   &rinfo->mon1_EDID);
#endif /* CONFIG_FB_RADEON_I2C */
			if (rinfo->mon1_type == MT_NONE)
				rinfo->mon1_type = MT_CRT;
			goto bail;
		}

		/*
		 * Check for cards with reversed DACs or TMDS controllers using BIOS
		 */
		if (rinfo->bios_seg &&
		    (tmp = BIOS_IN16(rinfo->fp_bios_start + 0x50))) {
			for (i = 1; i < 4; i++) {
				unsigned int tmp0;

				if (!BIOS_IN8(tmp + i*2) && i > 1)
					break;
				tmp0 = BIOS_IN16(tmp + i*2);
				if ((!(tmp0 & 0x01)) && (((tmp0 >> 8) & 0x0f) == ddc_dvi)) {
					rinfo->reversed_DAC = 1;
					printk(KERN_INFO "radeonfb: Reversed DACs detected\n");
				}
				if ((((tmp0 >> 8) & 0x0f) == ddc_dvi) && ((tmp0 >> 4) & 0x01)) {
					rinfo->reversed_TMDS = 1;
					printk(KERN_INFO "radeonfb: Reversed TMDS detected\n");
				}
			}
		}

		/*
		 * Probe primary head (DVI or laptop internal panel)
		 */
#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
		if (rinfo->mon1_type == MT_NONE)
			rinfo->mon1_type = radeon_probe_OF_head(rinfo, 0,
								&rinfo->mon1_EDID);
#endif /* CONFIG_PPC_OF || CONFIG_SPARC */
#ifdef CONFIG_FB_RADEON_I2C
		if (rinfo->mon1_type == MT_NONE)
			rinfo->mon1_type = radeon_probe_i2c_connector(rinfo, ddc_dvi,
								      &rinfo->mon1_EDID);
		if (rinfo->mon1_type == MT_NONE) {
			rinfo->mon1_type = radeon_probe_i2c_connector(rinfo, ddc_crt2,
								      &rinfo->mon1_EDID);
			if (rinfo->mon1_type != MT_NONE)
				ddc_crt2_used = 1;
		}
#endif /* CONFIG_FB_RADEON_I2C */
		/* Mobility chips: firmware scratch bit or an already-lit LVDS
		 * output indicates a non-DDC laptop panel */
		if (rinfo->mon1_type == MT_NONE && rinfo->is_mobility &&
		    ((rinfo->bios_seg && (INREG(BIOS_4_SCRATCH) & 4))
		     || (INREG(LVDS_GEN_CNTL) & LVDS_ON))) {
			rinfo->mon1_type = MT_LCD;
			printk("Non-DDC laptop panel detected\n");
		}
		if (rinfo->mon1_type == MT_NONE)
			rinfo->mon1_type = radeon_crt_is_connected(rinfo, rinfo->reversed_DAC);

		/*
		 * Probe secondary head (mostly VGA, can be DVI)
		 */
#if defined(CONFIG_PPC_OF) || defined(CONFIG_SPARC)
		if (rinfo->mon2_type == MT_NONE)
			rinfo->mon2_type = radeon_probe_OF_head(rinfo, 1,
								&rinfo->mon2_EDID);
#endif /* CONFIG_PPC_OF || defined(CONFIG_SPARC) */
#ifdef CONFIG_FB_RADEON_I2C
		if (rinfo->mon2_type == MT_NONE)
			rinfo->mon2_type = radeon_probe_i2c_connector(rinfo, ddc_vga,
								      &rinfo->mon2_EDID);
		if (rinfo->mon2_type == MT_NONE && !ddc_crt2_used)
			rinfo->mon2_type = radeon_probe_i2c_connector(rinfo, ddc_crt2,
								      &rinfo->mon2_EDID);
#endif /* CONFIG_FB_RADEON_I2C */
		if (rinfo->mon2_type == MT_NONE)
			rinfo->mon2_type = radeon_crt_is_connected(rinfo, !rinfo->reversed_DAC);

		/*
		 * If we only detected port 2, we swap them, if none detected,
		 * assume CRT (maybe fallback to old BIOS_SCRATCH stuff ? or look
		 * at FP registers ?)
		 */
		if (rinfo->mon1_type == MT_NONE) {
			if (rinfo->mon2_type != MT_NONE) {
				rinfo->mon1_type = rinfo->mon2_type;
				rinfo->mon1_EDID = rinfo->mon2_EDID;
			} else
				rinfo->mon1_type = MT_CRT;
			rinfo->mon2_type = MT_NONE;
			rinfo->mon2_EDID = NULL;
		}

		/*
		 * Deal with reversed TMDS
		 */
		if (rinfo->reversed_TMDS) {
			/* Always keep internal TMDS as primary head */
			if (rinfo->mon1_type == MT_DFP || rinfo->mon2_type == MT_DFP) {
				int tmp_type = rinfo->mon1_type;
				u8 *tmp_EDID = rinfo->mon1_EDID;

				rinfo->mon1_type = rinfo->mon2_type;
				rinfo->mon1_EDID = rinfo->mon2_EDID;
				rinfo->mon2_type = tmp_type;
				rinfo->mon2_EDID = tmp_EDID;
				/* Keeping a CRT attached means the DAC swap
				 * flips as well */
				if (rinfo->mon1_type == MT_CRT || rinfo->mon2_type == MT_CRT)
					rinfo->reversed_DAC ^= 1;
			}
		}
	}
	/* Discard any EDID collected above if the user asked us to */
	if (ignore_edid) {
		kfree(rinfo->mon1_EDID);
		rinfo->mon1_EDID = NULL;
		kfree(rinfo->mon2_EDID);
		rinfo->mon2_EDID = NULL;
	}

 bail:
	printk(KERN_INFO "radeonfb: Monitor 1 type %s found\n",
	       radeon_get_mon_name(rinfo->mon1_type));
	if (rinfo->mon1_EDID)
		printk(KERN_INFO "radeonfb: EDID probed\n");
	if (!rinfo->has_CRTC2)
		return;
	printk(KERN_INFO "radeonfb: Monitor 2 type %s found\n",
	       radeon_get_mon_name(rinfo->mon2_type));
	if (rinfo->mon2_EDID)
		printk(KERN_INFO "radeonfb: EDID probed\n");
}
/*
 * This function applies any arch/model/machine specific fixups
 * to the panel info. It may eventually alter the EDID block as
 * well, or whatever is specific to a given model and not probed
 * properly by the default code
 */
/*
 * Platform-specific panel fixups.  On PPC Open Firmware machines,
 * mobility LCD panels inherit the PLL dividers the firmware programmed
 * instead of letting the driver compute its own.
 */
static void radeon_fixup_panel_info(struct radeonfb_info *rinfo)
{
#ifdef CONFIG_PPC_OF
	/*
	 * LCD Flat panels should use fixed dividers, we enforce that on
	 * PPC only for now...
	 */
	if (!rinfo->panel_info.use_bios_dividers && rinfo->mon1_type == MT_LCD
	    && rinfo->is_mobility) {
		int ppll_div_sel;
		u32 ppll_divn;

		/* Read which PPLL the firmware selected, then fetch its
		 * divider register (with the usual PLL-index errata dance) */
		ppll_div_sel = INREG8(CLOCK_CNTL_INDEX + 1) & 0x3;
		radeon_pll_errata_after_index(rinfo);
		ppll_divn = INPLL(PPLL_DIV_0 + ppll_div_sel);
		rinfo->panel_info.ref_divider = rinfo->pll.ref_div;
		rinfo->panel_info.fbk_divider = ppll_divn & 0x7ff;
		rinfo->panel_info.post_divider = (ppll_divn >> 16) & 0x7;
		rinfo->panel_info.use_bios_dividers = 1;

		printk(KERN_DEBUG "radeonfb: Using Firmware dividers 0x%08x "
		       "from PPLL %d\n",
		       rinfo->panel_info.fbk_divider |
		       (rinfo->panel_info.post_divider << 16),
		       ppll_div_sel);
	}
#endif /* CONFIG_PPC_OF */
}
/*
* Fill up panel infos from a mode definition, either returned by the EDID
* or from the default mode when we can't do any better
*/
/*
 * Derive panel timing information from an fb_var_screeninfo (typically
 * the first EDID detailed timing, or a chosen default mode) and mark
 * the panel info valid.
 */
static void radeon_var_to_panel_info(struct radeonfb_info *rinfo, struct fb_var_screeninfo *var)
{
	/* Visible size and dot clock: var->pixclock is in picoseconds,
	 * so 100000000 / pixclock yields the clock in 10 kHz units */
	rinfo->panel_info.xres  = var->xres;
	rinfo->panel_info.yres  = var->yres;
	rinfo->panel_info.clock = 100000000 / var->pixclock;

	/* Horizontal timings: front porch, sync width, total blanking */
	rinfo->panel_info.hOver_plus  = var->right_margin;
	rinfo->panel_info.hSync_width = var->hsync_len;
	rinfo->panel_info.hblank      = var->left_margin + var->right_margin + var->hsync_len;

	/* Vertical timings, same layout as above */
	rinfo->panel_info.vOver_plus  = var->lower_margin;
	rinfo->panel_info.vSync_width = var->vsync_len;
	rinfo->panel_info.vblank      = var->upper_margin + var->lower_margin + var->vsync_len;

	/* Sync polarities */
	rinfo->panel_info.hAct_high = !!(var->sync & FB_SYNC_HOR_HIGH_ACT);
	rinfo->panel_info.vAct_high = !!(var->sync & FB_SYNC_VERT_HIGH_ACT);

	rinfo->panel_info.valid = 1;

	/* We use a default of 200ms for the panel power delay,
	 * I need to have a real schedule() instead of mdelay's in the panel code.
	 * we might be possible to figure out a better power delay either from
	 * MacOS OF tree or from the EDID block (proprietary extensions ?)
	 */
	rinfo->panel_info.pwr_delay = 200;
}
/*
 * Copy the timing fields of an fb_videomode into an fb_var_screeninfo,
 * making the virtual resolution equal to the visible one and resetting
 * the panning offsets.
 */
static void radeon_videomode_to_var(struct fb_var_screeninfo *var,
				    const struct fb_videomode *mode)
{
	/* Geometry */
	var->xres         = mode->xres;
	var->xres_virtual = mode->xres;
	var->yres         = mode->yres;
	var->yres_virtual = mode->yres;
	var->xoffset      = 0;
	var->yoffset      = 0;

	/* Timings */
	var->pixclock     = mode->pixclock;
	var->left_margin  = mode->left_margin;
	var->right_margin = mode->right_margin;
	var->upper_margin = mode->upper_margin;
	var->lower_margin = mode->lower_margin;
	var->hsync_len    = mode->hsync_len;
	var->vsync_len    = mode->vsync_len;

	/* Flags */
	var->sync         = mode->sync;
	var->vmode        = mode->vmode;
}
#ifdef CONFIG_PPC_PSERIES
/*
 * Return non-zero when the device tree root's "model" property starts
 * with the given model string (used to recognize IBM JS/QS blades).
 */
static int is_powerblade(const char *model)
{
	struct device_node *root = of_find_node_by_path("/");
	int rc = 0;

	if (root && model) {
		int len;
		const char *prop = of_get_property(root, "model", &len);

		if (prop) {
			/* Prefix compare, bounded by the shorter string */
			int want = strlen(model);

			rc = memcmp(model, prop, min(len, want)) == 0;
		}
		of_node_put(root);
	}
	return rc;
}
#endif
/*
* Build the modedb for head 1 (head 2 will come later), check panel infos
* from either BIOS or EDID, and pick up the default mode
*/
/*
 * Build the modelist for head 1, validate panel information gathered
 * from BIOS/EDID, and pick the default video mode (from panel info,
 * the user's mode_option, platform fallbacks, or the EDID modedb).
 */
void radeon_check_modes(struct radeonfb_info *rinfo, const char *mode_option)
{
	struct fb_info * info = rinfo->info;
	int has_default_mode = 0;

	/*
	 * Fill default var first
	 */
	info->var = radeonfb_default_var;
	INIT_LIST_HEAD(&info->modelist);

	/*
	 * First check out what BIOS has to say
	 */
	if (rinfo->mon1_type == MT_LCD)
		radeon_get_panel_info_BIOS(rinfo);

	/*
	 * Parse EDID detailed timings and deduce panel infos if any. Right now
	 * we only deal with first entry returned by parse_EDID, we may do better
	 * some day...
	 */
	if (!rinfo->panel_info.use_bios_dividers && rinfo->mon1_type != MT_CRT
	    && rinfo->mon1_EDID) {
		struct fb_var_screeninfo var;
		pr_debug("Parsing EDID data for panel info\n");
		if (fb_parse_edid(rinfo->mon1_EDID, &var) == 0) {
			/* Only take the EDID timing if it's at least as
			 * large as what the BIOS already reported */
			if (var.xres >= rinfo->panel_info.xres &&
			    var.yres >= rinfo->panel_info.yres)
				radeon_var_to_panel_info(rinfo, &var);
		}
	}

	/*
	 * Do any additional platform/arch fixups to the panel infos
	 */
	radeon_fixup_panel_info(rinfo);

	/*
	 * If we have some valid panel infos, we setup the default mode based on
	 * those
	 */
	if (rinfo->mon1_type != MT_CRT && rinfo->panel_info.valid) {
		struct fb_var_screeninfo *var = &info->var;

		pr_debug("Setting up default mode based on panel info\n");
		var->xres = rinfo->panel_info.xres;
		var->yres = rinfo->panel_info.yres;
		var->xres_virtual = rinfo->panel_info.xres;
		var->yres_virtual = rinfo->panel_info.yres;
		var->xoffset = var->yoffset = 0;
		var->bits_per_pixel = 8;
		/* panel clock is in 10 kHz units, pixclock in ps */
		var->pixclock = 100000000 / rinfo->panel_info.clock;
		var->left_margin = (rinfo->panel_info.hblank - rinfo->panel_info.hOver_plus
				    - rinfo->panel_info.hSync_width);
		var->right_margin = rinfo->panel_info.hOver_plus;
		var->upper_margin = (rinfo->panel_info.vblank - rinfo->panel_info.vOver_plus
				     - rinfo->panel_info.vSync_width);
		var->lower_margin = rinfo->panel_info.vOver_plus;
		var->hsync_len = rinfo->panel_info.hSync_width;
		var->vsync_len = rinfo->panel_info.vSync_width;
		var->sync = 0;
		if (rinfo->panel_info.hAct_high)
			var->sync |= FB_SYNC_HOR_HIGH_ACT;
		if (rinfo->panel_info.vAct_high)
			var->sync |= FB_SYNC_VERT_HIGH_ACT;
		var->vmode = 0;
		has_default_mode = 1;
	}

	/*
	 * Now build modedb from EDID
	 */
	if (rinfo->mon1_EDID) {
		fb_edid_to_monspecs(rinfo->mon1_EDID, &info->monspecs);
		fb_videomode_to_modelist(info->monspecs.modedb,
					 info->monspecs.modedb_len,
					 &info->modelist);
		rinfo->mon1_modedb = info->monspecs.modedb;
		rinfo->mon1_dbsize = info->monspecs.modedb_len;
	}

	/*
	 * Finally, if we don't have panel infos we need to figure some (or
	 * we try to read it from card), we try to pick a default mode
	 * and create some panel infos. Whatever...
	 */
	if (rinfo->mon1_type != MT_CRT && !rinfo->panel_info.valid) {
		struct fb_videomode *modedb;
		int dbsize;
		char modename[32];

		pr_debug("Guessing panel info...\n");
		/* Recover the panel size from the flat panel stretch
		 * registers when BIOS/EDID gave us nothing */
		if (rinfo->panel_info.xres == 0 || rinfo->panel_info.yres == 0) {
			u32 tmp = INREG(FP_HORZ_STRETCH) & HORZ_PANEL_SIZE;
			rinfo->panel_info.xres = ((tmp >> HORZ_PANEL_SHIFT) + 1) * 8;
			tmp = INREG(FP_VERT_STRETCH) & VERT_PANEL_SIZE;
			rinfo->panel_info.yres = (tmp >> VERT_PANEL_SHIFT) + 1;
		}
		if (rinfo->panel_info.xres == 0 || rinfo->panel_info.yres == 0) {
			printk(KERN_WARNING "radeonfb: Can't find panel size, going back to CRT\n");
			rinfo->mon1_type = MT_CRT;
			goto pickup_default;
		}
		printk(KERN_WARNING "radeonfb: Assuming panel size %dx%d\n",
		       rinfo->panel_info.xres, rinfo->panel_info.yres);
		modedb = rinfo->mon1_modedb;
		dbsize = rinfo->mon1_dbsize;
		snprintf(modename, 31, "%dx%d", rinfo->panel_info.xres, rinfo->panel_info.yres);
		if (fb_find_mode(&info->var, info, modename,
				 modedb, dbsize, NULL, 8) == 0) {
			printk(KERN_WARNING "radeonfb: Can't find mode for panel size, going back to CRT\n");
			rinfo->mon1_type = MT_CRT;
			goto pickup_default;
		}
		has_default_mode = 1;
		radeon_var_to_panel_info(rinfo, &info->var);
	}

 pickup_default:
	/*
	 * Apply passed-in mode option if any
	 */
	if (mode_option) {
		if (fb_find_mode(&info->var, info, mode_option,
				 info->monspecs.modedb,
				 info->monspecs.modedb_len, NULL, 8) != 0)
			has_default_mode = 1;
	}

#ifdef CONFIG_PPC_PSERIES
	/* Blade servers with no display get a fixed 800x600 fallback */
	if (!has_default_mode && (
		is_powerblade("IBM,8842") || /* JS20 */
		is_powerblade("IBM,8844") || /* JS21 */
		is_powerblade("IBM,7998") || /* JS12/JS21/JS22 */
		is_powerblade("IBM,0792") || /* QS21 */
		is_powerblade("IBM,0793")    /* QS22 */
	    )) {
		printk("Falling back to 800x600 on JSxx hardware\n");
		if (fb_find_mode(&info->var, info, "800x600@60",
				 info->monspecs.modedb,
				 info->monspecs.modedb_len, NULL, 8) != 0)
			has_default_mode = 1;
	}
#endif

	/*
	 * Still no mode, let's pick up a default from the db
	 */
	if (!has_default_mode && info->monspecs.modedb != NULL) {
		struct fb_monspecs *specs = &info->monspecs;
		struct fb_videomode *modedb = NULL;

		/* get preferred timing */
		if (specs->misc & FB_MISC_1ST_DETAIL) {
			int i;

			for (i = 0; i < specs->modedb_len; i++) {
				if (specs->modedb[i].flag & FB_MODE_IS_FIRST) {
					modedb = &specs->modedb[i];
					break;
				}
			}
		} else {
			/* otherwise, get first mode in database */
			modedb = &specs->modedb[0];
		}
		if (modedb != NULL) {
			info->var.bits_per_pixel = 8;
			radeon_videomode_to_var(&info->var, modedb);
			has_default_mode = 1;
		}
	}
	if (1) {
		struct fb_videomode mode;
		/* Make sure that whatever mode got selected is actually in the
		 * modelist or the kernel may die
		 */
		fb_var_to_videomode(&mode, &info->var);
		fb_add_videomode(&mode, &info->modelist);
	}
}
/*
* The code below is used to pick up a mode in check_var and
* set_var. It should be made generic
*/
/*
* This is used when looking for modes. We assign a "distance" value
* to a mode in the modedb depending how "close" it is from what we
* are looking for.
* Currently, we don't compare that much, we could do better but
* the current fbcon doesn't quite mind ;)
*/
/*
 * "Distance" between a requested var and a candidate mode: vertical
 * difference weighted fully, horizontal difference half.  Smaller is
 * a closer match (callers only pass modes at least as large as var).
 */
static int radeon_compare_modes(const struct fb_var_screeninfo *var,
				const struct fb_videomode *mode)
{
	/* The original initialized distance to 0 and immediately
	 * overwrote it; compute the value in one expression instead. */
	return (mode->yres - var->yres) + (mode->xres - var->xres) / 2;
}
/*
 * This function is called by check_var, it gets the passed in mode parameter, and
 * outputs a valid mode matching the passed-in one as closely as possible.
 * We need something better ultimately. Things like fbcon basically pass us our
 * current mode with xres/yres hacked, while things like XFree will actually
 * produce a full timing that we should respect as much as possible.
 *
 * This is why I added the FB_ACTIVATE_FIND that is used by fbcon. Without this,
 * we do a simple spec match, that's all. With it, we actually look for a mode in
 * either our monitor modedb or the vesa one if none is found there.
 *
 */
/*
 * Fill *dest with a valid mode matching *src as closely as possible,
 * searching the monitor's EDID modedb first and falling back to the
 * VESA table when a scaler (RMX) allows arbitrary modes.
 *
 * Returns 0 on success, -EINVAL when no acceptable mode exists.
 */
int radeon_match_mode(struct radeonfb_info *rinfo,
		      struct fb_var_screeninfo *dest,
		      const struct fb_var_screeninfo *src)
{
	/* Number of entries in the fbdev core's vesa_modes[] table;
	 * previously an unexplained magic "34" used in two places */
	enum { VESA_MODEDB_SIZE = 34 };
	const struct fb_videomode *db = vesa_modes;
	int i, dbsize = VESA_MODEDB_SIZE;
	int has_rmx, native_db = 0;
	int distance = INT_MAX;
	const struct fb_videomode *candidate = NULL;

	/* Start with a copy of the requested mode */
	memcpy(dest, src, sizeof(struct fb_var_screeninfo));

	/* Check if we have a modedb built from EDID */
	if (rinfo->mon1_modedb) {
		db = rinfo->mon1_modedb;
		dbsize = rinfo->mon1_dbsize;
		native_db = 1;
	}

	/* Check if we have a scaler allowing any fancy mode */
	has_rmx = rinfo->mon1_type == MT_LCD || rinfo->mon1_type == MT_DFP;

	/* If we have a scaler and are passed FB_ACTIVATE_TEST or
	 * FB_ACTIVATE_NOW, just do basic checking and return if the
	 * mode matches
	 */
	if ((src->activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_TEST ||
	    (src->activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) {
		/* We don't have an RMX, validate timings. If we don't have
		 * monspecs, we should be paranoid and not let use go above
		 * 640x480-60, but I assume userland knows what it's doing here
		 * (though I may be proven wrong...)
		 */
		if (has_rmx == 0 && rinfo->mon1_modedb)
			if (fb_validate_mode((struct fb_var_screeninfo *)src, rinfo->info))
				return -EINVAL;
		return 0;
	}

	/* Now look for a mode in the database: first pass over the native
	 * (EDID) db, optionally a second pass over the VESA db */
	while (db) {
		for (i = 0; i < dbsize; i++) {
			int d;

			/* Never pick a mode smaller than requested */
			if (db[i].yres < src->yres)
				continue;
			if (db[i].xres < src->xres)
				continue;
			d = radeon_compare_modes(src, &db[i]);
			/* If the new mode is at least as good as the previous one,
			 * then it's our new candidate
			 */
			if (d < distance) {
				candidate = &db[i];
				distance = d;
			}
		}
		db = NULL;
		/* If we have a scaler, we allow any mode from the database */
		if (native_db && has_rmx) {
			db = vesa_modes;
			dbsize = VESA_MODEDB_SIZE;
			native_db = 0;
		}
	}

	/* If we have found a match, return it */
	if (candidate != NULL) {
		radeon_videomode_to_var(dest, candidate);
		return 0;
	}

	/* If we haven't and don't have a scaler, fail */
	if (!has_rmx)
		return -EINVAL;

	return 0;
}
| gpl-2.0 |
/* $Id$
* 1993/03/31
* linux/kernel/aha1740.c
*
* Based loosely on aha1542.c which is
* Copyright (C) 1992 Tommy Thorn and
* Modified by Eric Youngdale
*
* This file is aha1740.c, written and
* Copyright (C) 1992,1993 Brad McLean
* brad@saturn.gaylord.com or brad@bradpc.gaylord.com.
*
* Modifications to makecode and queuecommand
* for proper handling of multiple devices courteously
* provided by Michael Weller, March, 1993
*
* Multiple adapter support, extended translation detection,
* update to current scsi subsystem changes, proc fs support,
* working (!) module support based on patches from Andreas Arens,
* by Andreas Degert <ad@papyrus.hamburg.com>, 2/1997
*
* aha1740_makecode may still need even more work
* if it doesn't work for your devices, take a look.
*
* Reworked for new_eh and new locking by Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* Converted to EISA and generic DMA APIs by Marc Zyngier
* <maz@wild-wind.fr.eu.org>, 4/2003.
*
* Shared interrupt support added by Rask Ingemann Lambertsen
* <rask@sygehus.dk>, 10/2003
*
* For the avoidance of doubt the "preferred form" of this code is one which
* is in an open non patent encumbered format. Where cryptographic key signing
* forms part of the process of creating an executable the information
* including keys needed to generate an equivalently functional executable
* are deemed to be part of the source code.
*/
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/eisa.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <asm/dma.h>
#include <asm/io.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "aha1740.h"
/* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH
IT WORK, THEN:
#define DEBUG
*/
#ifdef DEBUG
#define DEB(x) x
#else
#define DEB(x)
#endif
/* Per-adapter private data, stored in the Scsi_Host hostdata area
 * (accessed through the HOSTDATA() macro below).
 */
struct aha1740_hostdata {
	struct eisa_device *edev;	/* backing EISA device */
	unsigned int translation;	/* extended translation enabled? */
	unsigned int last_ecb_used;	/* presumably an allocation hint for the
					 * ECB pool -- allocator not in view,
					 * TODO confirm */
	dma_addr_t ecb_dma_addr;	/* bus address of ecb[0]; base for the
					 * cpu<->dma conversions below */
	struct ecb ecb[AHA1740_ECBS];	/* fixed pool of enhanced control blocks */
};

/* Per-command scatter/gather chain plus the DMA addresses needed to
 * release it from the interrupt handler.
 */
struct aha1740_sg {
	struct aha1740_chain sg_chain[AHA1740_SCATTER];
	dma_addr_t sg_dma_addr;		/* bus address of this block itself
					 * (passed back to dma_free_coherent) */
	dma_addr_t buf_dma_addr;
};

/* Fetch our private data from a Scsi_Host */
#define HOSTDATA(host) ((struct aha1740_hostdata *) &host->hostdata)
/*
 * Convert an ECB bus address (as delivered by the adapter) back into a
 * kernel pointer into this host's ECB pool.
 */
static inline struct ecb *ecb_dma_to_cpu (struct Scsi_Host *host,
					  dma_addr_t dma)
{
	struct aha1740_hostdata *hdata = HOSTDATA (host);

	/* Offset from the pool's bus base, applied to the CPU base */
	return (struct ecb *)((char *) hdata->ecb +
			      (unsigned int)(dma - hdata->ecb_dma_addr));
}
/*
 * Convert a pointer into this host's ECB pool into the bus address
 * the adapter expects.
 */
static inline dma_addr_t ecb_cpu_to_dma (struct Scsi_Host *host, void *cpu)
{
	struct aha1740_hostdata *hdata = HOSTDATA (host);
	dma_addr_t off = (char *) cpu - (char *) hdata->ecb;

	return hdata->ecb_dma_addr + off;
}
/*
 * /proc read handler: report I/O base, IRQ, EISA slot and the extended
 * translation setting.  Writing is not supported.
 */
static int aha1740_proc_info(struct Scsi_Host *shpnt, char *buffer,
			     char **start, off_t offset,
			     int length, int inout)
{
	struct aha1740_hostdata *hdata;
	int len;

	if (inout)
		return -ENOSYS;

	hdata = HOSTDATA(shpnt);
	len = sprintf(buffer,
		      "aha174x at IO:%lx, IRQ %d, SLOT %d.\n"
		      "Extended translation %sabled.\n",
		      shpnt->io_port, shpnt->irq, hdata->edev->slot,
		      hdata->translation ? "en" : "dis");

	/* Standard proc_info slicing: hand back at most 'length' bytes
	 * starting at 'offset' */
	if (offset > len) {
		*start = buffer;
		return 0;
	}
	*start = buffer + offset;
	len = min(len - (int)offset, length);
	return len;
}
/*
 * Translate the adapter's 4-byte ECB status (and sense bytes, used here
 * only for debug logging) into a SCSI midlayer result: DID_* code in
 * bits 16+, target status in the low byte (status[3]).
 */
static int aha1740_makecode(unchar *sense, unchar *status)
{
	/* Bit layout of the adapter status word.
	 * NOTE(review): the pointer cast below assumes the compiler's
	 * bitfield ordering matches the hardware byte layout -- a
	 * long-standing assumption of this driver, not verifiable here.
	 */
	struct statusword
	{
		ushort	don:1,	/* Command Done - No Error */
			du:1,	/* Data underrun */
		    :1,	qf:1,	/* Queue full */
		        sc:1,	/* Specification Check */
		        dor:1,	/* Data overrun */
		        ch:1,	/* Chaining Halted */
		        intr:1,	/* Interrupt issued */
		        asa:1,	/* Additional Status Available */
		        sns:1,	/* Sense information Stored */
		    :1,	ini:1,	/* Initialization Required */
			me:1,	/* Major error or exception */
		    :1,	eca:1,  /* Extended Contingent alliance */
		    :1;
	} status_word;
	int retval = DID_OK;

	status_word = * (struct statusword *) status;
#ifdef DEBUG
	printk("makecode from %x,%x,%x,%x %x,%x,%x,%x",
	       status[0], status[1], status[2], status[3],
	       sense[0], sense[1], sense[2], sense[3]);
#endif
	if (!status_word.don) { /* Anything abnormal was detected */
		if ( (status[1]&0x18) || status_word.sc ) {
			/*Additional info available*/
			/* Use the supplied info for further diagnostics */
			switch ( status[2] ) {
			case 0x12:
				if ( status_word.dor )
					retval=DID_ERROR;	/* It's an Overrun */
				/* If not overrun, assume underrun and
				 * ignore it! */
				/* fallthrough */
			case 0x00: /* No info, assume no error, should
				    * not occur */
				break;
			case 0x11:
			case 0x21:
				retval=DID_TIME_OUT;
				break;
			case 0x0a:
				retval=DID_BAD_TARGET;
				break;
			case 0x04:
			case 0x05:
				retval=DID_ABORT;
				/* Either by this driver or the
				 * AHA1740 itself */
				break;
			default:
				retval=DID_ERROR; /* No further
						   * diagnostics
						   * possible */
			}
		} else {
			/* Michael suggests, and Brad concurs: */
			if ( status_word.qf ) {
				retval = DID_TIME_OUT; /* forces a redo */
				/* I think this specific one should
				 * not happen -Brad */
				printk("aha1740.c: WARNING: AHA1740 queue overflow!\n");
			} else
				if ( status[0]&0x60 ) {
					/* Didn't find a better error */
					retval = DID_ERROR;
				}
			/* In any other case return DID_OK so for example
			   CONDITION_CHECKS make it through to the appropriate
			   device driver */
		}
	}
	/* Under all circumstances supply the target status -Michael */
	return status[3] | retval << 16;
}
/*
 * Check that the board at 'base' is running in enhanced mode (the only
 * mode this driver supports).  Returns 1 when usable, 0 otherwise.
 */
static int aha1740_test_port(unsigned int base)
{
	if (!(inb(PORTADR(base)) & PORTADDR_ENH)) {
		printk("aha174x: Board detected, but not in enhanced mode, so disabled it.\n");
		return 0;
	}
	return 1;	/* Okay, we're all set */
}
/*
 * A "high" level interrupt handler.  While the adapter signals a
 * pending interrupt (G2STAT_INTPEND), read the interrupt status and
 * the mailbox-in ECB address and service the event: complete the
 * matching SCSI command, or report an adapter-level condition.
 * All work is done under host_lock.
 */
static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
{
	struct Scsi_Host *host = (struct Scsi_Host *) dev_id;
	void (*my_done)(Scsi_Cmnd *);
	int errstatus, adapstat;
	int number_serviced;
	struct ecb *ecbptr;
	Scsi_Cmnd *SCtmp;
	unsigned int base;
	unsigned long flags;
	int handled = 0;
	struct aha1740_sg *sgptr;
	struct eisa_device *edev;

	if (!host)
		panic("aha1740.c: Irq from unknown host!\n");
	spin_lock_irqsave(host->host_lock, flags);
	base = host->io_port;
	number_serviced = 0;
	edev = HOSTDATA(host)->edev;

	while(inb(G2STAT(base)) & G2STAT_INTPEND) {
		handled = 1;
		DEB(printk("aha1740_intr top of loop.\n"));
		adapstat = inb(G2INTST(base));
		/* Mailbox-in carries the bus address of the ECB this
		 * interrupt refers to; translate it back to a pointer. */
		ecbptr = ecb_dma_to_cpu (host, inl(MBOXIN0(base)));
		outb(G2CNTRL_IRST,G2CNTRL(base)); /* interrupt reset */

		switch ( adapstat & G2INTST_MASK ) {
		case G2INTST_CCBRETRY:
		case G2INTST_CCBERROR:
		case G2INTST_CCBGOOD:
			/* Host Ready -> Mailbox in complete */
			outb(G2CNTRL_HRDY,G2CNTRL(base));
			if (!ecbptr) {
				printk("Aha1740 null ecbptr in interrupt (%x,%x,%x,%d)\n",
				       inb(G2STAT(base)),adapstat,
				       inb(G2INTST(base)), number_serviced++);
				continue;
			}
			SCtmp = ecbptr->SCpnt;
			if (!SCtmp) {
				printk("Aha1740 null SCtmp in interrupt (%x,%x,%x,%d)\n",
				       inb(G2STAT(base)),adapstat,
				       inb(G2INTST(base)), number_serviced++);
				continue;
			}
			/* Release the DMA resources set up in queuecommand */
			sgptr = (struct aha1740_sg *) SCtmp->host_scribble;
			scsi_dma_unmap(SCtmp);

			/* Free the sg block */
			dma_free_coherent (&edev->dev,
					   sizeof (struct aha1740_sg),
					   SCtmp->host_scribble,
					   sgptr->sg_dma_addr);

			/* Fetch the sense data, and tuck it away, in
			   the required slot.  The Adaptec
			   automatically fetches it, and there is no
			   guarantee that we will still have it in the
			   cdb when we come back */
			if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) {
				memcpy(SCtmp->sense_buffer, ecbptr->sense,
				       SCSI_SENSE_BUFFERSIZE);
				errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
			} else
				errstatus = 0;
			DEB(if (errstatus)
			    printk("aha1740_intr_handle: returning %6x\n",
				   errstatus));
			SCtmp->result = errstatus;
			my_done = ecbptr->done;
			/* Zeroing the ECB clears cmdw and frees it for
			 * reuse by queuecommand. */
			memset(ecbptr,0,sizeof(struct ecb));
			if ( my_done )
				my_done(SCtmp);
			break;

		case G2INTST_HARDFAIL:
			printk(KERN_ALERT "aha1740 hardware failure!\n");
			panic("aha1740.c");	/* Goodbye */

		case G2INTST_ASNEVENT:
			printk("aha1740 asynchronous event: %02x %02x %02x %02x %02x\n",
			       adapstat,
			       inb(MBOXIN0(base)),
			       inb(MBOXIN1(base)),
			       inb(MBOXIN2(base)),
			       inb(MBOXIN3(base))); /* Say What? */
			/* Host Ready -> Mailbox in complete */
			outb(G2CNTRL_HRDY,G2CNTRL(base));
			break;

		case G2INTST_CMDGOOD:
			/* set immediate command success flag here: */
			break;

		case G2INTST_CMDERROR:
			/* Set immediate command failure flag here: */
			break;
		}
		number_serviced++;
	}
	spin_unlock_irqrestore(host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
/*
 * Queue a SCSI command: reserve a free ECB, build the command block
 * (CDB, scatter/gather chain, sense/status pointers) and hand its bus
 * address to the board through the mailbox-out registers.
 *
 * Returns 0 on success, 1 if the per-command DMA block could not be
 * allocated (the midlayer retries later).
 *
 * Fix: on allocation failure the reserved ECB is now released again;
 * previously its cmdw stayed set forever, leaking the ECB out of the
 * pool until the pool was exhausted and the search loop panicked.
 */
static int aha1740_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
{
	unchar direction;
	unchar *cmd = (unchar *) SCpnt->cmnd;
	unchar target = scmd_id(SCpnt);
	struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host);
	unsigned long flags;
	dma_addr_t sg_dma;
	struct aha1740_sg *sgptr;
	int ecbno, nseg;
	DEB(int i);

	/* The board fetches sense data itself (ars=1 below), so a
	 * REQUEST_SENSE from the midlayer can complete immediately. */
	if(*cmd == REQUEST_SENSE) {
		SCpnt->result = 0;
		done(SCpnt);
		return 0;
	}

#ifdef DEBUG
	if (*cmd == READ_10 || *cmd == WRITE_10)
		i = xscsi2int(cmd+2);
	else if (*cmd == READ_6 || *cmd == WRITE_6)
		i = scsi2int(cmd+2);
	else
		i = -1;
	printk("aha1740_queuecommand: dev %d cmd %02x pos %d len %d ",
	       target, *cmd, i, bufflen);
	printk("scsi cmd:");
	for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]);
	printk("\n");
#endif

	/* locate an available ecb */
	spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
	ecbno = host->last_ecb_used + 1; /* An optimization */
	if (ecbno >= AHA1740_ECBS)
		ecbno = 0;
	do {
		if (!host->ecb[ecbno].cmdw)
			break;
		ecbno++;
		if (ecbno >= AHA1740_ECBS)
			ecbno = 0;
	} while (ecbno != host->last_ecb_used);
	if (host->ecb[ecbno].cmdw)
		panic("Unable to find empty ecb for aha1740.\n");
	host->ecb[ecbno].cmdw = AHA1740CMD_INIT; /* SCSI Initiator Command
						    doubles as reserved flag */
	host->last_ecb_used = ecbno;
	spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);

#ifdef DEBUG
	printk("Sending command (%d %x)...", ecbno, done);
#endif

	host->ecb[ecbno].cdblen = SCpnt->cmd_len; /* SCSI Command
						   * Descriptor Block
						   * Length */

	direction = 0;
	if (*cmd == READ_10 || *cmd == READ_6)
		direction = 1;
	else if (*cmd == WRITE_10 || *cmd == WRITE_6)
		direction = 0;

	memcpy(host->ecb[ecbno].cdb, cmd, SCpnt->cmd_len);

	/* The scatter/gather block lives in coherent DMA memory and is
	 * freed by the interrupt handler on completion. */
	SCpnt->host_scribble = dma_alloc_coherent (&host->edev->dev,
						   sizeof (struct aha1740_sg),
						   &sg_dma, GFP_ATOMIC);
	if(SCpnt->host_scribble == NULL) {
		printk(KERN_WARNING "aha1740: out of memory in queuecommand!\n");
		/* Release the ECB we reserved above; without this it
		 * would stay marked busy forever (leak). */
		spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
		host->ecb[ecbno].cmdw = 0;
		spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);
		return 1;
	}
	sgptr = (struct aha1740_sg *) SCpnt->host_scribble;
	sgptr->sg_dma_addr = sg_dma;

	nseg = scsi_dma_map(SCpnt);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;
		struct aha1740_chain * cptr;
		int i;
		DEB(unsigned char * ptr);

		host->ecb[ecbno].sg = 1;  /* SCSI Initiator Command
					   * w/scatter-gather*/
		cptr = sgptr->sg_chain;
		scsi_for_each_sg(SCpnt, sg, nseg, i) {
			cptr[i].datalen = sg_dma_len (sg);
			cptr[i].dataptr = sg_dma_address (sg);
		}
		host->ecb[ecbno].datalen = nseg * sizeof(struct aha1740_chain);
		host->ecb[ecbno].dataptr = sg_dma;
#ifdef DEBUG
		printk("cptr %x: ",cptr);
		ptr = (unsigned char *) cptr;
		for(i=0;i<24;i++) printk("%02x ", ptr[i]);
#endif
	} else {
		host->ecb[ecbno].datalen = 0;
		host->ecb[ecbno].dataptr = 0;
	}
	host->ecb[ecbno].lun = SCpnt->device->lun;
	host->ecb[ecbno].ses = 1; /* Suppress underrun errors */
	host->ecb[ecbno].dir = direction;
	host->ecb[ecbno].ars = 1; /* Yes, get the sense on an error */
	host->ecb[ecbno].senselen = 12;
	host->ecb[ecbno].senseptr = ecb_cpu_to_dma (SCpnt->device->host,
						    host->ecb[ecbno].sense);
	host->ecb[ecbno].statusptr = ecb_cpu_to_dma (SCpnt->device->host,
						     host->ecb[ecbno].status);
	host->ecb[ecbno].done = done;
	host->ecb[ecbno].SCpnt = SCpnt;

#ifdef DEBUG
	{
		int i;
		printk("aha1740_command: sending.. ");
		for (i = 0; i < sizeof(host->ecb[ecbno]) - 10; i++)
			printk("%02x ", ((unchar *)&host->ecb[ecbno])[i]);
	}
	printk("\n");
#endif

	if (done) {
		/* The Adaptec Spec says the card is so fast that the loops
		   will only be executed once in the code below. Even if this
		   was true with the fastest processors when the spec was
		   written, it doesn't seem to be true with today's fast
		   processors. We print a warning if the code is executed more
		   often than LOOPCNT_WARN. If this happens, it should be
		   investigated. If the count reaches LOOPCNT_MAX, we assume
		   something is broken; since there is no way to return an
		   error (the return value is ignored by the mid-level scsi
		   layer) we have to panic (and maybe that's the best thing we
		   can do then anyhow). */
#define LOOPCNT_WARN 10		/* excessive mbxout wait -> syslog-msg */
#define LOOPCNT_MAX 1000000	/* mbxout deadlock -> panic() after ~ 2 sec. */
		int loopcnt;
		unsigned int base = SCpnt->device->host->io_port;
		DEB(printk("aha1740[%d] critical section\n",ecbno));

		spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
		for (loopcnt = 0; ; loopcnt++) {
			if (inb(G2STAT(base)) & G2STAT_MBXOUT) break;
			if (loopcnt == LOOPCNT_WARN) {
				printk("aha1740[%d]_mbxout wait!\n",ecbno);
			}
			if (loopcnt == LOOPCNT_MAX)
				panic("aha1740.c: mbxout busy!\n");
		}
		/* Hand the ECB's bus address to the board */
		outl (ecb_cpu_to_dma (SCpnt->device->host, host->ecb + ecbno),
		      MBOXOUT0(base));
		for (loopcnt = 0; ; loopcnt++) {
			if (! (inb(G2STAT(base)) & G2STAT_BUSY)) break;
			if (loopcnt == LOOPCNT_WARN) {
				printk("aha1740[%d]_attn wait!\n",ecbno);
			}
			if (loopcnt == LOOPCNT_MAX)
				panic("aha1740.c: attn wait failed!\n");
		}
		outb(ATTN_START | (target & 7), ATTN(base)); /* Start it up */
		spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);
		DEB(printk("aha1740[%d] request queued.\n",ecbno));
	} else
		printk(KERN_ALERT "aha1740_queuecommand: done can't be NULL\n");

	return 0;
}
static DEF_SCSI_QCMD(aha1740_queuecommand)
/* Query the board for its irq_level and irq_type. Nothing else matters
in enhanced mode on an EISA bus. */
static void aha1740_getconfig(unsigned int base, unsigned int *irq_level,
			      unsigned int *irq_type,
			      unsigned int *translation)
{
	/* Bits 0-2 of INTDEF index the IRQ line; entries 4 and 7 are 0 */
	static int intab[] = { 9, 10, 11, 12, 0, 14, 15, 0 };

	*irq_level = intab[inb(INTDEF(base)) & 0x7];
	/* Bit 3 of INTDEF selects the trigger type (printed as
	 * "edge"/"level" by the caller). */
	*irq_type = (inb(INTDEF(base)) & 0x8) >> 3;
	/* Bit 0 of RESV1: extended geometry translation enabled? */
	*translation = inb(RESV1(base)) & 0x1;
	/* NOTE(review): sets bit 4 of INTDEF — presumably the interrupt
	 * enable; confirm against the AHA-174x register reference. */
	outb(inb(INTDEF(base)) | 0x10, INTDEF(base));
}
/*
 * Report a BIOS-compatible disk geometry.  ip[0]=heads, ip[1]=sectors,
 * ip[2]=cylinders.  ip[2] is read before being written — presumably the
 * caller pre-fills a default geometry; verify against the midlayer.
 */
static int aha1740_biosparam(struct scsi_device *sdev,
			     struct block_device *dev,
			     sector_t capacity, int* ip)
{
	int size = capacity;

	DEB(printk("aha1740_biosparam\n"));
	if (HOSTDATA(sdev->host)->translation && ip[2] > 1024) {
		/* Extended translation: 255 heads x 63 sectors */
		ip[0] = 255;
		ip[1] = 63;
		ip[2] = size / (255 * 63);
	} else {
		/* Classic 64 heads x 32 sectors geometry */
		ip[0] = 64;
		ip[1] = 32;
		ip[2] = size >> 11;
	}
	return 0;
}
static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
{
	/*
	 * Abort/reset recovery is handled by the AHA1740's own firmware
	 * (as Alan Cox noted, the "head in sand" approach is correct for
	 * once).  This do-nothing handler exists purely to keep the SCSI
	 * midlayer quiet.
	 */
	return 0;
}
/* Host template: limits come from aha1740.h; error handling is a dummy
 * abort handler because the board's firmware does its own recovery. */
static struct scsi_host_template aha1740_template = {
	.module           = THIS_MODULE,
	.proc_name        = "aha1740",
	.proc_info        = aha1740_proc_info,
	.name             = "Adaptec 174x (EISA)",
	.queuecommand     = aha1740_queuecommand,
	.bios_param       = aha1740_biosparam,
	.can_queue        = AHA1740_ECBS,
	.this_id          = 7,
	.sg_tablesize     = AHA1740_SCATTER,
	.cmd_per_lun      = AHA1740_CMDLUN,
	.use_clustering   = ENABLE_CLUSTERING,
	.eh_abort_handler = aha1740_eh_abort_handler,
};
/*
 * EISA probe: verify the board is in enhanced mode, read its IRQ and
 * translation configuration, then allocate, wire up and register a
 * Scsi_Host.  Returns 0 on success, -EBUSY if the slot's I/O region is
 * already claimed, -ENODEV on any other failure.
 *
 * Fix: the ECB-mapping error path used to call scsi_unregister() AND
 * then fall through to scsi_host_put(), dropping the host reference
 * twice.  The scsi_unregister() call is removed; err_host_put releases
 * the host exactly once.
 */
static int aha1740_probe (struct device *dev)
{
	int slotbase, rc;
	unsigned int irq_level, irq_type, translation;
	struct Scsi_Host *shpnt;
	struct aha1740_hostdata *host;
	struct eisa_device *edev = to_eisa_device (dev);

	DEB(printk("aha1740_probe: \n"));

	slotbase = edev->base_addr + EISA_VENDOR_ID_OFFSET;
	if (!request_region(slotbase, SLOTSIZE, "aha1740")) /* See if in use */
		return -EBUSY;
	if (!aha1740_test_port(slotbase))
		goto err_release_region;
	aha1740_getconfig(slotbase,&irq_level,&irq_type,&translation);
	if ((inb(G2STAT(slotbase)) &
	     (G2STAT_MBXOUT|G2STAT_BUSY)) != G2STAT_MBXOUT) {
		/* If the card isn't ready, hard reset it */
		outb(G2CNTRL_HRST, G2CNTRL(slotbase));
		outb(0, G2CNTRL(slotbase));
	}
	printk(KERN_INFO "Configuring slot %d at IO:%x, IRQ %u (%s)\n",
	       edev->slot, slotbase, irq_level, irq_type ? "edge" : "level");
	printk(KERN_INFO "aha174x: Extended translation %sabled.\n",
	       translation ? "en" : "dis");

	shpnt = scsi_host_alloc(&aha1740_template,
				sizeof(struct aha1740_hostdata));
	if(shpnt == NULL)
		goto err_release_region;

	shpnt->base = 0;
	shpnt->io_port = slotbase;
	shpnt->n_io_port = SLOTSIZE;
	shpnt->irq = irq_level;
	shpnt->dma_channel = 0xff;
	host = HOSTDATA(shpnt);
	host->edev = edev;
	host->translation = translation;
	/* The whole ECB array is mapped once for the lifetime of the host */
	host->ecb_dma_addr = dma_map_single (&edev->dev, host->ecb,
					     sizeof (host->ecb),
					     DMA_BIDIRECTIONAL);
	if (!host->ecb_dma_addr) {
		printk (KERN_ERR "aha1740_probe: Couldn't map ECB, giving up\n");
		goto err_host_put;
	}

	DEB(printk("aha1740_probe: enable interrupt channel %d\n",irq_level));
	if (request_irq(irq_level,aha1740_intr_handle,irq_type ? 0 : IRQF_SHARED,
			"aha1740",shpnt)) {
		printk(KERN_ERR "aha1740_probe: Unable to allocate IRQ %d.\n",
		       irq_level);
		goto err_unmap;
	}
	eisa_set_drvdata (edev, shpnt);

	rc = scsi_add_host (shpnt, dev);
	if (rc)
		goto err_irq;

	scsi_scan_host (shpnt);
	return 0;

 err_irq:
	free_irq(irq_level, shpnt);
 err_unmap:
	dma_unmap_single (&edev->dev, host->ecb_dma_addr,
			  sizeof (host->ecb), DMA_BIDIRECTIONAL);
 err_host_put:
	scsi_host_put (shpnt);
 err_release_region:
	release_region(slotbase, SLOTSIZE);

	return -ENODEV;
}
/*
 * Undo aha1740_probe for one device: detach from the SCSI midlayer,
 * release the IRQ, the ECB DMA mapping and the I/O region, then drop
 * the final host reference.
 */
static __devexit int aha1740_remove (struct device *dev)
{
	struct Scsi_Host *shpnt = dev_get_drvdata(dev);
	struct aha1740_hostdata *host = HOSTDATA (shpnt);

	scsi_remove_host(shpnt);
	free_irq (shpnt->irq, shpnt);
	dma_unmap_single (dev, host->ecb_dma_addr,
			  sizeof (host->ecb), DMA_BIDIRECTIONAL);
	release_region (shpnt->io_port, SLOTSIZE);
	scsi_host_put (shpnt);
	return 0;
}
/* EISA product IDs this driver binds to */
static struct eisa_device_id aha1740_ids[] = {
	{ "ADP0000" },		/* 1740  */
	{ "ADP0001" },		/* 1740A */
	{ "ADP0002" },		/* 1742A */
	{ "ADP0400" },		/* 1744  */
	{ "" }			/* terminator */
};
MODULE_DEVICE_TABLE(eisa, aha1740_ids);
/* EISA bus glue: binds the IDs above to probe/remove */
static struct eisa_driver aha1740_driver = {
	.id_table = aha1740_ids,
	.driver   = {
		.name   = "aha1740",
		.probe  = aha1740_probe,
		.remove = __devexit_p (aha1740_remove),
	},
};
/* Module entry: register with the EISA core; probing happens per slot. */
static __init int aha1740_init (void)
{
	return eisa_driver_register (&aha1740_driver);
}

/* Module exit: unregister, detaching every bound adapter. */
static __exit void aha1740_exit (void)
{
	eisa_driver_unregister (&aha1740_driver);
}
module_init (aha1740_init);
module_exit (aha1740_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
byzvulture/android_kernel_xiaomi_armor | drivers/scsi/aha1740.c | 4479 | 19656 | /* $Id$
* 1993/03/31
* linux/kernel/aha1740.c
*
* Based loosely on aha1542.c which is
* Copyright (C) 1992 Tommy Thorn and
* Modified by Eric Youngdale
*
* This file is aha1740.c, written and
* Copyright (C) 1992,1993 Brad McLean
* brad@saturn.gaylord.com or brad@bradpc.gaylord.com.
*
* Modifications to makecode and queuecommand
* for proper handling of multiple devices courteously
* provided by Michael Weller, March, 1993
*
* Multiple adapter support, extended translation detection,
* update to current scsi subsystem changes, proc fs support,
* working (!) module support based on patches from Andreas Arens,
* by Andreas Degert <ad@papyrus.hamburg.com>, 2/1997
*
* aha1740_makecode may still need even more work
* if it doesn't work for your devices, take a look.
*
* Reworked for new_eh and new locking by Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* Converted to EISA and generic DMA APIs by Marc Zyngier
* <maz@wild-wind.fr.eu.org>, 4/2003.
*
* Shared interrupt support added by Rask Ingemann Lambertsen
* <rask@sygehus.dk>, 10/2003
*
* For the avoidance of doubt the "preferred form" of this code is one which
* is in an open non patent encumbered format. Where cryptographic key signing
* forms part of the process of creating an executable the information
* including keys needed to generate an equivalently functional executable
* are deemed to be part of the source code.
*/
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/eisa.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <asm/dma.h>
#include <asm/io.h>
#include "scsi.h"
#include <scsi/scsi_host.h>
#include "aha1740.h"
/* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH
IT WORK, THEN:
#define DEBUG
*/
#ifdef DEBUG
#define DEB(x) x
#else
#define DEB(x)
#endif
/* Per-host private state, stored in the Scsi_Host's hostdata area */
struct aha1740_hostdata {
	struct eisa_device *edev;	/* underlying EISA device */
	unsigned int translation;	/* extended geometry translation on? */
	unsigned int last_ecb_used;	/* hint where the free-ECB search starts */
	dma_addr_t ecb_dma_addr;	/* bus address of ecb[] (mapped once) */
	struct ecb ecb[AHA1740_ECBS];
};

/* Per-command scatter/gather block, kept in SCpnt->host_scribble */
struct aha1740_sg {
	struct aha1740_chain sg_chain[AHA1740_SCATTER];
	dma_addr_t sg_dma_addr;		/* bus address of this block */
	dma_addr_t buf_dma_addr;
};

#define HOSTDATA(host) ((struct aha1740_hostdata *) &host->hostdata)
/* Translate an ECB bus address (as read from mailbox-in) back into
 * the matching CPU pointer inside the host's ecb[] array. */
static inline struct ecb *ecb_dma_to_cpu (struct Scsi_Host *host,
					  dma_addr_t dma)
{
	struct aha1740_hostdata *hdata = HOSTDATA (host);

	return (struct ecb *)((char *) hdata->ecb +
			      (unsigned int)(dma - hdata->ecb_dma_addr));
}
/* Translate a pointer into the host's ECB area into the bus address
 * the adapter must be given. */
static inline dma_addr_t ecb_cpu_to_dma (struct Scsi_Host *host, void *cpu)
{
	struct aha1740_hostdata *hdata = HOSTDATA (host);

	return hdata->ecb_dma_addr + ((char *) cpu - (char *) hdata->ecb);
}
/*
 * Old-style /proc/scsi read handler: print a one-adapter configuration
 * summary into 'buffer' and apply the usual offset/length windowing.
 * Writing (inout != 0) is not supported.
 */
static int aha1740_proc_info(struct Scsi_Host *shpnt, char *buffer,
			     char **start, off_t offset,
			     int length, int inout)
{
	int len;
	struct aha1740_hostdata *host;

	if (inout)
		return-ENOSYS;
	host = HOSTDATA(shpnt);
	len = sprintf(buffer, "aha174x at IO:%lx, IRQ %d, SLOT %d.\n"
		      "Extended translation %sabled.\n",
		      shpnt->io_port, shpnt->irq, host->edev->slot,
		      host->translation ? "en" : "dis");
	/* Requested window starts past the generated text: nothing to give */
	if (offset > len) {
		*start = buffer;
		return 0;
	}
	*start = buffer + offset;
	len -= offset;
	if (len > length)
		len = length;
	return len;
}
/*
 * Translate the adapter's status/sense bytes into a SCSI midlayer
 * result: the target status in the low byte, a DID_* host code shifted
 * into bits 16-23.
 */
static int aha1740_makecode(unchar *sense, unchar *status)
{
	/* Bit-level view of the first two status bytes.
	 * NOTE(review): the cast below type-puns raw bytes through this
	 * bitfield; the layout is compiler/endianness dependent. */
	struct statusword
	{
		ushort	don:1,	/* Command Done - No Error */
			du:1,	/* Data underrun */
		    :1,	qf:1,	/* Queue full */
			sc:1,	/* Specification Check */
			dor:1,	/* Data overrun */
			ch:1,	/* Chaining Halted */
			intr:1,	/* Interrupt issued */
			asa:1,	/* Additional Status Available */
			sns:1,	/* Sense information Stored */
		    :1,	ini:1,	/* Initialization Required */
			me:1,	/* Major error or exception */
		    :1,	eca:1,	/* Extended Contingent alliance */
		    :1;
	} status_word;
	int retval = DID_OK;

	status_word = * (struct statusword *) status;
#ifdef DEBUG
	printk("makecode from %x,%x,%x,%x %x,%x,%x,%x",
	       status[0], status[1], status[2], status[3],
	       sense[0], sense[1], sense[2], sense[3]);
#endif
	if (!status_word.don) { /* Anything abnormal was detected */
		if ( (status[1]&0x18) || status_word.sc ) {
			/*Additional info available*/
			/* Use the supplied info for further diagnostics */
			switch ( status[2] ) {
			case 0x12:
				if ( status_word.dor )
					retval=DID_ERROR;	/* It's an Overrun */
				/* If not overrun, assume underrun and
				 * ignore it! */
				/* fall through */
			case 0x00: /* No info, assume no error, should
				    * not occur */
				break;
			case 0x11:
			case 0x21:
				retval=DID_TIME_OUT;
				break;
			case 0x0a:
				retval=DID_BAD_TARGET;
				break;
			case 0x04:
			case 0x05:
				retval=DID_ABORT;
				/* Either by this driver or the
				 * AHA1740 itself */
				break;
			default:
				retval=DID_ERROR; /* No further
						   * diagnostics
						   * possible */
			}
		} else {
			/* Michael suggests, and Brad concurs: */
			if ( status_word.qf ) {
				retval = DID_TIME_OUT; /* forces a redo */
				/* I think this specific one should
				 * not happen -Brad */
				printk("aha1740.c: WARNING: AHA1740 queue overflow!\n");
			} else
				if ( status[0]&0x60 ) {
					/* Didn't find a better error */
					retval = DID_ERROR;
				}
			/* In any other case return DID_OK so for example
			   CONDITION_CHECKS make it through to the appropriate
			   device driver */
		}
	}
	/* Under all circumstances supply the target status -Michael */
	return status[3] | retval << 16;
}
static int aha1740_test_port(unsigned int base)
{
	/* The board is only usable when it is in enhanced mode. */
	unsigned char portadr = inb(PORTADR(base));

	if (!(portadr & PORTADDR_ENH)) {
		printk("aha174x: Board detected, but not in enhanced mode, so disabled it.\n");
		return 0;
	}
	return 1;		/* Okay, we're all set */
}
/*
 * A "high" level interrupt handler.  While the adapter signals a
 * pending interrupt (G2STAT_INTPEND), read the interrupt status and
 * the mailbox-in ECB address and service the event: complete the
 * matching SCSI command, or report an adapter-level condition.
 * All work is done under host_lock.
 */
static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
{
	struct Scsi_Host *host = (struct Scsi_Host *) dev_id;
	void (*my_done)(Scsi_Cmnd *);
	int errstatus, adapstat;
	int number_serviced;
	struct ecb *ecbptr;
	Scsi_Cmnd *SCtmp;
	unsigned int base;
	unsigned long flags;
	int handled = 0;
	struct aha1740_sg *sgptr;
	struct eisa_device *edev;

	if (!host)
		panic("aha1740.c: Irq from unknown host!\n");
	spin_lock_irqsave(host->host_lock, flags);
	base = host->io_port;
	number_serviced = 0;
	edev = HOSTDATA(host)->edev;

	while(inb(G2STAT(base)) & G2STAT_INTPEND) {
		handled = 1;
		DEB(printk("aha1740_intr top of loop.\n"));
		adapstat = inb(G2INTST(base));
		/* Mailbox-in carries the bus address of the ECB this
		 * interrupt refers to; translate it back to a pointer. */
		ecbptr = ecb_dma_to_cpu (host, inl(MBOXIN0(base)));
		outb(G2CNTRL_IRST,G2CNTRL(base)); /* interrupt reset */

		switch ( adapstat & G2INTST_MASK ) {
		case G2INTST_CCBRETRY:
		case G2INTST_CCBERROR:
		case G2INTST_CCBGOOD:
			/* Host Ready -> Mailbox in complete */
			outb(G2CNTRL_HRDY,G2CNTRL(base));
			if (!ecbptr) {
				printk("Aha1740 null ecbptr in interrupt (%x,%x,%x,%d)\n",
				       inb(G2STAT(base)),adapstat,
				       inb(G2INTST(base)), number_serviced++);
				continue;
			}
			SCtmp = ecbptr->SCpnt;
			if (!SCtmp) {
				printk("Aha1740 null SCtmp in interrupt (%x,%x,%x,%d)\n",
				       inb(G2STAT(base)),adapstat,
				       inb(G2INTST(base)), number_serviced++);
				continue;
			}
			/* Release the DMA resources set up in queuecommand */
			sgptr = (struct aha1740_sg *) SCtmp->host_scribble;
			scsi_dma_unmap(SCtmp);

			/* Free the sg block */
			dma_free_coherent (&edev->dev,
					   sizeof (struct aha1740_sg),
					   SCtmp->host_scribble,
					   sgptr->sg_dma_addr);

			/* Fetch the sense data, and tuck it away, in
			   the required slot.  The Adaptec
			   automatically fetches it, and there is no
			   guarantee that we will still have it in the
			   cdb when we come back */
			if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) {
				memcpy(SCtmp->sense_buffer, ecbptr->sense,
				       SCSI_SENSE_BUFFERSIZE);
				errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
			} else
				errstatus = 0;
			DEB(if (errstatus)
			    printk("aha1740_intr_handle: returning %6x\n",
				   errstatus));
			SCtmp->result = errstatus;
			my_done = ecbptr->done;
			/* Zeroing the ECB clears cmdw and frees it for
			 * reuse by queuecommand. */
			memset(ecbptr,0,sizeof(struct ecb));
			if ( my_done )
				my_done(SCtmp);
			break;

		case G2INTST_HARDFAIL:
			printk(KERN_ALERT "aha1740 hardware failure!\n");
			panic("aha1740.c");	/* Goodbye */

		case G2INTST_ASNEVENT:
			printk("aha1740 asynchronous event: %02x %02x %02x %02x %02x\n",
			       adapstat,
			       inb(MBOXIN0(base)),
			       inb(MBOXIN1(base)),
			       inb(MBOXIN2(base)),
			       inb(MBOXIN3(base))); /* Say What? */
			/* Host Ready -> Mailbox in complete */
			outb(G2CNTRL_HRDY,G2CNTRL(base));
			break;

		case G2INTST_CMDGOOD:
			/* set immediate command success flag here: */
			break;

		case G2INTST_CMDERROR:
			/* Set immediate command failure flag here: */
			break;
		}
		number_serviced++;
	}
	spin_unlock_irqrestore(host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
/*
 * Queue a SCSI command: reserve a free ECB, build the command block
 * (CDB, scatter/gather chain, sense/status pointers) and hand its bus
 * address to the board through the mailbox-out registers.
 *
 * Returns 0 on success, 1 if the per-command DMA block could not be
 * allocated (the midlayer retries later).
 *
 * Fix: on allocation failure the reserved ECB is now released again;
 * previously its cmdw stayed set forever, leaking the ECB out of the
 * pool until the pool was exhausted and the search loop panicked.
 */
static int aha1740_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
{
	unchar direction;
	unchar *cmd = (unchar *) SCpnt->cmnd;
	unchar target = scmd_id(SCpnt);
	struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host);
	unsigned long flags;
	dma_addr_t sg_dma;
	struct aha1740_sg *sgptr;
	int ecbno, nseg;
	DEB(int i);

	/* The board fetches sense data itself (ars=1 below), so a
	 * REQUEST_SENSE from the midlayer can complete immediately. */
	if(*cmd == REQUEST_SENSE) {
		SCpnt->result = 0;
		done(SCpnt);
		return 0;
	}

#ifdef DEBUG
	if (*cmd == READ_10 || *cmd == WRITE_10)
		i = xscsi2int(cmd+2);
	else if (*cmd == READ_6 || *cmd == WRITE_6)
		i = scsi2int(cmd+2);
	else
		i = -1;
	printk("aha1740_queuecommand: dev %d cmd %02x pos %d len %d ",
	       target, *cmd, i, bufflen);
	printk("scsi cmd:");
	for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]);
	printk("\n");
#endif

	/* locate an available ecb */
	spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
	ecbno = host->last_ecb_used + 1; /* An optimization */
	if (ecbno >= AHA1740_ECBS)
		ecbno = 0;
	do {
		if (!host->ecb[ecbno].cmdw)
			break;
		ecbno++;
		if (ecbno >= AHA1740_ECBS)
			ecbno = 0;
	} while (ecbno != host->last_ecb_used);
	if (host->ecb[ecbno].cmdw)
		panic("Unable to find empty ecb for aha1740.\n");
	host->ecb[ecbno].cmdw = AHA1740CMD_INIT; /* SCSI Initiator Command
						    doubles as reserved flag */
	host->last_ecb_used = ecbno;
	spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);

#ifdef DEBUG
	printk("Sending command (%d %x)...", ecbno, done);
#endif

	host->ecb[ecbno].cdblen = SCpnt->cmd_len; /* SCSI Command
						   * Descriptor Block
						   * Length */

	direction = 0;
	if (*cmd == READ_10 || *cmd == READ_6)
		direction = 1;
	else if (*cmd == WRITE_10 || *cmd == WRITE_6)
		direction = 0;

	memcpy(host->ecb[ecbno].cdb, cmd, SCpnt->cmd_len);

	/* The scatter/gather block lives in coherent DMA memory and is
	 * freed by the interrupt handler on completion. */
	SCpnt->host_scribble = dma_alloc_coherent (&host->edev->dev,
						   sizeof (struct aha1740_sg),
						   &sg_dma, GFP_ATOMIC);
	if(SCpnt->host_scribble == NULL) {
		printk(KERN_WARNING "aha1740: out of memory in queuecommand!\n");
		/* Release the ECB we reserved above; without this it
		 * would stay marked busy forever (leak). */
		spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
		host->ecb[ecbno].cmdw = 0;
		spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);
		return 1;
	}
	sgptr = (struct aha1740_sg *) SCpnt->host_scribble;
	sgptr->sg_dma_addr = sg_dma;

	nseg = scsi_dma_map(SCpnt);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;
		struct aha1740_chain * cptr;
		int i;
		DEB(unsigned char * ptr);

		host->ecb[ecbno].sg = 1;  /* SCSI Initiator Command
					   * w/scatter-gather*/
		cptr = sgptr->sg_chain;
		scsi_for_each_sg(SCpnt, sg, nseg, i) {
			cptr[i].datalen = sg_dma_len (sg);
			cptr[i].dataptr = sg_dma_address (sg);
		}
		host->ecb[ecbno].datalen = nseg * sizeof(struct aha1740_chain);
		host->ecb[ecbno].dataptr = sg_dma;
#ifdef DEBUG
		printk("cptr %x: ",cptr);
		ptr = (unsigned char *) cptr;
		for(i=0;i<24;i++) printk("%02x ", ptr[i]);
#endif
	} else {
		host->ecb[ecbno].datalen = 0;
		host->ecb[ecbno].dataptr = 0;
	}
	host->ecb[ecbno].lun = SCpnt->device->lun;
	host->ecb[ecbno].ses = 1; /* Suppress underrun errors */
	host->ecb[ecbno].dir = direction;
	host->ecb[ecbno].ars = 1; /* Yes, get the sense on an error */
	host->ecb[ecbno].senselen = 12;
	host->ecb[ecbno].senseptr = ecb_cpu_to_dma (SCpnt->device->host,
						    host->ecb[ecbno].sense);
	host->ecb[ecbno].statusptr = ecb_cpu_to_dma (SCpnt->device->host,
						     host->ecb[ecbno].status);
	host->ecb[ecbno].done = done;
	host->ecb[ecbno].SCpnt = SCpnt;

#ifdef DEBUG
	{
		int i;
		printk("aha1740_command: sending.. ");
		for (i = 0; i < sizeof(host->ecb[ecbno]) - 10; i++)
			printk("%02x ", ((unchar *)&host->ecb[ecbno])[i]);
	}
	printk("\n");
#endif

	if (done) {
		/* The Adaptec Spec says the card is so fast that the loops
		   will only be executed once in the code below. Even if this
		   was true with the fastest processors when the spec was
		   written, it doesn't seem to be true with today's fast
		   processors. We print a warning if the code is executed more
		   often than LOOPCNT_WARN. If this happens, it should be
		   investigated. If the count reaches LOOPCNT_MAX, we assume
		   something is broken; since there is no way to return an
		   error (the return value is ignored by the mid-level scsi
		   layer) we have to panic (and maybe that's the best thing we
		   can do then anyhow). */
#define LOOPCNT_WARN 10		/* excessive mbxout wait -> syslog-msg */
#define LOOPCNT_MAX 1000000	/* mbxout deadlock -> panic() after ~ 2 sec. */
		int loopcnt;
		unsigned int base = SCpnt->device->host->io_port;
		DEB(printk("aha1740[%d] critical section\n",ecbno));

		spin_lock_irqsave(SCpnt->device->host->host_lock, flags);
		for (loopcnt = 0; ; loopcnt++) {
			if (inb(G2STAT(base)) & G2STAT_MBXOUT) break;
			if (loopcnt == LOOPCNT_WARN) {
				printk("aha1740[%d]_mbxout wait!\n",ecbno);
			}
			if (loopcnt == LOOPCNT_MAX)
				panic("aha1740.c: mbxout busy!\n");
		}
		/* Hand the ECB's bus address to the board */
		outl (ecb_cpu_to_dma (SCpnt->device->host, host->ecb + ecbno),
		      MBOXOUT0(base));
		for (loopcnt = 0; ; loopcnt++) {
			if (! (inb(G2STAT(base)) & G2STAT_BUSY)) break;
			if (loopcnt == LOOPCNT_WARN) {
				printk("aha1740[%d]_attn wait!\n",ecbno);
			}
			if (loopcnt == LOOPCNT_MAX)
				panic("aha1740.c: attn wait failed!\n");
		}
		outb(ATTN_START | (target & 7), ATTN(base)); /* Start it up */
		spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags);
		DEB(printk("aha1740[%d] request queued.\n",ecbno));
	} else
		printk(KERN_ALERT "aha1740_queuecommand: done can't be NULL\n");

	return 0;
}
static DEF_SCSI_QCMD(aha1740_queuecommand)
/* Query the board for its irq_level and irq_type. Nothing else matters
in enhanced mode on an EISA bus. */
static void aha1740_getconfig(unsigned int base, unsigned int *irq_level,
			      unsigned int *irq_type,
			      unsigned int *translation)
{
	/* Bits 0-2 of INTDEF index the IRQ line; entries 4 and 7 are 0 */
	static int intab[] = { 9, 10, 11, 12, 0, 14, 15, 0 };

	*irq_level = intab[inb(INTDEF(base)) & 0x7];
	/* Bit 3 of INTDEF selects the trigger type (printed as
	 * "edge"/"level" by the caller). */
	*irq_type = (inb(INTDEF(base)) & 0x8) >> 3;
	/* Bit 0 of RESV1: extended geometry translation enabled? */
	*translation = inb(RESV1(base)) & 0x1;
	/* NOTE(review): sets bit 4 of INTDEF — presumably the interrupt
	 * enable; confirm against the AHA-174x register reference. */
	outb(inb(INTDEF(base)) | 0x10, INTDEF(base));
}
/*
 * Report a BIOS-compatible disk geometry.  ip[0]=heads, ip[1]=sectors,
 * ip[2]=cylinders.  ip[2] is read before being written — presumably the
 * caller pre-fills a default geometry; verify against the midlayer.
 */
static int aha1740_biosparam(struct scsi_device *sdev,
			     struct block_device *dev,
			     sector_t capacity, int* ip)
{
	int size = capacity;

	DEB(printk("aha1740_biosparam\n"));
	if (HOSTDATA(sdev->host)->translation && ip[2] > 1024) {
		/* Extended translation: 255 heads x 63 sectors */
		ip[0] = 255;
		ip[1] = 63;
		ip[2] = size / (255 * 63);
	} else {
		/* Classic 64 heads x 32 sectors geometry */
		ip[0] = 64;
		ip[1] = 32;
		ip[2] = size >> 11;
	}
	return 0;
}
static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
{
	/*
	 * Abort/reset recovery is handled by the AHA1740's own firmware
	 * (as Alan Cox noted, the "head in sand" approach is correct for
	 * once).  This do-nothing handler exists purely to keep the SCSI
	 * midlayer quiet.
	 */
	return 0;
}
/* Host template: limits come from aha1740.h; error handling is a dummy
 * abort handler because the board's firmware does its own recovery. */
static struct scsi_host_template aha1740_template = {
	.module           = THIS_MODULE,
	.proc_name        = "aha1740",
	.proc_info        = aha1740_proc_info,
	.name             = "Adaptec 174x (EISA)",
	.queuecommand     = aha1740_queuecommand,
	.bios_param       = aha1740_biosparam,
	.can_queue        = AHA1740_ECBS,
	.this_id          = 7,
	.sg_tablesize     = AHA1740_SCATTER,
	.cmd_per_lun      = AHA1740_CMDLUN,
	.use_clustering   = ENABLE_CLUSTERING,
	.eh_abort_handler = aha1740_eh_abort_handler,
};
/*
 * EISA probe: verify the board is in enhanced mode, read its IRQ and
 * translation configuration, then allocate, wire up and register a
 * Scsi_Host.  Returns 0 on success, -EBUSY if the slot's I/O region is
 * already claimed, -ENODEV on any other failure.
 *
 * Fix: the ECB-mapping error path used to call scsi_unregister() AND
 * then fall through to scsi_host_put(), dropping the host reference
 * twice.  The scsi_unregister() call is removed; err_host_put releases
 * the host exactly once.
 */
static int aha1740_probe (struct device *dev)
{
	int slotbase, rc;
	unsigned int irq_level, irq_type, translation;
	struct Scsi_Host *shpnt;
	struct aha1740_hostdata *host;
	struct eisa_device *edev = to_eisa_device (dev);

	DEB(printk("aha1740_probe: \n"));

	slotbase = edev->base_addr + EISA_VENDOR_ID_OFFSET;
	if (!request_region(slotbase, SLOTSIZE, "aha1740")) /* See if in use */
		return -EBUSY;
	if (!aha1740_test_port(slotbase))
		goto err_release_region;
	aha1740_getconfig(slotbase,&irq_level,&irq_type,&translation);
	if ((inb(G2STAT(slotbase)) &
	     (G2STAT_MBXOUT|G2STAT_BUSY)) != G2STAT_MBXOUT) {
		/* If the card isn't ready, hard reset it */
		outb(G2CNTRL_HRST, G2CNTRL(slotbase));
		outb(0, G2CNTRL(slotbase));
	}
	printk(KERN_INFO "Configuring slot %d at IO:%x, IRQ %u (%s)\n",
	       edev->slot, slotbase, irq_level, irq_type ? "edge" : "level");
	printk(KERN_INFO "aha174x: Extended translation %sabled.\n",
	       translation ? "en" : "dis");

	shpnt = scsi_host_alloc(&aha1740_template,
				sizeof(struct aha1740_hostdata));
	if(shpnt == NULL)
		goto err_release_region;

	shpnt->base = 0;
	shpnt->io_port = slotbase;
	shpnt->n_io_port = SLOTSIZE;
	shpnt->irq = irq_level;
	shpnt->dma_channel = 0xff;
	host = HOSTDATA(shpnt);
	host->edev = edev;
	host->translation = translation;
	/* The whole ECB array is mapped once for the lifetime of the host */
	host->ecb_dma_addr = dma_map_single (&edev->dev, host->ecb,
					     sizeof (host->ecb),
					     DMA_BIDIRECTIONAL);
	if (!host->ecb_dma_addr) {
		printk (KERN_ERR "aha1740_probe: Couldn't map ECB, giving up\n");
		goto err_host_put;
	}

	DEB(printk("aha1740_probe: enable interrupt channel %d\n",irq_level));
	if (request_irq(irq_level,aha1740_intr_handle,irq_type ? 0 : IRQF_SHARED,
			"aha1740",shpnt)) {
		printk(KERN_ERR "aha1740_probe: Unable to allocate IRQ %d.\n",
		       irq_level);
		goto err_unmap;
	}
	eisa_set_drvdata (edev, shpnt);

	rc = scsi_add_host (shpnt, dev);
	if (rc)
		goto err_irq;

	scsi_scan_host (shpnt);
	return 0;

 err_irq:
	free_irq(irq_level, shpnt);
 err_unmap:
	dma_unmap_single (&edev->dev, host->ecb_dma_addr,
			  sizeof (host->ecb), DMA_BIDIRECTIONAL);
 err_host_put:
	scsi_host_put (shpnt);
 err_release_region:
	release_region(slotbase, SLOTSIZE);

	return -ENODEV;
}
/*
 * Undo aha1740_probe for one device: detach from the SCSI midlayer,
 * release the IRQ, the ECB DMA mapping and the I/O region, then drop
 * the final host reference.
 */
static __devexit int aha1740_remove (struct device *dev)
{
	struct Scsi_Host *shpnt = dev_get_drvdata(dev);
	struct aha1740_hostdata *host = HOSTDATA (shpnt);

	scsi_remove_host(shpnt);
	free_irq (shpnt->irq, shpnt);
	dma_unmap_single (dev, host->ecb_dma_addr,
			  sizeof (host->ecb), DMA_BIDIRECTIONAL);
	release_region (shpnt->io_port, SLOTSIZE);
	scsi_host_put (shpnt);
	return 0;
}
/* EISA product IDs this driver binds to */
static struct eisa_device_id aha1740_ids[] = {
	{ "ADP0000" },		/* 1740  */
	{ "ADP0001" },		/* 1740A */
	{ "ADP0002" },		/* 1742A */
	{ "ADP0400" },		/* 1744  */
	{ "" }			/* terminator */
};
MODULE_DEVICE_TABLE(eisa, aha1740_ids);
/* EISA bus glue: binds the IDs above to probe/remove */
static struct eisa_driver aha1740_driver = {
	.id_table = aha1740_ids,
	.driver   = {
		.name   = "aha1740",
		.probe  = aha1740_probe,
		.remove = __devexit_p (aha1740_remove),
	},
};
/* Module entry: register with the EISA core; probing happens per slot. */
static __init int aha1740_init (void)
{
	return eisa_driver_register (&aha1740_driver);
}

/* Module exit: unregister, detaching every bound adapter. */
static __exit void aha1740_exit (void)
{
	eisa_driver_unregister (&aha1740_driver);
}
module_init (aha1740_init);
module_exit (aha1740_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
AngSanley/AngsaKernel_armani | arch/arm/mach-omap1/fpga.c | 4735 | 5319 | /*
* linux/arch/arm/mach-omap1/fpga.c
*
* Interrupt handler for OMAP-1510 Innovator FPGA
*
* Copyright (C) 2001 RidgeRun, Inc.
* Author: Greg Lonnon <glonnon@ridgerun.com>
*
* Copyright (C) 2002 MontaVista Software, Inc.
*
* Separated FPGA interrupts from innovator1510.c and cleaned up for 2.6
* Copyright (C) 2004 Nokia Corporation by Tony Lindrgen <tony@atomide.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <plat/fpga.h>
#include <mach/hardware.h>
#include "iomap.h"
/*
 * Mask (disable) one FPGA interrupt by clearing its bit in the
 * matching interrupt-mask register: IMR_LO covers lines 0-7, IMR_HI
 * lines 8-15, IMR2 lines 16 and up.
 */
static void fpga_mask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq - OMAP_FPGA_IRQ_BASE;

	if (irq < 8)
		__raw_writeb((__raw_readb(OMAP1510_FPGA_IMR_LO)
			      & ~(1 << irq)), OMAP1510_FPGA_IMR_LO);
	else if (irq < 16)
		__raw_writeb((__raw_readb(OMAP1510_FPGA_IMR_HI)
			      & ~(1 << (irq - 8))), OMAP1510_FPGA_IMR_HI);
	else
		__raw_writeb((__raw_readb(INNOVATOR_FPGA_IMR2)
			      & ~(1 << (irq - 16))), INNOVATOR_FPGA_IMR2);
}
/*
 * Build a bitmap of FPGA interrupts that are both asserted (ISR) and
 * enabled (IMR): bits 0-7 from the LO pair, 8-15 from the HI pair,
 * 16-23 from the Innovator's third register pair.
 */
static inline u32 get_fpga_unmasked_irqs(void)
{
	return
		((__raw_readb(OMAP1510_FPGA_ISR_LO) &
		  __raw_readb(OMAP1510_FPGA_IMR_LO))) |
		((__raw_readb(OMAP1510_FPGA_ISR_HI) &
		  __raw_readb(OMAP1510_FPGA_IMR_HI)) << 8) |
		((__raw_readb(INNOVATOR_FPGA_ISR2) &
		  __raw_readb(INNOVATOR_FPGA_IMR2)) << 16);
}
/* Intentional no-op acknowledge hook. */
static void fpga_ack_irq(struct irq_data *d)
{
	/* Don't need to explicitly ACK FPGA interrupts */
}
/*
 * Unmask (enable) one FPGA interrupt by setting its bit in the
 * matching interrupt-mask register (same register split as
 * fpga_mask_irq: IMR_LO 0-7, IMR_HI 8-15, IMR2 16+).
 */
static void fpga_unmask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq - OMAP_FPGA_IRQ_BASE;

	if (irq < 8)
		__raw_writeb((__raw_readb(OMAP1510_FPGA_IMR_LO) | (1 << irq)),
			     OMAP1510_FPGA_IMR_LO);
	else if (irq < 16)
		__raw_writeb((__raw_readb(OMAP1510_FPGA_IMR_HI)
			      | (1 << (irq - 8))), OMAP1510_FPGA_IMR_HI);
	else
		__raw_writeb((__raw_readb(INNOVATOR_FPGA_IMR2)
			      | (1 << (irq - 16))), INNOVATOR_FPGA_IMR2);
}
/*
 * Mask-then-acknowledge, used only by the "FPGA-ack" chip that the
 * level-sensitive touchscreen interrupt is attached to.
 */
static void fpga_mask_ack_irq(struct irq_data *d)
{
	fpga_mask_irq(d);
	fpga_ack_irq(d);
}
/*
 * Chained handler for the FPGA cascade interrupt: walk the pending
 * source bitmap and dispatch each set bit to its Linux IRQ.
 */
void innovator_fpga_IRQ_demux(unsigned int irq, struct irq_desc *desc)
{
	u32 pending = get_fpga_unmasked_irqs();
	int fpga_irq = OMAP_FPGA_IRQ_BASE;

	while (pending && fpga_irq < OMAP_FPGA_IRQ_END) {
		if (pending & 1)
			generic_handle_irq(fpga_irq);
		pending >>= 1;
		fpga_irq++;
	}
}
/*
 * irq_chip for the level-sensitive touchscreen interrupt: its ack
 * callback also masks the source (see workaround comment below).
 */
static struct irq_chip omap_fpga_irq_ack = {
	.name		= "FPGA-ack",
	.irq_ack	= fpga_mask_ack_irq,
	.irq_mask	= fpga_mask_irq,
	.irq_unmask	= fpga_unmask_irq,
};

/* irq_chip for the edge-sensitive FPGA sources: ack is a no-op. */
static struct irq_chip omap_fpga_irq = {
	.name		= "FPGA",
	.irq_ack	= fpga_ack_irq,
	.irq_mask	= fpga_mask_irq,
	.irq_unmask	= fpga_unmask_irq,
};
/*
* All of the FPGA interrupt request inputs except for the touchscreen are
* edge-sensitive; the touchscreen is level-sensitive. The edge-sensitive
* interrupts are acknowledged as a side-effect of reading the interrupt
* status register from the FPGA. The edge-sensitive interrupt inputs
* cause a problem with level interrupt requests, such as Ethernet. The
* problem occurs when a level interrupt request is asserted while its
* interrupt input is masked in the FPGA, which results in a missed
* interrupt.
*
* In an attempt to workaround the problem with missed interrupts, the
* mask_ack routine for all of the FPGA interrupts has been changed from
* fpga_mask_ack_irq() to fpga_ack_irq() so that the specific FPGA interrupt
* being serviced is left unmasked. We can do this because the FPGA cascade
* interrupt is installed with the IRQF_DISABLED flag, which leaves all
* interrupts masked at the CPU while an FPGA interrupt handler executes.
*
* Limited testing indicates that this workaround appears to be effective
* for the smc9194 Ethernet driver used on the Innovator. It should work
* on other FPGA interrupts as well, but any drivers that explicitly mask
* interrupts at the interrupt controller via disable_irq/enable_irq
* could pose a problem.
*/
/*
 * Set up the Innovator FPGA interrupt cascade: mask every source,
 * attach the right irq_chip to each Linux IRQ, then claim GPIO13
 * (the FPGA interrupt line) and install the demux handler.
 */
void omap1510_fpga_init_irq(void)
{
	int i, res;

	/* Start with every FPGA source masked. */
	__raw_writeb(0, OMAP1510_FPGA_IMR_LO);
	__raw_writeb(0, OMAP1510_FPGA_IMR_HI);
	__raw_writeb(0, INNOVATOR_FPGA_IMR2);

	for (i = OMAP_FPGA_IRQ_BASE; i < OMAP_FPGA_IRQ_END; i++) {
		/*
		 * Only the touchscreen interrupt is level-sensitive and
		 * uses the regular mask_ack chip; the edge-sensitive
		 * sources stay unmasked while being serviced.
		 */
		if (i == OMAP1510_INT_FPGA_TS)
			irq_set_chip(i, &omap_fpga_irq_ack);
		else
			irq_set_chip(i, &omap_fpga_irq);

		irq_set_handler(i, handle_edge_irq);
		set_irq_flags(i, IRQF_VALID);
	}

	/*
	 * The FPGA interrupt line is connected to GPIO13; claim the pin
	 * for the ARM.  (For general GPIO/MPUIO access and interrupts,
	 * please see gpio.[ch].)
	 */
	res = gpio_request(13, "FPGA irq");
	if (res) {
		pr_err("%s failed to get gpio\n", __func__);
		return;
	}
	gpio_direction_input(13);
	irq_set_irq_type(gpio_to_irq(13), IRQ_TYPE_EDGE_RISING);
	irq_set_chained_handler(OMAP1510_INT_FPGA, innovator_fpga_IRQ_demux);
}
| gpl-2.0 |
fishbowlFX/CM11-LGD325ds_kernel | arch/arm/mach-sa1100/jornada720.c | 4735 | 12117 | /*
* linux/arch/arm/mach-sa1100/jornada720.c
*
* HP Jornada720 init code
*
* Copyright (C) 2007 Kristoffer Ericson <Kristoffer.Ericson@gmail.com>
* Copyright (C) 2006 Filip Zyzniewski <filip.zyzniewski@tefnet.pl>
* Copyright (C) 2005 Michael Gernoth <michael@gernoth.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <video/s1d13xxxfb.h>
#include <asm/hardware/sa1111.h>
#include <asm/page.h>
#include <asm/mach-types.h>
#include <asm/setup.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <asm/mach/serial_sa1100.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include "generic.h"
/*
* HP Documentation referred in this file:
* http://www.jlime.com/downloads/development/docs/jornada7xx/jornada720.txt
*/
/* line 110 of HP's doc */
#define TUCR_VAL 0x20000400
/* memory space (line 52 of HP's doc) */
#define SA1111REGSTART 0x40000000
#define SA1111REGLEN 0x00002000
#define EPSONREGSTART 0x48000000
#define EPSONREGLEN 0x00100000
#define EPSONFBSTART 0x48200000
/* 512kB framebuffer */
#define EPSONFBLEN 512*1024
/*
 * Epson S1D13xxx register/value init table, handed to the framebuffer
 * driver via s1d13xxxfb_data.  Values come from HP's Jornada 720
 * documentation (exceptions noted inline below).
 */
static struct s1d13xxxfb_regval s1d13xxxfb_initregs[] = {
	/* line 344 of HP's doc */
	{0x0001,0x00},	// Miscellaneous Register
	{0x01FC,0x00},	// Display Mode Register
	{0x0004,0x00},	// General IO Pins Configuration Register 0
	{0x0005,0x00},	// General IO Pins Configuration Register 1
	{0x0008,0x00},	// General IO Pins Control Register 0
	{0x0009,0x00},	// General IO Pins Control Register 1
	{0x0010,0x01},	// Memory Clock Configuration Register
	{0x0014,0x11},	// LCD Pixel Clock Configuration Register
	{0x0018,0x01},	// CRT/TV Pixel Clock Configuration Register
	{0x001C,0x01},	// MediaPlug Clock Configuration Register
	{0x001E,0x01},	// CPU To Memory Wait State Select Register
	{0x0020,0x00},	// Memory Configuration Register
	{0x0021,0x45},	// DRAM Refresh Rate Register
	{0x002A,0x01},	// DRAM Timings Control Register 0
	{0x002B,0x03},	// DRAM Timings Control Register 1
	{0x0030,0x1c},	// Panel Type Register
	{0x0031,0x00},	// MOD Rate Register
	{0x0032,0x4F},	// LCD Horizontal Display Width Register
	{0x0034,0x07},	// LCD Horizontal Non-Display Period Register
	{0x0035,0x01},	// TFT FPLINE Start Position Register
	{0x0036,0x0B},	// TFT FPLINE Pulse Width Register
	{0x0038,0xEF},	// LCD Vertical Display Height Register 0
	{0x0039,0x00},	// LCD Vertical Display Height Register 1
	{0x003A,0x13},	// LCD Vertical Non-Display Period Register
	{0x003B,0x0B},	// TFT FPFRAME Start Position Register
	{0x003C,0x01},	// TFT FPFRAME Pulse Width Register
	{0x0040,0x05},	// LCD Display Mode Register (2:4bpp,3:8bpp,5:16bpp)
	{0x0041,0x00},	// LCD Miscellaneous Register
	{0x0042,0x00},	// LCD Display Start Address Register 0
	{0x0043,0x00},	// LCD Display Start Address Register 1
	{0x0044,0x00},	// LCD Display Start Address Register 2
	{0x0046,0x80},	// LCD Memory Address Offset Register 0
	{0x0047,0x02},	// LCD Memory Address Offset Register 1
	{0x0048,0x00},	// LCD Pixel Panning Register
	{0x004A,0x00},	// LCD Display FIFO High Threshold Control Register
	{0x004B,0x00},	// LCD Display FIFO Low Threshold Control Register
	{0x0050,0x4F},	// CRT/TV Horizontal Display Width Register
	{0x0052,0x13},	// CRT/TV Horizontal Non-Display Period Register
	{0x0053,0x01},	// CRT/TV HRTC Start Position Register
	{0x0054,0x0B},	// CRT/TV HRTC Pulse Width Register
	{0x0056,0xDF},	// CRT/TV Vertical Display Height Register 0
	{0x0057,0x01},	// CRT/TV Vertical Display Height Register 1
	{0x0058,0x2B},	// CRT/TV Vertical Non-Display Period Register
	{0x0059,0x09},	// CRT/TV VRTC Start Position Register
	{0x005A,0x01},	// CRT/TV VRTC Pulse Width Register
	{0x005B,0x10},	// TV Output Control Register
	{0x0060,0x03},	// CRT/TV Display Mode Register (2:4bpp,3:8bpp,5:16bpp)
	{0x0062,0x00},	// CRT/TV Display Start Address Register 0
	{0x0063,0x00},	// CRT/TV Display Start Address Register 1
	{0x0064,0x00},	// CRT/TV Display Start Address Register 2
	{0x0066,0x40},	// CRT/TV Memory Address Offset Register 0
	{0x0067,0x01},	// CRT/TV Memory Address Offset Register 1
	{0x0068,0x00},	// CRT/TV Pixel Panning Register
	{0x006A,0x00},	// CRT/TV Display FIFO High Threshold Control Register
	{0x006B,0x00},	// CRT/TV Display FIFO Low Threshold Control Register
	{0x0070,0x00},	// LCD Ink/Cursor Control Register
	{0x0071,0x01},	// LCD Ink/Cursor Start Address Register
	{0x0072,0x00},	// LCD Cursor X Position Register 0
	{0x0073,0x00},	// LCD Cursor X Position Register 1
	{0x0074,0x00},	// LCD Cursor Y Position Register 0
	{0x0075,0x00},	// LCD Cursor Y Position Register 1
	{0x0076,0x00},	// LCD Ink/Cursor Blue Color 0 Register
	{0x0077,0x00},	// LCD Ink/Cursor Green Color 0 Register
	{0x0078,0x00},	// LCD Ink/Cursor Red Color 0 Register
	{0x007A,0x1F},	// LCD Ink/Cursor Blue Color 1 Register
	{0x007B,0x3F},	// LCD Ink/Cursor Green Color 1 Register
	{0x007C,0x1F},	// LCD Ink/Cursor Red Color 1 Register
	{0x007E,0x00},	// LCD Ink/Cursor FIFO Threshold Register
	{0x0080,0x00},	// CRT/TV Ink/Cursor Control Register
	{0x0081,0x01},	// CRT/TV Ink/Cursor Start Address Register
	{0x0082,0x00},	// CRT/TV Cursor X Position Register 0
	{0x0083,0x00},	// CRT/TV Cursor X Position Register 1
	{0x0084,0x00},	// CRT/TV Cursor Y Position Register 0
	{0x0085,0x00},	// CRT/TV Cursor Y Position Register 1
	{0x0086,0x00},	// CRT/TV Ink/Cursor Blue Color 0 Register
	{0x0087,0x00},	// CRT/TV Ink/Cursor Green Color 0 Register
	{0x0088,0x00},	// CRT/TV Ink/Cursor Red Color 0 Register
	{0x008A,0x1F},	// CRT/TV Ink/Cursor Blue Color 1 Register
	{0x008B,0x3F},	// CRT/TV Ink/Cursor Green Color 1 Register
	{0x008C,0x1F},	// CRT/TV Ink/Cursor Red Color 1 Register
	{0x008E,0x00},	// CRT/TV Ink/Cursor FIFO Threshold Register
	{0x0100,0x00},	// BitBlt Control Register 0
	{0x0101,0x00},	// BitBlt Control Register 1
	{0x0102,0x00},	// BitBlt ROP Code/Color Expansion Register
	{0x0103,0x00},	// BitBlt Operation Register
	{0x0104,0x00},	// BitBlt Source Start Address Register 0
	{0x0105,0x00},	// BitBlt Source Start Address Register 1
	{0x0106,0x00},	// BitBlt Source Start Address Register 2
	{0x0108,0x00},	// BitBlt Destination Start Address Register 0
	{0x0109,0x00},	// BitBlt Destination Start Address Register 1
	{0x010A,0x00},	// BitBlt Destination Start Address Register 2
	{0x010C,0x00},	// BitBlt Memory Address Offset Register 0
	{0x010D,0x00},	// BitBlt Memory Address Offset Register 1
	{0x0110,0x00},	// BitBlt Width Register 0
	{0x0111,0x00},	// BitBlt Width Register 1
	{0x0112,0x00},	// BitBlt Height Register 0
	{0x0113,0x00},	// BitBlt Height Register 1
	{0x0114,0x00},	// BitBlt Background Color Register 0
	{0x0115,0x00},	// BitBlt Background Color Register 1
	{0x0118,0x00},	// BitBlt Foreground Color Register 0
	{0x0119,0x00},	// BitBlt Foreground Color Register 1
	{0x01E0,0x00},	// Look-Up Table Mode Register
	{0x01E2,0x00},	// Look-Up Table Address Register
	/* not sure, wouldn't like to mess with the driver */
	{0x01E4,0x00},	// Look-Up Table Data Register
	/* jornada doc says 0x00, but I trust the driver */
	{0x01F0,0x10},	// Power Save Configuration Register
	{0x01F1,0x00},	// Power Save Status Register
	{0x01F4,0x00},	// CPU-to-Memory Access Watchdog Timer Register
	{0x01FC,0x01},	// Display Mode Register(0x01:LCD, 0x02:CRT, 0x03:LCD&CRT)
};
/* Platform data for the Epson framebuffer: init table, no video callback. */
static struct s1d13xxxfb_pdata s1d13xxxfb_data = {
	.initregs		= s1d13xxxfb_initregs,
	.initregssize		= ARRAY_SIZE(s1d13xxxfb_initregs),
	.platform_init_video	= NULL
};

/* [0] = framebuffer memory window, [1] = controller registers. */
static struct resource s1d13xxxfb_resources[] = {
	[0] = DEFINE_RES_MEM(EPSONFBSTART, EPSONFBLEN),
	[1] = DEFINE_RES_MEM(EPSONREGSTART, EPSONREGLEN),
};

static struct platform_device s1d13xxxfb_device = {
	.name		= S1D_DEVICENAME,
	.id		= 0,
	.dev		= {
		.platform_data	= &s1d13xxxfb_data,
	},
	.num_resources	= ARRAY_SIZE(s1d13xxxfb_resources),
	.resource	= s1d13xxxfb_resources,
};
/* SA-1111 companion chip: register window plus its cascade interrupt. */
static struct resource sa1111_resources[] = {
	[0] = DEFINE_RES_MEM(SA1111REGSTART, SA1111REGLEN),
	[1] = DEFINE_RES_IRQ(IRQ_GPIO1),
};

/* The SA-1111 PS/2 mouse function is disabled on this board. */
static struct sa1111_platform_data sa1111_info = {
	.disable_devs	= SA1111_DEVID_PS2_MSE,
};

/* 32-bit DMA mask for the SA-1111. */
static u64 sa1111_dmamask = 0xffffffffUL;

static struct platform_device sa1111_device = {
	.name		= "sa1111",
	.id		= 0,
	.dev		= {
		.dma_mask		= &sa1111_dmamask,
		.coherent_dma_mask	= 0xffffffff,
		.platform_data		= &sa1111_info,
	},
	.num_resources	= ARRAY_SIZE(sa1111_resources),
	.resource	= sa1111_resources,
};
/* Board-specific platform devices; drivers bind to them by name. */
static struct platform_device jornada_ssp_device = {
	.name	= "jornada_ssp",
	.id	= -1,
};

static struct platform_device jornada_kbd_device = {
	.name	= "jornada720_kbd",
	.id	= -1,
};

static struct platform_device jornada_ts_device = {
	.name	= "jornada_ts",
	.id	= -1,
};

/* All devices registered in one call by jornada720_init(). */
static struct platform_device *devices[] __initdata = {
	&sa1111_device,
	&jornada_ssp_device,
	&s1d13xxxfb_device,
	&jornada_kbd_device,
	&jornada_ts_device,
};
/*
 * Board init: pulse GPIO20 to restart the clock feeding UART 3, then
 * register the platform devices.  The toggle/udelay sequence follows
 * HP's documentation; do not reorder it.
 */
static int __init jornada720_init(void)
{
	int ret = -ENODEV;

	if (machine_is_jornada720()) {
		/* we want to use gpio20 as input to drive the clock of our uart 3 */
		GPDR |= GPIO_GPIO20;	/* Clear gpio20 pin as input */
		TUCR = TUCR_VAL;
		GPSR = GPIO_GPIO20;	/* start gpio20 pin */
		udelay(1);
		GPCR = GPIO_GPIO20;	/* stop gpio20 */
		udelay(1);
		GPSR = GPIO_GPIO20;	/* restart gpio20 */
		udelay(20);		/* give it some time to restart */

		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
	}
	return ret;
}

arch_initcall(jornada720_init);
/* Static virtual mappings for the Epson controller registers and framebuffer. */
static struct map_desc jornada720_io_desc[] __initdata = {
	{	/* Epson registers */
		.virtual	= 0xf0000000,
		.pfn		= __phys_to_pfn(EPSONREGSTART),
		.length		= EPSONREGLEN,
		.type		= MT_DEVICE
	}, {	/* Epson frame buffer */
		.virtual	= 0xf1000000,
		.pfn		= __phys_to_pfn(EPSONFBSTART),
		.length		= EPSONFBLEN,
		.type		= MT_DEVICE
	}
};
/*
 * Map SA-1100 and board I/O, then register the serial ports:
 * hardware UART 3 becomes port 0 and UART 1 becomes port 1.
 */
static void __init jornada720_map_io(void)
{
	sa1100_map_io();
	iotable_init(jornada720_io_desc, ARRAY_SIZE(jornada720_io_desc));

	sa1100_register_uart(0, 3);
	sa1100_register_uart(1, 1);
}
/* Fixed MTD layout of the on-board flash; offsets/sizes are contiguous. */
static struct mtd_partition jornada720_partitions[] = {
	{
		.name		= "JORNADA720 boot firmware",
		.size		= 0x00040000,
		.offset		= 0,
		.mask_flags	= MTD_WRITEABLE,  /* force read-only */
	}, {
		.name		= "JORNADA720 kernel",
		.size		= 0x000c0000,
		.offset		= 0x00040000,
	}, {
		.name		= "JORNADA720 params",
		.size		= 0x00040000,
		.offset		= 0x00100000,
	}, {
		.name		= "JORNADA720 initrd",
		.size		= 0x00100000,
		.offset		= 0x00140000,
	}, {
		.name		= "JORNADA720 root cramfs",
		.size		= 0x00300000,
		.offset		= 0x00240000,
	}, {
		.name		= "JORNADA720 usr cramfs",
		.size		= 0x00800000,
		.offset		= 0x00540000,
	}, {
		.name		= "JORNADA720 usr local",
		.size		= 0,  /* will expand to the end of the flash */
		.offset		= 0x00d00000,
	}
};
/*
 * Gate the flash programming voltage via PPC_LDD7 (line 470 of HP's
 * doc), and make sure the line is driven as an output.
 */
static void jornada720_set_vpp(int vpp)
{
	unsigned long state = PPSR;

	if (vpp)
		state |= PPC_LDD7;	/* enable flash write */
	else
		state &= ~PPC_LDD7;	/* disable flash write */
	PPSR = state;

	PPDR |= PPC_LDD7;
}
/* CFI flash description: probe method, VPP control and fixed partitions. */
static struct flash_platform_data jornada720_flash_data = {
	.map_name	= "cfi_probe",
	.set_vpp	= jornada720_set_vpp,
	.parts		= jornada720_partitions,
	.nr_parts	= ARRAY_SIZE(jornada720_partitions),
};

/* 32MB flash window at static chip select 0. */
static struct resource jornada720_flash_resource =
	DEFINE_RES_MEM(SA1100_CS0_PHYS, SZ_32M);

/* Machine init: just hook up the MTD flash. */
static void __init jornada720_mach_init(void)
{
	sa11x0_register_mtd(&jornada720_flash_data, &jornada720_flash_resource, 1);
}
/* Machine descriptor tying the board callbacks into the ARM boot code. */
MACHINE_START(JORNADA720, "HP Jornada 720")
	/* Maintainer: Kristoffer Ericson <Kristoffer.Ericson@gmail.com> */
	.atag_offset	= 0x100,
	.map_io		= jornada720_map_io,
	.nr_irqs	= SA1100_NR_IRQS,
	.init_irq	= sa1100_init_irq,
	.timer		= &sa1100_timer,
	.init_machine	= jornada720_mach_init,
#ifdef CONFIG_SA1111
	/* Only needed when SA-1111 support is built in. */
	.dma_zone_size	= SZ_1M,
#endif
	.restart	= sa11x0_restart,
MACHINE_END
| gpl-2.0 |
RenderBroken/Victara-CM-kernel | drivers/media/video/vs6624.c | 4991 | 21056 | /*
* vs6624.c ST VS6624 CMOS image sensor driver
*
* Copyright (c) 2011 Analog Devices Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
#include "vs6624_regs.h"
/* Predefined frame sizes the sensor can output (see vs6624_s_mbus_fmt()). */
#define VGA_WIDTH	640
#define VGA_HEIGHT	480
#define QVGA_WIDTH	320
#define QVGA_HEIGHT	240
#define QQVGA_WIDTH	160
#define QQVGA_HEIGHT	120
#define CIF_WIDTH	352
#define CIF_HEIGHT	288
#define QCIF_WIDTH	176
#define QCIF_HEIGHT	144
#define QQCIF_WIDTH	88
#define QQCIF_HEIGHT	72
/* Upper bound for the programmable frame rate, frames per second. */
#define MAX_FRAME_RATE	30
/* Per-device driver state wrapped around the embedded v4l2_subdev. */
struct vs6624 {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;	/* control handler (contrast etc.) */
	struct v4l2_fract frame_rate;	/* frames per second as a fraction */
	struct v4l2_mbus_framefmt fmt;	/* currently configured format */
	unsigned ce_pin;		/* GPIO driving the chip-enable line */
};
/* Media bus formats this driver can produce, with their colorspaces. */
static const struct vs6624_format {
	enum v4l2_mbus_pixelcode mbus_code;
	enum v4l2_colorspace colorspace;
} vs6624_formats[] = {
	{
		.mbus_code	= V4L2_MBUS_FMT_UYVY8_2X8,
		.colorspace	= V4L2_COLORSPACE_JPEG,
	},
	{
		.mbus_code	= V4L2_MBUS_FMT_YUYV8_2X8,
		.colorspace	= V4L2_COLORSPACE_JPEG,
	},
	{
		.mbus_code	= V4L2_MBUS_FMT_RGB565_2X8_LE,
		.colorspace	= V4L2_COLORSPACE_SRGB,
	},
};

/* Format installed at probe time: UYVY at VGA resolution. */
static struct v4l2_mbus_framefmt vs6624_default_fmt = {
	.width		= VGA_WIDTH,
	.height		= VGA_HEIGHT,
	.code		= V4L2_MBUS_FMT_UYVY8_2X8,
	.field		= V4L2_FIELD_NONE,
	.colorspace	= V4L2_COLORSPACE_JPEG,
};
/*
 * Sensor setup table 1: pairs of (16-bit register address, 8-bit
 * value) written at probe time via vs6624_writeregs().  The table is
 * terminated by a 0x0000 address.  The values are opaque vendor
 * settings; do not edit by hand.
 */
static const u16 vs6624_p1[] = {
	0x8104, 0x03,
	0x8105, 0x01,
	0xc900, 0x03,
	0xc904, 0x47,
	0xc905, 0x10,
	0xc906, 0x80,
	0xc907, 0x3a,
	0x903a, 0x02,
	0x903b, 0x47,
	0x903c, 0x15,
	0xc908, 0x31,
	0xc909, 0xdc,
	0xc90a, 0x80,
	0xc90b, 0x44,
	0x9044, 0x02,
	0x9045, 0x31,
	0x9046, 0xe2,
	0xc90c, 0x07,
	0xc90d, 0xe0,
	0xc90e, 0x80,
	0xc90f, 0x47,
	0x9047, 0x90,
	0x9048, 0x83,
	0x9049, 0x81,
	0x904a, 0xe0,
	0x904b, 0x60,
	0x904c, 0x08,
	0x904d, 0x90,
	0x904e, 0xc0,
	0x904f, 0x43,
	0x9050, 0x74,
	0x9051, 0x01,
	0x9052, 0xf0,
	0x9053, 0x80,
	0x9054, 0x05,
	0x9055, 0xE4,
	0x9056, 0x90,
	0x9057, 0xc0,
	0x9058, 0x43,
	0x9059, 0xf0,
	0x905a, 0x02,
	0x905b, 0x07,
	0x905c, 0xec,
	0xc910, 0x5d,
	0xc911, 0xca,
	0xc912, 0x80,
	0xc913, 0x5d,
	0x905d, 0xa3,
	0x905e, 0x04,
	0x905f, 0xf0,
	0x9060, 0xa3,
	0x9061, 0x04,
	0x9062, 0xf0,
	0x9063, 0x22,
	0xc914, 0x72,
	0xc915, 0x92,
	0xc916, 0x80,
	0xc917, 0x64,
	0x9064, 0x74,
	0x9065, 0x01,
	0x9066, 0x02,
	0x9067, 0x72,
	0x9068, 0x95,
	0xc918, 0x47,
	0xc919, 0xf2,
	0xc91a, 0x81,
	0xc91b, 0x69,
	0x9169, 0x74,
	0x916a, 0x02,
	0x916b, 0xf0,
	0x916c, 0xec,
	0x916d, 0xb4,
	0x916e, 0x10,
	0x916f, 0x0a,
	0x9170, 0x90,
	0x9171, 0x80,
	0x9172, 0x16,
	0x9173, 0xe0,
	0x9174, 0x70,
	0x9175, 0x04,
	0x9176, 0x90,
	0x9177, 0xd3,
	0x9178, 0xc4,
	0x9179, 0xf0,
	0x917a, 0x22,
	0xc91c, 0x0a,
	0xc91d, 0xbe,
	0xc91e, 0x80,
	0xc91f, 0x73,
	0x9073, 0xfc,
	0x9074, 0xa3,
	0x9075, 0xe0,
	0x9076, 0xf5,
	0x9077, 0x82,
	0x9078, 0x8c,
	0x9079, 0x83,
	0x907a, 0xa3,
	0x907b, 0xa3,
	0x907c, 0xe0,
	0x907d, 0xfc,
	0x907e, 0xa3,
	0x907f, 0xe0,
	0x9080, 0xc3,
	0x9081, 0x9f,
	0x9082, 0xff,
	0x9083, 0xec,
	0x9084, 0x9e,
	0x9085, 0xfe,
	0x9086, 0x02,
	0x9087, 0x0a,
	0x9088, 0xea,
	0xc920, 0x47,
	0xc921, 0x38,
	0xc922, 0x80,
	0xc923, 0x89,
	0x9089, 0xec,
	0x908a, 0xd3,
	0x908b, 0x94,
	0x908c, 0x20,
	0x908d, 0x40,
	0x908e, 0x01,
	0x908f, 0x1c,
	0x9090, 0x90,
	0x9091, 0xd3,
	0x9092, 0xd4,
	0x9093, 0xec,
	0x9094, 0xf0,
	0x9095, 0x02,
	0x9096, 0x47,
	0x9097, 0x3d,
	0xc924, 0x45,
	0xc925, 0xca,
	0xc926, 0x80,
	0xc927, 0x98,
	0x9098, 0x12,
	0x9099, 0x77,
	0x909a, 0xd6,
	0x909b, 0x02,
	0x909c, 0x45,
	0x909d, 0xcd,
	0xc928, 0x20,
	0xc929, 0xd5,
	0xc92a, 0x80,
	0xc92b, 0x9e,
	0x909e, 0x90,
	0x909f, 0x82,
	0x90a0, 0x18,
	0x90a1, 0xe0,
	0x90a2, 0xb4,
	0x90a3, 0x03,
	0x90a4, 0x0e,
	0x90a5, 0x90,
	0x90a6, 0x83,
	0x90a7, 0xbf,
	0x90a8, 0xe0,
	0x90a9, 0x60,
	0x90aa, 0x08,
	0x90ab, 0x90,
	0x90ac, 0x81,
	0x90ad, 0xfc,
	0x90ae, 0xe0,
	0x90af, 0xff,
	0x90b0, 0xc3,
	0x90b1, 0x13,
	0x90b2, 0xf0,
	0x90b3, 0x90,
	0x90b4, 0x81,
	0x90b5, 0xfc,
	0x90b6, 0xe0,
	0x90b7, 0xff,
	0x90b8, 0x02,
	0x90b9, 0x20,
	0x90ba, 0xda,
	0xc92c, 0x70,
	0xc92d, 0xbc,
	0xc92e, 0x80,
	0xc92f, 0xbb,
	0x90bb, 0x90,
	0x90bc, 0x82,
	0x90bd, 0x18,
	0x90be, 0xe0,
	0x90bf, 0xb4,
	0x90c0, 0x03,
	0x90c1, 0x06,
	0x90c2, 0x90,
	0x90c3, 0xc1,
	0x90c4, 0x06,
	0x90c5, 0x74,
	0x90c6, 0x05,
	0x90c7, 0xf0,
	0x90c8, 0x90,
	0x90c9, 0xd3,
	0x90ca, 0xa0,
	0x90cb, 0x02,
	0x90cc, 0x70,
	0x90cd, 0xbf,
	0xc930, 0x72,
	0xc931, 0x21,
	0xc932, 0x81,
	0xc933, 0x3b,
	0x913b, 0x7d,
	0x913c, 0x02,
	0x913d, 0x7f,
	0x913e, 0x7b,
	0x913f, 0x02,
	0x9140, 0x72,
	0x9141, 0x25,
	0xc934, 0x28,
	0xc935, 0xae,
	0xc936, 0x80,
	0xc937, 0xd2,
	0x90d2, 0xf0,
	0x90d3, 0x90,
	0x90d4, 0xd2,
	0x90d5, 0x0a,
	0x90d6, 0x02,
	0x90d7, 0x28,
	0x90d8, 0xb4,
	0xc938, 0x28,
	0xc939, 0xb1,
	0xc93a, 0x80,
	0xc93b, 0xd9,
	0x90d9, 0x90,
	0x90da, 0x83,
	0x90db, 0xba,
	0x90dc, 0xe0,
	0x90dd, 0xff,
	0x90de, 0x90,
	0x90df, 0xd2,
	0x90e0, 0x08,
	0x90e1, 0xe0,
	0x90e2, 0xe4,
	0x90e3, 0xef,
	0x90e4, 0xf0,
	0x90e5, 0xa3,
	0x90e6, 0xe0,
	0x90e7, 0x74,
	0x90e8, 0xff,
	0x90e9, 0xf0,
	0x90ea, 0x90,
	0x90eb, 0xd2,
	0x90ec, 0x0a,
	0x90ed, 0x02,
	0x90ee, 0x28,
	0x90ef, 0xb4,
	0xc93c, 0x29,
	0xc93d, 0x79,
	0xc93e, 0x80,
	0xc93f, 0xf0,
	0x90f0, 0xf0,
	0x90f1, 0x90,
	0x90f2, 0xd2,
	0x90f3, 0x0e,
	0x90f4, 0x02,
	0x90f5, 0x29,
	0x90f6, 0x7f,
	0xc940, 0x29,
	0xc941, 0x7c,
	0xc942, 0x80,
	0xc943, 0xf7,
	0x90f7, 0x90,
	0x90f8, 0x83,
	0x90f9, 0xba,
	0x90fa, 0xe0,
	0x90fb, 0xff,
	0x90fc, 0x90,
	0x90fd, 0xd2,
	0x90fe, 0x0c,
	0x90ff, 0xe0,
	0x9100, 0xe4,
	0x9101, 0xef,
	0x9102, 0xf0,
	0x9103, 0xa3,
	0x9104, 0xe0,
	0x9105, 0x74,
	0x9106, 0xff,
	0x9107, 0xf0,
	0x9108, 0x90,
	0x9109, 0xd2,
	0x910a, 0x0e,
	0x910b, 0x02,
	0x910c, 0x29,
	0x910d, 0x7f,
	0xc944, 0x2a,
	0xc945, 0x42,
	0xc946, 0x81,
	0xc947, 0x0e,
	0x910e, 0xf0,
	0x910f, 0x90,
	0x9110, 0xd2,
	0x9111, 0x12,
	0x9112, 0x02,
	0x9113, 0x2a,
	0x9114, 0x48,
	0xc948, 0x2a,
	0xc949, 0x45,
	0xc94a, 0x81,
	0xc94b, 0x15,
	0x9115, 0x90,
	0x9116, 0x83,
	0x9117, 0xba,
	0x9118, 0xe0,
	0x9119, 0xff,
	0x911a, 0x90,
	0x911b, 0xd2,
	0x911c, 0x10,
	0x911d, 0xe0,
	0x911e, 0xe4,
	0x911f, 0xef,
	0x9120, 0xf0,
	0x9121, 0xa3,
	0x9122, 0xe0,
	0x9123, 0x74,
	0x9124, 0xff,
	0x9125, 0xf0,
	0x9126, 0x90,
	0x9127, 0xd2,
	0x9128, 0x12,
	0x9129, 0x02,
	0x912a, 0x2a,
	0x912b, 0x48,
	0xc900, 0x01,
	0x0000, 0x00,
};
/* Sensor setup table 2, applied after table 1; 0x0000-terminated. */
static const u16 vs6624_p2[] = {
	0x806f, 0x01,
	0x058c, 0x01,
	0x0000, 0x00,
};
/*
 * Runtime tuning table (white balance dampers, Nora, constrainer)
 * written at probe time after the defaults; 0x0000-terminated.
 */
static const u16 vs6624_run_setup[] = {
	0x1d18, 0x00,				/* Enableconstrainedwhitebalance */
	VS6624_PEAK_MIN_OUT_G_MSB, 0x3c,	/* Damper PeakGain Output MSB */
	VS6624_PEAK_MIN_OUT_G_LSB, 0x66,	/* Damper PeakGain Output LSB */
	VS6624_CM_LOW_THR_MSB, 0x65,		/* Damper Low MSB */
	VS6624_CM_LOW_THR_LSB, 0xd1,		/* Damper Low LSB */
	VS6624_CM_HIGH_THR_MSB, 0x66,		/* Damper High MSB */
	VS6624_CM_HIGH_THR_LSB, 0x62,		/* Damper High LSB */
	VS6624_CM_MIN_OUT_MSB, 0x00,		/* Damper Min output MSB */
	VS6624_CM_MIN_OUT_LSB, 0x00,		/* Damper Min output LSB */
	VS6624_NORA_DISABLE, 0x00,		/* Nora fDisable */
	VS6624_NORA_USAGE, 0x04,		/* Nora usage */
	VS6624_NORA_LOW_THR_MSB, 0x63,		/* Damper Low MSB Changed 0x63 to 0x65 */
	VS6624_NORA_LOW_THR_LSB, 0xd1,		/* Damper Low LSB */
	VS6624_NORA_HIGH_THR_MSB, 0x68,		/* Damper High MSB */
	VS6624_NORA_HIGH_THR_LSB, 0xdd,		/* Damper High LSB */
	VS6624_NORA_MIN_OUT_MSB, 0x3a,		/* Damper Min output MSB */
	VS6624_NORA_MIN_OUT_LSB, 0x00,		/* Damper Min output LSB */
	VS6624_F2B_DISABLE, 0x00,		/* Disable */
	0x1d8a, 0x30,				/* MAXWeightHigh */
	0x1d91, 0x62,				/* fpDamperLowThresholdHigh MSB */
	0x1d92, 0x4a,				/* fpDamperLowThresholdHigh LSB */
	0x1d95, 0x65,				/* fpDamperHighThresholdHigh MSB */
	0x1d96, 0x0e,				/* fpDamperHighThresholdHigh LSB */
	0x1da1, 0x3a,				/* fpMinimumDamperOutputLow MSB */
	0x1da2, 0xb8,				/* fpMinimumDamperOutputLow LSB */
	0x1e08, 0x06,				/* MAXWeightLow */
	0x1e0a, 0x0a,				/* MAXWeightHigh */
	0x1601, 0x3a,				/* Red A MSB */
	0x1602, 0x14,				/* Red A LSB */
	0x1605, 0x3b,				/* Blue A MSB */
	0x1606, 0x85,				/* BLue A LSB */
	0x1609, 0x3b,				/* RED B MSB */
	0x160a, 0x85,				/* RED B LSB */
	0x160d, 0x3a,				/* Blue B MSB */
	0x160e, 0x14,				/* Blue B LSB */
	0x1611, 0x30,				/* Max Distance from Locus MSB */
	0x1612, 0x8f,				/* Max Distance from Locus MSB */
	0x1614, 0x01,				/* Enable constrainer */
	0x0000, 0x00,
};
/*
 * Driver default register values (controls, zoom/pan steps, sensor
 * mode, pipe setup) written at probe time; 0x0000-terminated.
 */
static const u16 vs6624_default[] = {
	VS6624_CONTRAST0, 0x84,
	VS6624_SATURATION0, 0x75,
	VS6624_GAMMA0, 0x11,
	VS6624_CONTRAST1, 0x84,
	VS6624_SATURATION1, 0x75,
	VS6624_GAMMA1, 0x11,
	VS6624_MAN_RG, 0x80,
	VS6624_MAN_GG, 0x80,
	VS6624_MAN_BG, 0x80,
	VS6624_WB_MODE, 0x1,
	VS6624_EXPO_COMPENSATION, 0xfe,
	VS6624_EXPO_METER, 0x0,
	VS6624_LIGHT_FREQ, 0x64,
	VS6624_PEAK_GAIN, 0xe,
	VS6624_PEAK_LOW_THR, 0x28,
	VS6624_HMIRROR0, 0x0,
	VS6624_VFLIP0, 0x0,
	VS6624_ZOOM_HSTEP0_MSB, 0x0,
	VS6624_ZOOM_HSTEP0_LSB, 0x1,
	VS6624_ZOOM_VSTEP0_MSB, 0x0,
	VS6624_ZOOM_VSTEP0_LSB, 0x1,
	VS6624_PAN_HSTEP0_MSB, 0x0,
	VS6624_PAN_HSTEP0_LSB, 0xf,
	VS6624_PAN_VSTEP0_MSB, 0x0,
	VS6624_PAN_VSTEP0_LSB, 0xf,
	VS6624_SENSOR_MODE, 0x1,
	VS6624_SYNC_CODE_SETUP, 0x21,
	VS6624_DISABLE_FR_DAMPER, 0x0,
	VS6624_FR_DEN, 0x1,
	VS6624_FR_NUM_LSB, 0xf,
	VS6624_INIT_PIPE_SETUP, 0x0,
	VS6624_IMG_FMT0, 0x0,
	VS6624_YUV_SETUP, 0x1,
	VS6624_IMAGE_SIZE0, 0x2,
	0x0000, 0x00,
};
/* Map a subdev pointer back to the enclosing vs6624 state. */
static inline struct vs6624 *to_vs6624(struct v4l2_subdev *sd)
{
	return container_of(sd, struct vs6624, sd);
}

/* Map a control back to the subdev owning its control handler. */
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
	return &container_of(ctrl->handler, struct vs6624, hdl)->sd;
}
/*
 * Read one 8-bit sensor register at 16-bit address @index over I2C.
 *
 * NOTE(review): the i2c_master_send()/i2c_master_recv() return values
 * are not checked; on a failed transfer this returns whatever is left
 * in the buffer rather than an error code.
 */
static int vs6624_read(struct v4l2_subdev *sd, u16 index)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u8 buf[2];

	buf[0] = index >> 8;	/* register address, MSB first */
	buf[1] = index;
	i2c_master_send(client, buf, 2);
	i2c_master_recv(client, buf, 1);

	return buf[0];
}
/*
 * Write @value to the 8-bit sensor register at 16-bit address @index.
 * Returns the i2c_master_send() result (3 on success, negative errno
 * on failure).
 */
static int vs6624_write(struct v4l2_subdev *sd, u16 index,
		u8 value)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u8 msg[3] = { index >> 8, index & 0xff, value };

	return i2c_master_send(client, msg, sizeof(msg));
}
/*
 * Write a table of (address, value) pairs; the table ends at the
 * first pair whose address is 0x0000.  Always returns 0.
 */
static int vs6624_writeregs(struct v4l2_subdev *sd, const u16 *regs)
{
	for (; regs[0] != 0x00; regs += 2)
		vs6624_write(sd, regs[0], regs[1]);

	return 0;
}
/* Apply a user control change by writing the matching sensor register. */
static int vs6624_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = to_sd(ctrl);

	switch (ctrl->id) {
	case V4L2_CID_CONTRAST:
		vs6624_write(sd, VS6624_CONTRAST0, ctrl->val);
		break;
	case V4L2_CID_SATURATION:
		vs6624_write(sd, VS6624_SATURATION0, ctrl->val);
		break;
	case V4L2_CID_HFLIP:
		vs6624_write(sd, VS6624_HMIRROR0, ctrl->val);
		break;
	case V4L2_CID_VFLIP:
		vs6624_write(sd, VS6624_VFLIP0, ctrl->val);
		break;
	default:
		return -EINVAL;	/* control not handled by this driver */
	}
	return 0;
}
/* Enumerate the supported media bus pixel codes by table index. */
static int vs6624_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
			enum v4l2_mbus_pixelcode *code)
{
	if (index < ARRAY_SIZE(vs6624_formats)) {
		*code = vs6624_formats[index].mbus_code;
		return 0;
	}

	return -EINVAL;
}
static int vs6624_try_mbus_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *fmt)
{
int index;
for (index = 0; index < ARRAY_SIZE(vs6624_formats); index++)
if (vs6624_formats[index].mbus_code == fmt->code)
break;
if (index >= ARRAY_SIZE(vs6624_formats)) {
/* default to first format */
index = 0;
fmt->code = vs6624_formats[0].mbus_code;
}
/* sensor mode is VGA */
if (fmt->width > VGA_WIDTH)
fmt->width = VGA_WIDTH;
if (fmt->height > VGA_HEIGHT)
fmt->height = VGA_HEIGHT;
fmt->width = fmt->width & (~3);
fmt->height = fmt->height & (~3);
fmt->field = V4L2_FIELD_NONE;
fmt->colorspace = vs6624_formats[index].colorspace;
return 0;
}
/*
 * Program the active format: pixel format registers first, then the
 * frame size (predefined sizes use a size code; anything else is set
 * manually with cropping enabled), and remember the result.
 */
static int vs6624_s_mbus_fmt(struct v4l2_subdev *sd,
			struct v4l2_mbus_framefmt *fmt)
{
	struct vs6624 *sensor = to_vs6624(sd);
	int ret;

	/* normalize the request so only supported values are programmed */
	ret = vs6624_try_mbus_fmt(sd, fmt);
	if (ret)
		return ret;

	/* set image format */
	switch (fmt->code) {
	case V4L2_MBUS_FMT_UYVY8_2X8:
		vs6624_write(sd, VS6624_IMG_FMT0, 0x0);
		vs6624_write(sd, VS6624_YUV_SETUP, 0x1);
		break;
	case V4L2_MBUS_FMT_YUYV8_2X8:
		vs6624_write(sd, VS6624_IMG_FMT0, 0x0);
		vs6624_write(sd, VS6624_YUV_SETUP, 0x3);
		break;
	case V4L2_MBUS_FMT_RGB565_2X8_LE:
		vs6624_write(sd, VS6624_IMG_FMT0, 0x4);
		vs6624_write(sd, VS6624_RGB_SETUP, 0x0);
		break;
	default:
		return -EINVAL;
	}

	/* set image size */
	if ((fmt->width == VGA_WIDTH) && (fmt->height == VGA_HEIGHT))
		vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x2);
	else if ((fmt->width == QVGA_WIDTH) && (fmt->height == QVGA_HEIGHT))
		vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x4);
	else if ((fmt->width == QQVGA_WIDTH) && (fmt->height == QQVGA_HEIGHT))
		vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x6);
	else if ((fmt->width == CIF_WIDTH) && (fmt->height == CIF_HEIGHT))
		vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x3);
	else if ((fmt->width == QCIF_WIDTH) && (fmt->height == QCIF_HEIGHT))
		vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x5);
	else if ((fmt->width == QQCIF_WIDTH) && (fmt->height == QQCIF_HEIGHT))
		vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x7);
	else {
		/* arbitrary size: program it manually and enable cropping */
		vs6624_write(sd, VS6624_IMAGE_SIZE0, 0x8);
		vs6624_write(sd, VS6624_MAN_HSIZE0_MSB, fmt->width >> 8);
		vs6624_write(sd, VS6624_MAN_HSIZE0_LSB, fmt->width & 0xFF);
		vs6624_write(sd, VS6624_MAN_VSIZE0_MSB, fmt->height >> 8);
		vs6624_write(sd, VS6624_MAN_VSIZE0_LSB, fmt->height & 0xFF);
		vs6624_write(sd, VS6624_CROP_CTRL0, 0x1);
	}

	sensor->fmt = *fmt;
	return 0;
}
/* Return the format most recently stored by vs6624_s_mbus_fmt(). */
static int vs6624_g_mbus_fmt(struct v4l2_subdev *sd,
			struct v4l2_mbus_framefmt *fmt)
{
	struct vs6624 *sensor = to_vs6624(sd);

	*fmt = sensor->fmt;
	return 0;
}
/*
 * Report capture timing.  The driver stores a frame *rate*
 * (frames/second) while V4L2 reports time-per-frame, hence the
 * numerator and denominator are swapped here.
 */
static int vs6624_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
{
	struct vs6624 *sensor = to_vs6624(sd);
	struct v4l2_captureparm *cp = &parms->parm.capture;

	if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	memset(cp, 0, sizeof(*cp));
	cp->capability = V4L2_CAP_TIMEPERFRAME;
	cp->timeperframe.numerator = sensor->frame_rate.denominator;
	cp->timeperframe.denominator = sensor->frame_rate.numerator;
	return 0;
}
/*
 * Set the capture frame rate.  V4L2 passes time-per-frame; the driver
 * stores its inverse (frames/second) and programs the frame rate
 * registers.  Zero or above-maximum requests reset to MAX_FRAME_RATE.
 */
static int vs6624_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
{
	struct vs6624 *sensor = to_vs6624(sd);
	struct v4l2_captureparm *cp = &parms->parm.capture;
	struct v4l2_fract *tpf = &cp->timeperframe;

	if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (cp->extendedmode != 0)
		return -EINVAL;

	if (tpf->numerator == 0 || tpf->denominator == 0
	    || (tpf->denominator > tpf->numerator * MAX_FRAME_RATE)) {
		/* reset to max frame rate */
		tpf->numerator = 1;
		tpf->denominator = MAX_FRAME_RATE;
	}
	/* store as a rate: invert the time-per-frame fraction */
	sensor->frame_rate.numerator = tpf->denominator;
	sensor->frame_rate.denominator = tpf->numerator;
	vs6624_write(sd, VS6624_DISABLE_FR_DAMPER, 0x0);
	vs6624_write(sd, VS6624_FR_NUM_MSB,
			sensor->frame_rate.numerator >> 8);
	vs6624_write(sd, VS6624_FR_NUM_LSB,
			sensor->frame_rate.numerator & 0xFF);
	vs6624_write(sd, VS6624_FR_DEN,
			sensor->frame_rate.denominator & 0xFF);
	return 0;
}
/* Start (command 0x2) or stop (command 0x4) streaming. */
static int vs6624_s_stream(struct v4l2_subdev *sd, int enable)
{
	if (enable)
		vs6624_write(sd, VS6624_USER_CMD, 0x2);
	else
		vs6624_write(sd, VS6624_USER_CMD, 0x4);
	udelay(100);	/* settle delay — presumably required by the sensor; TODO confirm */
	return 0;
}
/*
 * Report the chip identity; the revision is the 16-bit firmware
 * version read from the major/minor version registers.
 */
static int vs6624_g_chip_ident(struct v4l2_subdev *sd,
		struct v4l2_dbg_chip_ident *chip)
{
	int rev;
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	rev = (vs6624_read(sd, VS6624_FW_VSN_MAJOR) << 8)
			| vs6624_read(sd, VS6624_FW_VSN_MINOR);

	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_VS6624, rev);
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Debug read of a single sensor register (CONFIG_VIDEO_ADV_DEBUG only). */
static int vs6624_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (!v4l2_chip_match_i2c_client(client, &reg->match))
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	reg->val = vs6624_read(sd, reg->reg & 0xffff);
	reg->size = 1;	/* registers are 8 bits wide */
	return 0;
}

/* Debug write of a single sensor register (CONFIG_VIDEO_ADV_DEBUG only). */
static int vs6624_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (!v4l2_chip_match_i2c_client(client, &reg->match))
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	vs6624_write(sd, reg->reg & 0xffff, reg->val & 0xff);
	return 0;
}
#endif
/* Control, core and video op tables wiring this driver into V4L2. */
static const struct v4l2_ctrl_ops vs6624_ctrl_ops = {
	.s_ctrl = vs6624_s_ctrl,
};

static const struct v4l2_subdev_core_ops vs6624_core_ops = {
	.g_chip_ident = vs6624_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = vs6624_g_register,
	.s_register = vs6624_s_register,
#endif
};

static const struct v4l2_subdev_video_ops vs6624_video_ops = {
	.enum_mbus_fmt = vs6624_enum_mbus_fmt,
	.try_mbus_fmt = vs6624_try_mbus_fmt,
	.s_mbus_fmt = vs6624_s_mbus_fmt,
	.g_mbus_fmt = vs6624_g_mbus_fmt,
	.s_parm = vs6624_s_parm,
	.g_parm = vs6624_g_parm,
	.s_stream = vs6624_s_stream,
};

static const struct v4l2_subdev_ops vs6624_ops = {
	.core = &vs6624_core_ops,
	.video = &vs6624_video_ops,
};
/*
 * Probe: power the sensor up via its chip-enable GPIO (passed through
 * platform_data), load the firmware/register patch sets, program the
 * default frame rate and format, and register the V4L2 controls.
 * Returns 0 on success or a negative errno; all acquired resources are
 * released on each failure path.
 */
static int __devinit vs6624_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct vs6624 *sensor;
struct v4l2_subdev *sd;
struct v4l2_ctrl_handler *hdl;
const unsigned *ce;	/* chip-enable GPIO number from platform data */
int ret;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -EIO;
ce = client->dev.platform_data;
if (ce == NULL)
return -EINVAL;
ret = gpio_request(*ce, "VS6624 Chip Enable");
if (ret) {
v4l_err(client, "failed to request GPIO %d\n", *ce);
return ret;
}
/* assert chip enable to power the sensor up */
gpio_direction_output(*ce, 1);
/* wait 100ms before any further i2c writes are performed */
mdelay(100);
sensor = kzalloc(sizeof(*sensor), GFP_KERNEL);
if (sensor == NULL) {
gpio_free(*ce);
return -ENOMEM;
}
sd = &sensor->sd;
v4l2_i2c_subdev_init(sd, client, &vs6624_ops);
/* load patch set 1, start the microcontroller, then patch set 2 and
 * the default/run register tables */
vs6624_writeregs(sd, vs6624_p1);
vs6624_write(sd, VS6624_MICRO_EN, 0x2);
vs6624_write(sd, VS6624_DIO_EN, 0x1);
mdelay(10);
vs6624_writeregs(sd, vs6624_p2);
vs6624_writeregs(sd, vs6624_default);
vs6624_write(sd, VS6624_HSYNC_SETUP, 0xF);
vs6624_writeregs(sd, vs6624_run_setup);
/* set frame rate */
sensor->frame_rate.numerator = MAX_FRAME_RATE;
sensor->frame_rate.denominator = 1;
vs6624_write(sd, VS6624_DISABLE_FR_DAMPER, 0x0);
vs6624_write(sd, VS6624_FR_NUM_MSB,
sensor->frame_rate.numerator >> 8);
vs6624_write(sd, VS6624_FR_NUM_LSB,
sensor->frame_rate.numerator & 0xFF);
vs6624_write(sd, VS6624_FR_DEN,
sensor->frame_rate.denominator & 0xFF);
sensor->fmt = vs6624_default_fmt;
sensor->ce_pin = *ce;
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
/* register the four user controls; errors accumulate in hdl->error */
hdl = &sensor->hdl;
v4l2_ctrl_handler_init(hdl, 4);
v4l2_ctrl_new_std(hdl, &vs6624_ctrl_ops,
V4L2_CID_CONTRAST, 0, 0xFF, 1, 0x87);
v4l2_ctrl_new_std(hdl, &vs6624_ctrl_ops,
V4L2_CID_SATURATION, 0, 0xFF, 1, 0x78);
v4l2_ctrl_new_std(hdl, &vs6624_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(hdl, &vs6624_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
/* hook the control handler into the driver */
sd->ctrl_handler = hdl;
if (hdl->error) {
int err = hdl->error;
v4l2_ctrl_handler_free(hdl);
kfree(sensor);
gpio_free(*ce);
return err;
}
/* initialize the hardware to the default control values */
/* NOTE(review): on these failure paths the subdev initialized by
 * v4l2_i2c_subdev_init() is freed without an explicit unregister —
 * verify against the v4l2 core's expectations for i2c subdevs. */
ret = v4l2_ctrl_handler_setup(hdl);
if (ret) {
v4l2_ctrl_handler_free(hdl);
kfree(sensor);
gpio_free(*ce);
}
return ret;
}
/*
 * Remove: tear down in reverse order of probe — unregister the subdev,
 * free the control handler, release the chip-enable GPIO, then free the
 * driver state.
 */
static int __devexit vs6624_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct vs6624 *sensor = to_vs6624(sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(sd->ctrl_handler);
gpio_free(sensor->ce_pin);
kfree(sensor);
return 0;
}
/* I2C device ids this driver binds to. */
static const struct i2c_device_id vs6624_id[] = {
{"vs6624", 0},
{},
};
MODULE_DEVICE_TABLE(i2c, vs6624_id);
static struct i2c_driver vs6624_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "vs6624",
},
.probe = vs6624_probe,
.remove = __devexit_p(vs6624_remove),
.id_table = vs6624_id,
};
/* Module init/exit: plain i2c driver registration. */
static __init int vs6624_init(void)
{
return i2c_add_driver(&vs6624_driver);
}
static __exit void vs6624_exit(void)
{
i2c_del_driver(&vs6624_driver);
}
module_init(vs6624_init);
module_exit(vs6624_exit);
MODULE_DESCRIPTION("VS6624 sensor driver");
MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
aosp-samsung-msm7x30/android_kernel_samsung_msm7x30-common | drivers/rtc/rtc-ls1x.c | 4991 | 5627 | /*
* Copyright (c) 2011 Zhao Zhang <zhzhl555@gmail.com>
*
* Derived from driver/rtc/rtc-au1xxx.c
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/io.h>
#include <asm/mach-loongson1/loongson1.h>
#define LS1X_RTC_REG_OFFSET (LS1X_RTC_BASE + 0x20)
#define LS1X_RTC_REGS(x) \
((void __iomem *)KSEG1ADDR(LS1X_RTC_REG_OFFSET + (x)))
/*RTC programmable counters 0 and 1*/
#define SYS_COUNTER_CNTRL (LS1X_RTC_REGS(0x20))
#define SYS_CNTRL_ERS (1 << 23)
#define SYS_CNTRL_RTS (1 << 20)
#define SYS_CNTRL_RM2 (1 << 19)
#define SYS_CNTRL_RM1 (1 << 18)
#define SYS_CNTRL_RM0 (1 << 17)
#define SYS_CNTRL_RS (1 << 16)
#define SYS_CNTRL_BP (1 << 14)
#define SYS_CNTRL_REN (1 << 13)
#define SYS_CNTRL_BRT (1 << 12)
#define SYS_CNTRL_TEN (1 << 11)
#define SYS_CNTRL_BTT (1 << 10)
#define SYS_CNTRL_E0 (1 << 8)
#define SYS_CNTRL_ETS (1 << 7)
#define SYS_CNTRL_32S (1 << 5)
#define SYS_CNTRL_TTS (1 << 4)
#define SYS_CNTRL_TM2 (1 << 3)
#define SYS_CNTRL_TM1 (1 << 2)
#define SYS_CNTRL_TM0 (1 << 1)
#define SYS_CNTRL_TS (1 << 0)
/* Programmable Counter 0 Registers */
#define SYS_TOYTRIM (LS1X_RTC_REGS(0))
#define SYS_TOYWRITE0 (LS1X_RTC_REGS(4))
#define SYS_TOYWRITE1 (LS1X_RTC_REGS(8))
#define SYS_TOYREAD0 (LS1X_RTC_REGS(0xC))
#define SYS_TOYREAD1 (LS1X_RTC_REGS(0x10))
#define SYS_TOYMATCH0 (LS1X_RTC_REGS(0x14))
#define SYS_TOYMATCH1 (LS1X_RTC_REGS(0x18))
#define SYS_TOYMATCH2 (LS1X_RTC_REGS(0x1C))
/* Programmable Counter 1 Registers */
#define SYS_RTCTRIM (LS1X_RTC_REGS(0x40))
#define SYS_RTCWRITE0 (LS1X_RTC_REGS(0x44))
#define SYS_RTCREAD0 (LS1X_RTC_REGS(0x48))
#define SYS_RTCMATCH0 (LS1X_RTC_REGS(0x4C))
#define SYS_RTCMATCH1 (LS1X_RTC_REGS(0x50))
#define SYS_RTCMATCH2 (LS1X_RTC_REGS(0x54))
#define LS1X_SEC_OFFSET (4)
#define LS1X_MIN_OFFSET (10)
#define LS1X_HOUR_OFFSET (16)
#define LS1X_DAY_OFFSET (21)
#define LS1X_MONTH_OFFSET (26)
#define LS1X_SEC_MASK (0x3f)
#define LS1X_MIN_MASK (0x3f)
#define LS1X_HOUR_MASK (0x1f)
#define LS1X_DAY_MASK (0x1f)
#define LS1X_MONTH_MASK (0x3f)
#define LS1X_YEAR_MASK (0xffffffff)
#define ls1x_get_sec(t) (((t) >> LS1X_SEC_OFFSET) & LS1X_SEC_MASK)
#define ls1x_get_min(t) (((t) >> LS1X_MIN_OFFSET) & LS1X_MIN_MASK)
#define ls1x_get_hour(t) (((t) >> LS1X_HOUR_OFFSET) & LS1X_HOUR_MASK)
#define ls1x_get_day(t) (((t) >> LS1X_DAY_OFFSET) & LS1X_DAY_MASK)
#define ls1x_get_month(t) (((t) >> LS1X_MONTH_OFFSET) & LS1X_MONTH_MASK)
#define RTC_CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
/*
 * Read the current time from the TOY counter pair: TOYREAD0 packs
 * month/day/hour/min/sec in bit fields, TOYREAD1 holds the full year.
 * The fields are recombined via mktime() and converted back to a
 * struct rtc_time, which is then validated.
 */
static int ls1x_rtc_read_time(struct device *dev, struct rtc_time *rtm)
{
unsigned long v, t;
v = readl(SYS_TOYREAD0);
t = readl(SYS_TOYREAD1);
memset(rtm, 0, sizeof(struct rtc_time));
t = mktime((t & LS1X_YEAR_MASK), ls1x_get_month(v),
ls1x_get_day(v), ls1x_get_hour(v),
ls1x_get_min(v), ls1x_get_sec(v));
rtc_time_to_tm(t, rtm);
return rtc_valid_tm(rtm);
}
/*
 * Set the time: pack month/day/hour/min/sec into TOYWRITE0 and the
 * absolute year (tm_year is years since 1900) into TOYWRITE1.  After
 * each write, poll the TS busy bit with a bounded counter so a stuck
 * controller yields -ETIMEDOUT instead of hanging.
 */
static int ls1x_rtc_set_time(struct device *dev, struct rtc_time *rtm)
{
unsigned long v, t, c;
int ret = -ETIMEDOUT;
v = ((rtm->tm_mon + 1) << LS1X_MONTH_OFFSET)
| (rtm->tm_mday << LS1X_DAY_OFFSET)
| (rtm->tm_hour << LS1X_HOUR_OFFSET)
| (rtm->tm_min << LS1X_MIN_OFFSET)
| (rtm->tm_sec << LS1X_SEC_OFFSET);
writel(v, SYS_TOYWRITE0);
c = 0x10000;
/* add timeout check counter, for more safe */
while ((readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_TS) && --c)
usleep_range(1000, 3000);
if (!c) {
dev_err(dev, "set time timeout!\n");
goto err;
}
t = rtm->tm_year + 1900;
writel(t, SYS_TOYWRITE1);
c = 0x10000;
while ((readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_TS) && --c)
usleep_range(1000, 3000);
if (!c) {
dev_err(dev, "set time timeout!\n");
goto err;
}
return 0;
err:
return ret;
}
/* RTC class operations: only read/set time are supported (no alarms). */
static struct rtc_class_ops ls1x_rtc_ops = {
.read_time = ls1x_rtc_read_time,
.set_time = ls1x_rtc_set_time,
};
/*
 * Probe: verify the RTC counters are enabled and running on the 32kHz
 * crystal (RTC_CNTR_OK), trim the TOY counter to 1 Hz if needed, then
 * register the RTC class device.
 */
static int __devinit ls1x_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtcdev;
unsigned long v;
int ret;
v = readl(SYS_COUNTER_CNTRL);
if (!(v & RTC_CNTR_OK)) {
dev_err(&pdev->dev, "rtc counters not working\n");
ret = -ENODEV;
goto err;
}
ret = -ETIMEDOUT;
/* set to 1 HZ if needed */
if (readl(SYS_TOYTRIM) != 32767) {
/* wait for any in-flight trim write to complete, bounded */
v = 0x100000;
while ((readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_TTS) && --v)
usleep_range(1000, 3000);
if (!v) {
dev_err(&pdev->dev, "time out\n");
goto err;
}
writel(32767, SYS_TOYTRIM);
}
/* this loop couldn't be endless */
/* NOTE(review): unlike the loop above, this wait has no timeout
 * counter — it relies on the hardware always clearing TTS; confirm. */
while (readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_TTS)
usleep_range(1000, 3000);
rtcdev = rtc_device_register("ls1x-rtc", &pdev->dev,
&ls1x_rtc_ops , THIS_MODULE);
if (IS_ERR(rtcdev)) {
ret = PTR_ERR(rtcdev);
goto err;
}
platform_set_drvdata(pdev, rtcdev);
return 0;
err:
return ret;
}
/*
 * Remove: unregister the RTC class device stored in driver data and
 * clear the driver-data pointer.
 */
static int __devexit ls1x_rtc_remove(struct platform_device *pdev)
{
	rtc_device_unregister(platform_get_drvdata(pdev));
	platform_set_drvdata(pdev, NULL);
	return 0;
}
/* Platform driver glue; module_platform_driver() generates init/exit. */
static struct platform_driver ls1x_rtc_driver = {
.driver = {
.name = "ls1x-rtc",
.owner = THIS_MODULE,
},
.remove = __devexit_p(ls1x_rtc_remove),
.probe = ls1x_rtc_probe,
};
module_platform_driver(ls1x_rtc_driver);
MODULE_AUTHOR("zhao zhang <zhzhl555@gmail.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Phoenix-Kernel/android_kernel_lge_vee1 | drivers/leds/leds-wm8350.c | 4991 | 5829 | /*
* LED driver for WM8350 driven LEDS.
*
* Copyright(C) 2007, 2008 Wolfson Microelectronics PLC.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/mfd/wm8350/pmic.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
/* Microamps */
static const int isink_cur[] = {
4,
5,
6,
7,
8,
10,
11,
14,
16,
19,
23,
27,
32,
39,
46,
54,
65,
77,
92,
109,
130,
154,
183,
218,
259,
308,
367,
436,
518,
616,
733,
872,
1037,
1233,
1466,
1744,
2073,
2466,
2933,
3487,
4147,
4932,
5865,
6975,
8294,
9864,
11730,
13949,
16589,
19728,
23460,
27899,
33178,
39455,
46920,
55798,
66355,
78910,
93840,
111596,
132710,
157820,
187681,
223191
};
#define to_wm8350_led(led_cdev) \
container_of(led_cdev, struct wm8350_led, cdev)
/*
 * Turn the LED supplies on: enable the current sink first, then its
 * DCDC supply.  If the DCDC fails, the ISINK is rolled back so the two
 * regulators' enable counts stay balanced.  Caller holds led->mutex.
 */
static void wm8350_led_enable(struct wm8350_led *led)
{
int ret;
if (led->enabled)
return;
ret = regulator_enable(led->isink);
if (ret != 0) {
dev_err(led->cdev.dev, "Failed to enable ISINK: %d\n", ret);
return;
}
ret = regulator_enable(led->dcdc);
if (ret != 0) {
dev_err(led->cdev.dev, "Failed to enable DCDC: %d\n", ret);
regulator_disable(led->isink);
return;
}
led->enabled = 1;
}
/*
 * Turn the LED supplies off in reverse order of wm8350_led_enable():
 * DCDC first, then ISINK.  On ISINK failure the DCDC is re-enabled to
 * keep the enable counts consistent.  Caller holds led->mutex.
 */
static void wm8350_led_disable(struct wm8350_led *led)
{
int ret;
if (!led->enabled)
return;
ret = regulator_disable(led->dcdc);
if (ret != 0) {
dev_err(led->cdev.dev, "Failed to disable DCDC: %d\n", ret);
return;
}
ret = regulator_disable(led->isink);
if (ret != 0) {
dev_err(led->cdev.dev, "Failed to disable ISINK: %d\n", ret);
regulator_enable(led->dcdc);
return;
}
led->enabled = 0;
}
/*
 * Deferred brightness update.  Runs outside the brightness_set atomic
 * context: snapshots led->value under the spinlock, then (under the
 * mutex) either disables the LED or programs the matching current-limit
 * table entry and enables it.
 */
static void led_work(struct work_struct *work)
{
struct wm8350_led *led = container_of(work, struct wm8350_led, work);
int ret;
int uA;	/* index into isink_cur[], not a raw microamp value */
unsigned long flags;
mutex_lock(&led->mutex);
spin_lock_irqsave(&led->value_lock, flags);
if (led->value == LED_OFF) {
spin_unlock_irqrestore(&led->value_lock, flags);
wm8350_led_disable(led);
goto out;
}
/* This scales linearly into the index of valid current
 * settings which results in a linear scaling of perceived
 * brightness due to the non-linear current settings provided
 * by the hardware.
 */
uA = (led->max_uA_index * led->value) / LED_FULL;
spin_unlock_irqrestore(&led->value_lock, flags);
/* max_uA_index is bounded at probe time, so this should never fire */
BUG_ON(uA >= ARRAY_SIZE(isink_cur));
ret = regulator_set_current_limit(led->isink, isink_cur[uA],
isink_cur[uA]);
if (ret != 0)
dev_err(led->cdev.dev, "Failed to set %duA: %d\n",
isink_cur[uA], ret);
wm8350_led_enable(led);
out:
mutex_unlock(&led->mutex);
}
/*
 * brightness_set callback.  May be called from atomic context, so only
 * record the requested value under the spinlock and defer the regulator
 * work (which can sleep) to led_work().
 */
static void wm8350_led_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct wm8350_led *led = to_wm8350_led(led_cdev);
unsigned long flags;
spin_lock_irqsave(&led->value_lock, flags);
led->value = value;
schedule_work(&led->work);
spin_unlock_irqrestore(&led->value_lock, flags);
}
/* Shutdown hook: force the LED off synchronously under the mutex. */
static void wm8350_led_shutdown(struct platform_device *pdev)
{
struct wm8350_led *led = platform_get_drvdata(pdev);
mutex_lock(&led->mutex);
led->value = LED_OFF;
wm8350_led_disable(led);
mutex_unlock(&led->mutex);
}
/*
 * Probe: acquire the ISINK and DCDC regulators, validate the requested
 * maximum current against the hardware's isink_cur[] table, set up the
 * per-LED state (locks, deferred work) and register the LED class
 * device.  The regulators are released on every failure path; the led
 * struct itself is devm-managed.
 */
static int wm8350_led_probe(struct platform_device *pdev)
{
struct regulator *isink, *dcdc;
struct wm8350_led *led;
struct wm8350_led_platform_data *pdata = pdev->dev.platform_data;
int ret, i;
if (pdata == NULL) {
dev_err(&pdev->dev, "no platform data\n");
return -ENODEV;
}
/* the table is sorted ascending, so anything below entry 0 is bogus */
if (pdata->max_uA < isink_cur[0]) {
dev_err(&pdev->dev, "Invalid maximum current %duA\n",
pdata->max_uA);
return -EINVAL;
}
isink = regulator_get(&pdev->dev, "led_isink");
if (IS_ERR(isink)) {
printk(KERN_ERR "%s: can't get ISINK\n", __func__);
return PTR_ERR(isink);
}
dcdc = regulator_get(&pdev->dev, "led_vcc");
if (IS_ERR(dcdc)) {
printk(KERN_ERR "%s: can't get DCDC\n", __func__);
ret = PTR_ERR(dcdc);
goto err_isink;
}
led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
if (led == NULL) {
ret = -ENOMEM;
goto err_dcdc;
}
led->cdev.brightness_set = wm8350_led_set;
led->cdev.default_trigger = pdata->default_trigger;
led->cdev.name = pdata->name;
led->cdev.flags |= LED_CORE_SUSPENDRESUME;
led->enabled = regulator_is_enabled(isink);
led->isink = isink;
led->dcdc = dcdc;
/* find the largest table index whose current fits under max_uA */
for (i = 0; i < ARRAY_SIZE(isink_cur) - 1; i++)
if (isink_cur[i] >= pdata->max_uA)
break;
led->max_uA_index = i;
if (pdata->max_uA != isink_cur[i])
dev_warn(&pdev->dev,
"Maximum current %duA is not directly supported,"
" check platform data\n",
pdata->max_uA);
spin_lock_init(&led->value_lock);
mutex_init(&led->mutex);
INIT_WORK(&led->work, led_work);
led->value = LED_OFF;
platform_set_drvdata(pdev, led);
ret = led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
goto err_dcdc;
return 0;
err_dcdc:
regulator_put(dcdc);
err_isink:
regulator_put(isink);
return ret;
}
/*
 * Remove: unregister the class device, flush any pending brightness
 * work, switch the LED off, and release both regulators (the led
 * struct is devm-allocated and freed by the core).
 */
static int wm8350_led_remove(struct platform_device *pdev)
{
struct wm8350_led *led = platform_get_drvdata(pdev);
led_classdev_unregister(&led->cdev);
flush_work_sync(&led->work);
wm8350_led_disable(led);
regulator_put(led->dcdc);
regulator_put(led->isink);
return 0;
}
/* Platform driver glue; module_platform_driver() generates init/exit. */
static struct platform_driver wm8350_led_driver = {
.driver = {
.name = "wm8350-led",
.owner = THIS_MODULE,
},
.probe = wm8350_led_probe,
.remove = wm8350_led_remove,
.shutdown = wm8350_led_shutdown,
};
module_platform_driver(wm8350_led_driver);
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM8350 LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm8350-led");
| gpl-2.0 |
NoelMacwan/Kernel-10.4.1.B.0.101 | arch/mips/math-emu/sp_scalb.c | 10367 | 1464 | /* IEEE754 floating point arithmetic
* single precision
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754sp.h"
/*
 * Compute x * 2^n for single precision, without computing 2^n.
 * Decomposes x (COMPXSP/EXPLODEXSP macros), dispatches on its class:
 * signalling NaNs raise the invalid-op exception, quiet NaNs / infs /
 * zeros pass through unchanged, denormals are normalized first, and
 * normal numbers are renormalized with the exponent biased by n.
 */
ieee754sp ieee754sp_scalb(ieee754sp x, int n)
{
COMPXSP;
CLEARCX;
EXPLODEXSP;
switch (xc) {
case IEEE754_CLASS_SNAN:
return ieee754sp_nanxcpt(x, "scalb", x, n);
case IEEE754_CLASS_QNAN:
case IEEE754_CLASS_INF:
case IEEE754_CLASS_ZERO:
return x;
case IEEE754_CLASS_DNORM:
SPDNORMX;
break;
case IEEE754_CLASS_NORM:
break;
}
/* mantissa is pre-shifted left 3 for the rounding bits SPNORMRET expects */
SPNORMRET2(xs, xe + n, xm << 3, "scalb", x, n);
}
/* ldexp is identical to scalb for binary floating point (radix 2). */
ieee754sp ieee754sp_ldexp(ieee754sp x, int n)
{
return ieee754sp_scalb(x, n);
}
| gpl-2.0 |
johnnyslt/kernel_zte_v967s | arch/mips/math-emu/ieee754m.c | 10367 | 1415 | /*
* floor, trunc, ceil
*/
/*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* ########################################################################
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* ########################################################################
*/
#include "ieee754.h"
/*
 * floor(x): split x into integral part i and fraction via modf; if the
 * fraction is negative (x < 0 and not integral), step i down by one.
 */
ieee754dp ieee754dp_floor(ieee754dp x)
{
ieee754dp i;
if (ieee754dp_lt(ieee754dp_modf(x, &i), ieee754dp_zero(0)))
return ieee754dp_sub(i, ieee754dp_one(0));
else
return i;
}
/*
 * ceil(x): mirror of floor — if the fractional part is positive
 * (x > 0 and not integral), step the integral part up by one.
 */
ieee754dp ieee754dp_ceil(ieee754dp x)
{
ieee754dp i;
if (ieee754dp_gt(ieee754dp_modf(x, &i), ieee754dp_zero(0)))
return ieee754dp_add(i, ieee754dp_one(0));
else
return i;
}
/*
 * trunc(x): round toward zero — simply the integral part produced by
 * modf, with the fractional part discarded.
 */
ieee754dp ieee754dp_trunc(ieee754dp x)
{
	ieee754dp integral;

	(void) ieee754dp_modf(x, &integral);
	return integral;
}
| gpl-2.0 |
Conap30/htc_kernel_desirec_cfs | drivers/infiniband/hw/ipath/ipath_wc_ppc64.c | 13439 | 2170 | /*
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*
* This file is conditionally built on PowerPC only. Otherwise weak symbol
* versions of the functions exported from here are used.
*/
#include "ipath_kernel.h"
/**
* ipath_enable_wc - enable write combining for MMIO writes to the device
* @dd: infinipath device
*
* Nothing to do on PowerPC, so just return without error.
*/
int ipath_enable_wc(struct ipath_devdata *dd)
{
	/* PowerPC needs no explicit write-combining setup; always succeed. */
return 0;
}
/**
* ipath_unordered_wc - indicate whether write combining is unordered
*
* Because our performance depends on our ability to do write
* combining mmio writes in the most efficient way, we need to
* know if we are on a processor that may reorder stores when
* write combining.
*/
int ipath_unordered_wc(void)
{
return 1;
}
| gpl-2.0 |
TeamJB/kernel_samsung_smdk4210 | arch/sh/lib64/udelay.c | 13951 | 1211 | /*
* arch/sh/lib64/udelay.c
*
* Delay routines, using a pre-computed "loops_per_jiffy" value.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sched.h>
#include <asm/param.h>
/*
* Use only for very small delays (< 1 msec).
*
* The active part of our cycle counter is only 32-bits wide, and
* we're treating the difference between two marks as signed. On
* a 1GHz box, that's about 2 seconds.
*/
/*
 * Busy-wait for `loops` iterations of a minimal SH-5 decrement-and-branch
 * loop (pta/addi/bne).  The asm must not be touched: timing depends on
 * this exact instruction sequence.
 */
void __delay(unsigned long loops)
{
long long dummy;
__asm__ __volatile__("gettr tr0, %1\n\t"
"pta $+4, tr0\n\t"
"addi %0, -1, %0\n\t"
"bne %0, r63, tr0\n\t"
"ptabs %1, tr0\n\t":"=r"(loops),
"=r"(dummy)
:"0"(loops));
}
/*
 * Scale a fixed-point loop count (xloops, units of 2^-32 seconds) by the
 * calibrated loops-per-jiffy of the current CPU and busy-wait that long.
 */
void __const_udelay(unsigned long xloops)
{
__delay(xloops * (HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy));
}
/* Microsecond delay: 0x000010c6 is the fixed-point form of 2^32/1000000. */
void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
}
/* Nanosecond delay: 0x00000005 approximates 2^32/10^9 in fixed point. */
void __ndelay(unsigned long nsecs)
{
__const_udelay(nsecs * 0x00000005);
}
| gpl-2.0 |
lbule/kernel_htc_mtk | arch/ia64/lib/io.c | 13951 | 2610 | #include <linux/module.h>
#include <linux/types.h>
#include <asm/io.h>
/*
* Copy data from IO memory space to "real" memory space.
* This needs to be optimized.
*/
/*
 * Copy `count` bytes from MMIO space to normal memory, one readb() at a
 * time (byte-at-a-time keeps device access semantics; noted above as a
 * candidate for optimization).
 */
void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
{
char *dst = to;
while (count) {
count--;
*dst++ = readb(from++);
}
}
EXPORT_SYMBOL(memcpy_fromio);
/*
* Copy data from "real" memory space to IO memory space.
* This needs to be optimized.
*/
/*
 * Copy `count` bytes from normal memory to MMIO space, one writeb() at
 * a time.
 */
void memcpy_toio(volatile void __iomem *to, const void *from, long count)
{
const char *src = from;
while (count) {
count--;
writeb(*src++, to++);
}
}
EXPORT_SYMBOL(memcpy_toio);
/*
* "memset" on IO memory space.
* This needs to be optimized.
*/
/*
 * Fill `count` bytes of MMIO space with the low byte of c, one
 * writeb() at a time.
 */
void memset_io(volatile void __iomem *dst, int c, long count)
{
unsigned char ch = (char)(c & 0xff);
while (count) {
count--;
writeb(ch, dst);
dst++;
}
}
EXPORT_SYMBOL(memset_io);
#ifdef CONFIG_IA64_GENERIC
#undef __ia64_inb
#undef __ia64_inw
#undef __ia64_inl
#undef __ia64_outb
#undef __ia64_outw
#undef __ia64_outl
#undef __ia64_readb
#undef __ia64_readw
#undef __ia64_readl
#undef __ia64_readq
#undef __ia64_readb_relaxed
#undef __ia64_readw_relaxed
#undef __ia64_readl_relaxed
#undef __ia64_readq_relaxed
#undef __ia64_writeb
#undef __ia64_writew
#undef __ia64_writel
#undef __ia64_writeq
#undef __ia64_mmiowb
unsigned int
__ia64_inb (unsigned long port)
{
return ___ia64_inb(port);
}
unsigned int
__ia64_inw (unsigned long port)
{
return ___ia64_inw(port);
}
unsigned int
__ia64_inl (unsigned long port)
{
return ___ia64_inl(port);
}
void
__ia64_outb (unsigned char val, unsigned long port)
{
___ia64_outb(val, port);
}
void
__ia64_outw (unsigned short val, unsigned long port)
{
___ia64_outw(val, port);
}
void
__ia64_outl (unsigned int val, unsigned long port)
{
___ia64_outl(val, port);
}
unsigned char
__ia64_readb (void __iomem *addr)
{
return ___ia64_readb (addr);
}
unsigned short
__ia64_readw (void __iomem *addr)
{
return ___ia64_readw (addr);
}
unsigned int
__ia64_readl (void __iomem *addr)
{
return ___ia64_readl (addr);
}
unsigned long
__ia64_readq (void __iomem *addr)
{
return ___ia64_readq (addr);
}
unsigned char
__ia64_readb_relaxed (void __iomem *addr)
{
return ___ia64_readb (addr);
}
unsigned short
__ia64_readw_relaxed (void __iomem *addr)
{
return ___ia64_readw (addr);
}
unsigned int
__ia64_readl_relaxed (void __iomem *addr)
{
return ___ia64_readl (addr);
}
unsigned long
__ia64_readq_relaxed (void __iomem *addr)
{
return ___ia64_readq (addr);
}
void
__ia64_mmiowb(void)
{
___ia64_mmiowb();
}
#endif /* CONFIG_IA64_GENERIC */
| gpl-2.0 |
FrancescoCG/CrazySuperKernel-CM13-KLTE-NEW-REBASE | arch/ia64/lib/io.c | 13951 | 2610 | #include <linux/module.h>
#include <linux/types.h>
#include <asm/io.h>
/*
* Copy data from IO memory space to "real" memory space.
* This needs to be optimized.
*/
/*
 * Copy `count` bytes from MMIO space to normal memory, byte by byte
 * through readb() (noted above as a candidate for optimization).
 */
void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
{
	char *dest = to;

	while (count--)
		*dest++ = readb(from++);
}
EXPORT_SYMBOL(memcpy_fromio);
/*
* Copy data from "real" memory space to IO memory space.
* This needs to be optimized.
*/
/*
 * Copy `count` bytes from normal memory into MMIO space, byte by byte
 * through writeb().
 */
void memcpy_toio(volatile void __iomem *to, const void *from, long count)
{
	const char *source = from;

	while (count--)
		writeb(*source++, to++);
}
EXPORT_SYMBOL(memcpy_toio);
/*
* "memset" on IO memory space.
* This needs to be optimized.
*/
/*
 * Fill `count` bytes of MMIO space with the low byte of c, byte by
 * byte through writeb().
 */
void memset_io(volatile void __iomem *dst, int c, long count)
{
	unsigned char byte = (char)(c & 0xff);

	while (count--)
		writeb(byte, dst++);
}
EXPORT_SYMBOL(memset_io);
#ifdef CONFIG_IA64_GENERIC
#undef __ia64_inb
#undef __ia64_inw
#undef __ia64_inl
#undef __ia64_outb
#undef __ia64_outw
#undef __ia64_outl
#undef __ia64_readb
#undef __ia64_readw
#undef __ia64_readl
#undef __ia64_readq
#undef __ia64_readb_relaxed
#undef __ia64_readw_relaxed
#undef __ia64_readl_relaxed
#undef __ia64_readq_relaxed
#undef __ia64_writeb
#undef __ia64_writew
#undef __ia64_writel
#undef __ia64_writeq
#undef __ia64_mmiowb
unsigned int
__ia64_inb (unsigned long port)
{
return ___ia64_inb(port);
}
unsigned int
__ia64_inw (unsigned long port)
{
return ___ia64_inw(port);
}
unsigned int
__ia64_inl (unsigned long port)
{
return ___ia64_inl(port);
}
void
__ia64_outb (unsigned char val, unsigned long port)
{
___ia64_outb(val, port);
}
void
__ia64_outw (unsigned short val, unsigned long port)
{
___ia64_outw(val, port);
}
void
__ia64_outl (unsigned int val, unsigned long port)
{
___ia64_outl(val, port);
}
unsigned char
__ia64_readb (void __iomem *addr)
{
return ___ia64_readb (addr);
}
unsigned short
__ia64_readw (void __iomem *addr)
{
return ___ia64_readw (addr);
}
unsigned int
__ia64_readl (void __iomem *addr)
{
return ___ia64_readl (addr);
}
unsigned long
__ia64_readq (void __iomem *addr)
{
return ___ia64_readq (addr);
}
unsigned char
__ia64_readb_relaxed (void __iomem *addr)
{
return ___ia64_readb (addr);
}
unsigned short
__ia64_readw_relaxed (void __iomem *addr)
{
return ___ia64_readw (addr);
}
unsigned int
__ia64_readl_relaxed (void __iomem *addr)
{
return ___ia64_readl (addr);
}
unsigned long
__ia64_readq_relaxed (void __iomem *addr)
{
return ___ia64_readq (addr);
}
void
__ia64_mmiowb(void)
{
___ia64_mmiowb();
}
#endif /* CONFIG_IA64_GENERIC */
| gpl-2.0 |
ezequielv/binutils-gdb | readline/readline.c | 128 | 33619 | /* readline.c -- a general facility for reading lines of input
with emacs style editing and completion. */
/* Copyright (C) 1987-2009 Free Software Foundation, Inc.
This file is part of the GNU Readline Library (Readline), a library
for reading lines of text with interactive input and history editing.
Readline is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Readline is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Readline. If not, see <http://www.gnu.org/licenses/>.
*/
#define READLINE_LIBRARY
#if defined (HAVE_CONFIG_H)
# include <config.h>
#endif
#include <sys/types.h>
#include "posixstat.h"
#include <fcntl.h>
#if defined (HAVE_SYS_FILE_H)
# include <sys/file.h>
#endif /* HAVE_SYS_FILE_H */
#if defined (HAVE_UNISTD_H)
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#if defined (HAVE_STDLIB_H)
# include <stdlib.h>
#else
# include "ansi_stdlib.h"
#endif /* HAVE_STDLIB_H */
#if defined (HAVE_LOCALE_H)
# include <locale.h>
#endif
#include <stdio.h>
#include "posixjmp.h"
#include <errno.h>
#if !defined (errno)
extern int errno;
#endif /* !errno */
/* System-specific feature definitions and include files. */
#include "rldefs.h"
#include "rlmbutil.h"
#if defined (__EMX__)
# define INCL_DOSPROCESS
# include <os2.h>
#endif /* __EMX__ */
/* Some standard library routines. */
#include "readline.h"
#include "history.h"
#include "rlprivate.h"
#include "rlshell.h"
#include "xmalloc.h"
#ifndef RL_LIBRARY_VERSION
# define RL_LIBRARY_VERSION "5.1"
#endif
#ifndef RL_READLINE_VERSION
# define RL_READLINE_VERSION 0x0501
#endif
extern void _rl_free_history_entry PARAMS((HIST_ENTRY *));
/* Forward declarations used in this file. */
static char *readline_internal PARAMS((void));
static void readline_initialize_everything PARAMS((void));
static void bind_arrow_keys_internal PARAMS((Keymap));
static void bind_arrow_keys PARAMS((void));
static void readline_default_bindings PARAMS((void));
static void reset_default_bindings PARAMS((void));
static int _rl_subseq_result PARAMS((int, Keymap, int, int));
static int _rl_subseq_getchar PARAMS((int));
/* **************************************************************** */
/* */
/* Line editing input utility */
/* */
/* **************************************************************** */
const char *rl_library_version = RL_LIBRARY_VERSION;
int rl_readline_version = RL_READLINE_VERSION;
/* True if this is `real' readline as opposed to some stub substitute. */
int rl_gnu_readline_p = 1;
/* A pointer to the keymap that is currently in use.
By default, it is the standard emacs keymap. */
Keymap _rl_keymap = emacs_standard_keymap;
/* The current style of editing. */
int rl_editing_mode = emacs_mode;
/* The current insert mode: input (the default) or overwrite */
int rl_insert_mode = RL_IM_DEFAULT;
/* Non-zero if we called this function from _rl_dispatch(). It's present
so functions can find out whether they were called from a key binding
or directly from an application. */
int rl_dispatching;
/* Non-zero if the previous command was a kill command. */
int _rl_last_command_was_kill = 0;
/* The current value of the numeric argument specified by the user. */
int rl_numeric_arg = 1;
/* Non-zero if an argument was typed. */
int rl_explicit_arg = 0;
/* Temporary value used while generating the argument. */
int rl_arg_sign = 1;
/* Non-zero means we have been called at least once before. */
static int rl_initialized;
#if 0
/* If non-zero, this program is running in an EMACS buffer. */
static int running_in_emacs;
#endif
/* Flags word encapsulating the current readline state. */
int rl_readline_state = RL_STATE_NONE;
/* The current offset in the current input line. */
int rl_point;
/* Mark in the current input line. */
int rl_mark;
/* Length of the current input line. */
int rl_end;

/* Make this non-zero to return the current input_line. */
int rl_done;

/* The last function executed by readline. */
rl_command_func_t *rl_last_func = (rl_command_func_t *)NULL;

/* Top level environment for readline_internal ().  Used as the setjmp
   target for _rl_abort_internal and friends. */
procenv_t _rl_top_level;

/* The streams we interact with. */
FILE *_rl_in_stream, *_rl_out_stream;

/* The names of the streams that we do input and output to. */
FILE *rl_instream = (FILE *)NULL;
FILE *rl_outstream = (FILE *)NULL;

/* Non-zero means echo characters as they are read.  Defaults to no echo;
   set to 1 if there is a controlling terminal, we can get its attributes,
   and the attributes include `echo'.  Look at rltty.c:prepare_terminal_settings
   for the code that sets it. */
int _rl_echoing_p = 0;

/* Current prompt. */
char *rl_prompt = (char *)NULL;

/* Result of rl_expand_prompt () for the current prompt; stored by
   rl_set_prompt (). */
int rl_visible_prompt_length = 0;

/* Set to non-zero by calling application if it has already printed rl_prompt
   and does not want readline to do it the first time. */
int rl_already_prompted = 0;

/* The number of characters read in order to type this complete command. */
int rl_key_sequence_length = 0;

/* If non-zero, then this is the address of a function to call just
   before readline_internal_setup () prints the first prompt. */
rl_hook_func_t *rl_startup_hook = (rl_hook_func_t *)NULL;

/* If non-zero, this is the address of a function to call just before
   readline_internal_setup () returns and readline_internal starts
   reading input characters. */
rl_hook_func_t *rl_pre_input_hook = (rl_hook_func_t *)NULL;

/* What we use internally.  You should always refer to RL_LINE_BUFFER. */
static char *the_line;

/* The character that can generate an EOF.  Really read from
   the terminal driver... just defaulted here. */
int _rl_eof_char = CTRL ('D');

/* Non-zero makes this the next keystroke to read. */
int rl_pending_input = 0;

/* Pointer to a useful terminal name. */
const char *rl_terminal_name = (const char *)NULL;

/* Non-zero means to always use horizontal scrolling in line display. */
int _rl_horizontal_scroll_mode = 0;

/* Non-zero means to display an asterisk at the starts of history lines
   which have been modified. */
int _rl_mark_modified_lines = 0;

/* The style of `bell' notification preferred.  This can be set to NO_BELL,
   AUDIBLE_BELL, or VISIBLE_BELL. */
int _rl_bell_preference = AUDIBLE_BELL;

/* String inserted into the line by rl_insert_comment (). */
char *_rl_comment_begin;

/* Keymap holding the function currently being executed. */
Keymap rl_executing_keymap;

/* Keymap we're currently using to dispatch. */
Keymap _rl_dispatching_keymap;

/* Non-zero means to erase entire line, including prompt, on empty input lines. */
int rl_erase_empty_line = 0;

/* Non-zero means to read only this many characters rather than up to a
   character bound to accept-line. */
int rl_num_chars_to_read;

/* Line buffer and maintenance. */
char *rl_line_buffer = (char *)NULL;
int rl_line_buffer_len = 0;

/* Key sequence `contexts': head of the chain used to simulate recursive
   multi-key dispatch in callback mode. */
_rl_keyseq_cxt *_rl_kscxt = 0;

/* Forward declarations used by the display, termcap, and history code. */

/* **************************************************************** */
/*								    */
/*			`Forward' declarations			    */
/*								    */
/* **************************************************************** */

/* Non-zero means do not parse any lines other than comments and
   parser directives. */
unsigned char _rl_parsing_conditionalized_out = 0;

/* Non-zero means to convert characters with the meta bit set to
   escape-prefixed characters so we can indirect through
   emacs_meta_keymap or vi_escape_keymap. */
int _rl_convert_meta_chars_to_ascii = 1;

/* Non-zero means to output characters with the meta bit set directly
   rather than as a meta-prefixed escape sequence. */
int _rl_output_meta_chars = 0;

/* Non-zero means to look at the termios special characters and bind
   them to equivalent readline functions at startup. */
int _rl_bind_stty_chars = 1;

/* Non-zero means to go through the history list at every newline (or
   whenever rl_done is set and readline returns) and revert each line to
   its initial state. */
int _rl_revert_all_at_newline = 0;

/* Non-zero means to honor the termios ECHOCTL bit and echo control
   characters corresponding to keyboard-generated signals. */
int _rl_echo_control_chars = 1;

/* **************************************************************** */
/*								    */
/*			Top Level Functions			    */
/*								    */
/* **************************************************************** */

/* Non-zero means treat 0200 bit in terminal input as Meta bit. */
int _rl_meta_flag = 0;	/* Forward declaration */
/* Set up the prompt and expand it. Called from readline() and
rl_callback_handler_install (). */
int
rl_set_prompt (prompt)
const char *prompt;
{
FREE (rl_prompt);
rl_prompt = prompt ? savestring (prompt) : (char *)NULL;
rl_display_prompt = rl_prompt ? rl_prompt : "";
rl_visible_prompt_length = rl_expand_prompt (rl_prompt);
return 0;
}
/* Read a line of input.  Prompt with PROMPT.  An empty PROMPT means
   none.  A return value of NULL means that EOF was encountered.
   The returned string is allocated; the caller owns it. */
char *
readline (prompt)
     const char *prompt;
{
  char *value;
#if 0
  int in_callback;
#endif

  /* If we are at EOF return a NULL string. */
  if (rl_pending_input == EOF)
    {
      rl_clear_pending_input ();
      return ((char *)NULL);
    }

#if 0
  /* If readline() is called after installing a callback handler, temporarily
     turn off the callback state to avoid ensuing messiness.  Patch supplied
     by the gdb folks.  XXX -- disabled.  This can be fooled and readline
     left in a strange state by a poorly-timed longjmp. */
  if (in_callback = RL_ISSTATE (RL_STATE_CALLBACK))
    RL_UNSETSTATE (RL_STATE_CALLBACK);
#endif

  rl_set_prompt (prompt);

  rl_initialize ();
  /* Put the terminal into the mode readline needs; undone below via
     rl_deprep_term_function after the line has been read. */
  if (rl_prep_term_function)
    (*rl_prep_term_function) (_rl_meta_flag);

#if defined (HANDLE_SIGNALS)
  rl_set_signals ();
#endif

  value = readline_internal ();
  if (rl_deprep_term_function)
    (*rl_deprep_term_function) ();

#if defined (HANDLE_SIGNALS)
  rl_clear_signals ();
#endif

#if 0
  if (in_callback)
    RL_SETSTATE (RL_STATE_CALLBACK);
#endif

  return (value);
}
/* In callback mode the setup/char/teardown phases are called separately
   from the application's event loop, so they must have external linkage;
   otherwise they are private to this file. */
#if defined (READLINE_CALLBACKS)
# define STATIC_CALLBACK
#else
# define STATIC_CALLBACK static
#endif

/* Prepare to read a line: bind the I/O streams, run the startup hook,
   display the initial prompt, and (in vi mode) enter insertion mode.
   Called once before the character-reading loop starts. */
STATIC_CALLBACK void
readline_internal_setup ()
{
  char *nprompt;

  _rl_in_stream = rl_instream;
  _rl_out_stream = rl_outstream;

  /* The startup hook runs before any prompt is printed, so it may still
     change rl_prompt or the streams. */
  if (rl_startup_hook)
    (*rl_startup_hook) ();

  /* If we're not echoing, we still want to at least print a prompt, because
     rl_redisplay will not do it for us.  If the calling application has a
     custom redisplay function, though, let that function handle it. */
  if (_rl_echoing_p == 0 && rl_redisplay_function == rl_redisplay)
    {
      if (rl_prompt && rl_already_prompted == 0)
	{
	  /* Print the prompt with invisible-character markers removed. */
	  nprompt = _rl_strip_prompt (rl_prompt);
	  fprintf (_rl_out_stream, "%s", nprompt);
	  fflush (_rl_out_stream);
	  xfree (nprompt);
	}
    }
  else
    {
      if (rl_prompt && rl_already_prompted)
	rl_on_new_line_with_prompt ();
      else
	rl_on_new_line ();
      (*rl_redisplay_function) ();
    }

#if defined (VI_MODE)
  if (rl_editing_mode == vi_mode)
    rl_vi_insert_mode (1, 'i');
#endif /* VI_MODE */

  /* The pre-input hook runs after the prompt is displayed, immediately
     before input characters are read. */
  if (rl_pre_input_hook)
    (*rl_pre_input_hook) ();

  RL_CHECK_SIGNALS ();
}
/* Finish reading a line.  EOF is non-zero if the read ended at end of
   input rather than with an accepted line.  Returns an allocated copy
   of the line, or NULL on EOF.  Undoes any in-place edits made to
   history entries while the user browsed the history list. */
STATIC_CALLBACK char *
readline_internal_teardown (eof)
     int eof;
{
  char *temp;
  HIST_ENTRY *entry;

  RL_CHECK_SIGNALS ();

  /* Restore the original of this history line, iff the line that we
     are editing was originally in the history, AND the line has changed. */
  entry = current_history ();
  if (entry && rl_undo_list)
    {
      /* Save the edited text, revert the buffer to the entry's original
	 text via the undo list, swap that into the history entry, then
	 put the edited text back into the line buffer. */
      temp = savestring (the_line);
      rl_revert_line (1, 0);
      entry = replace_history_entry (where_history (), the_line, (histdata_t)NULL);
      _rl_free_history_entry (entry);

      strcpy (the_line, temp);
      xfree (temp);
    }

  if (_rl_revert_all_at_newline)
    _rl_revert_all_lines ();

  /* At any rate, it is highly likely that this line has an undo list.  Get
     rid of it now. */
  if (rl_undo_list)
    rl_free_undo_list ();

  /* Restore normal cursor, if available. */
  _rl_set_insert_mode (RL_IM_INSERT, 0);

  return (eof ? (char *)NULL : savestring (the_line));
}
/* Housekeeping performed after each dispatched character: run the vi-mode
   cursor fixup, force accept-line once a requested character count has
   been reached, redisplay if the line is not finished, and optionally
   erase an empty accepted line. */
void
_rl_internal_char_cleanup ()
{
  int at_read_limit;

#if defined (VI_MODE)
  /* In vi mode, when you exit insert mode, the cursor moves back
     over the previous character.  We explicitly check for that here. */
  if (rl_editing_mode == vi_mode && _rl_keymap == vi_movement_keymap)
    rl_vi_check ();
#endif /* VI_MODE */

  /* If the caller asked for a fixed number of characters and we have
     read at least that many, accept the line now. */
  at_read_limit = rl_num_chars_to_read && rl_end >= rl_num_chars_to_read;
  if (at_read_limit)
    {
      (*rl_redisplay_function) ();
      _rl_want_redisplay = 0;
      rl_newline (1, '\n');
    }

  if (rl_done == 0)
    {
      (*rl_redisplay_function) ();
      _rl_want_redisplay = 0;
    }

  /* If the application writer has told us to erase the entire line if
     the only character typed was something bound to rl_newline, do so. */
  if (rl_erase_empty_line && rl_done && rl_point == 0 && rl_end == 0
      && rl_last_func == rl_newline)
    _rl_erase_entire_line ();
}
/* Read and dispatch one character (callback build: readline_internal_char)
   or loop until the line is done (non-callback build:
   readline_internal_charloop).  The same body is compiled both ways; the
   preprocessor conditionals below either wrap it in a while loop or make
   each pass a separate call.  Returns non-zero when EOF was seen. */
STATIC_CALLBACK int
#if defined (READLINE_CALLBACKS)
readline_internal_char ()
#else
readline_internal_charloop ()
#endif
{
  /* static so the values survive between per-character calls in the
     callback build. */
  static int lastc, eof_found;
  int c, code, lk;

  lastc = -1;
  eof_found = 0;

#if !defined (READLINE_CALLBACKS)
  while (rl_done == 0)
    {
#endif
      lk = _rl_last_command_was_kill;

      /* Aborted commands longjmp back here with a non-zero code. */
      code = setjmp (_rl_top_level);

      if (code)
	{
	  (*rl_redisplay_function) ();
	  _rl_want_redisplay = 0;
	  /* If we get here, we're not being called from something dispatched
	     from _rl_callback_read_char(), which sets up its own value of
	     _rl_top_level (saving and restoring the old, of course), so
	     we can just return here. */
	  if (RL_ISSTATE (RL_STATE_CALLBACK))
	    return (0);
	}

      if (rl_pending_input == 0)
	{
	  /* Then initialize the argument and number of keys read. */
	  _rl_reset_argument ();
	  rl_key_sequence_length = 0;
	}

      RL_SETSTATE(RL_STATE_READCMD);
      c = rl_read_key ();
      RL_UNSETSTATE(RL_STATE_READCMD);

      /* look at input.c:rl_getc() for the circumstances under which this will
	 be returned; punt immediately on read error without converting it to
	 a newline. */
      if (c == READERR)
	{
#if defined (READLINE_CALLBACKS)
	  RL_SETSTATE(RL_STATE_DONE);
	  return (rl_done = 1);
#else
	  eof_found = 1;
	  break;
#endif
	}

      /* EOF typed to a non-blank line is a <NL>. */
      if (c == EOF && rl_end)
	c = NEWLINE;

      /* The character _rl_eof_char typed to blank line, and not as the
	 previous character is interpreted as EOF. */
      if (((c == _rl_eof_char && lastc != c) || c == EOF) && !rl_end)
	{
#if defined (READLINE_CALLBACKS)
	  RL_SETSTATE(RL_STATE_DONE);
	  return (rl_done = 1);
#else
	  eof_found = 1;
	  break;
#endif
	}

      lastc = c;
      _rl_dispatch ((unsigned char)c, _rl_keymap);
      RL_CHECK_SIGNALS ();

      /* If there was no change in _rl_last_command_was_kill, then no kill
	 has taken place.  Note that if input is pending we are reading
	 a prefix command, so nothing has changed yet. */
      if (rl_pending_input == 0 && lk == _rl_last_command_was_kill)
	_rl_last_command_was_kill = 0;

      _rl_internal_char_cleanup ();

#if defined (READLINE_CALLBACKS)
      return 0;
#else
    }

  return (eof_found);
#endif
}
#if defined (READLINE_CALLBACKS)
/* Synchronous character loop for the callback build: keep dispatching
   single characters until the line is done, returning the last
   per-character status (non-zero means EOF). */
static int
readline_internal_charloop ()
{
  int eof_status;

  for (eof_status = 1; rl_done == 0; )
    eof_status = readline_internal_char ();

  return eof_status;
}
#endif /* READLINE_CALLBACKS */
/* Read a line of input from the global rl_instream, doing output on
   the global rl_outstream.  If rl_prompt is non-null, then that is
   our prompt.  Runs the three phases in order: setup, character loop,
   teardown. */
static char *
readline_internal ()
{
  readline_internal_setup ();
  return (readline_internal_teardown (readline_internal_charloop ()));
}
/* Reset the edit buffer to an empty line: cursor, end, and mark at
   offset zero, with the internal alias pointed at rl_line_buffer. */
void
_rl_init_line_state ()
{
  rl_point = 0;
  rl_end = 0;
  rl_mark = 0;

  the_line = rl_line_buffer;
  the_line[0] = '\0';
}
/* Re-point the internal alias at rl_line_buffer; needed after the
   buffer is replaced or reallocated (see rl_restore_state). */
void
_rl_set_the_line ()
{
  the_line = rl_line_buffer;
}
#if defined (READLINE_CALLBACKS)
/* Allocate a fresh key-sequence context and link it in front of the
   current chain (_rl_kscxt is NOT updated here; the caller does that). */
_rl_keyseq_cxt *
_rl_keyseq_cxt_alloc ()
{
  _rl_keyseq_cxt *kscxt;

  kscxt = (_rl_keyseq_cxt *)xmalloc (sizeof (_rl_keyseq_cxt));

  kscxt->flags = 0;
  kscxt->subseq_arg = 0;
  kscxt->subseq_retval = 0;
  kscxt->okey = 0;

  /* Remember the previously active context so the chain can be walked. */
  kscxt->ocxt = _rl_kscxt;
  kscxt->childval = 42;		/* sentinel value */

  return kscxt;
}
/* Free a context allocated by _rl_keyseq_cxt_alloc.  Does not unlink
   CXT from the _rl_kscxt chain; callers unlink before disposing. */
void
_rl_keyseq_cxt_dispose (cxt)
    _rl_keyseq_cxt *cxt;
{
  xfree (cxt);
}
void
_rl_keyseq_chain_dispose ()
{
_rl_keyseq_cxt *cxt;
while (_rl_kscxt)
{
cxt = _rl_kscxt;
_rl_kscxt = _rl_kscxt->ocxt;
_rl_keyseq_cxt_dispose (cxt);
}
}
#endif
/* Read the next character of a multi-key sequence whose previous key
   was KEY.  Sets RL_STATE_METANEXT around the read when the prefix was
   ESC, and always sets RL_STATE_MOREINPUT.  Returns the key read. */
static int
_rl_subseq_getchar (key)
     int key;
{
  int ch, esc_prefix;

  esc_prefix = (key == ESC);

  if (esc_prefix)
    RL_SETSTATE (RL_STATE_METANEXT);
  RL_SETSTATE (RL_STATE_MOREINPUT);

  ch = rl_read_key ();

  RL_UNSETSTATE (RL_STATE_MOREINPUT);
  if (esc_prefix)
    RL_UNSETSTATE (RL_STATE_METANEXT);

  return ch;
}
#if defined (READLINE_CALLBACKS)
/* Continue dispatching a multi-key sequence in callback mode.  CXT is
   one frame of the simulated recursion built from chained contexts.
   Return values follow _rl_dispatch_subseq: 0 on success, -3 when a
   new frame was pushed onto the chain, other values propagate up. */
int
_rl_dispatch_callback (cxt)
     _rl_keyseq_cxt *cxt;
{
  int nkey, r;

  /* For now */
  /* The first time this context is used, we want to read input and dispatch
     on it.  When traversing the chain of contexts back `up', we want to use
     the value from the next context down.  We're simulating recursion using
     a chain of contexts. */
  if ((cxt->flags & KSEQ_DISPATCHED) == 0)
    {
      nkey = _rl_subseq_getchar (cxt->okey);
      if (nkey < 0)
	{
	  _rl_abort_internal ();
	  return -1;
	}
      r = _rl_dispatch_subseq (nkey, cxt->dmap, cxt->subseq_arg);
      cxt->flags |= KSEQ_DISPATCHED;
    }
  else
    r = cxt->childval;

  /* For now */
  if (r != -3)	/* don't do this if we indicate there will be other matches */
    r = _rl_subseq_result (r, cxt->oldmap, cxt->okey, (cxt->flags & KSEQ_SUBSEQ));

  RL_CHECK_SIGNALS ();
  if (r == 0)			/* success! */
    {
      _rl_keyseq_chain_dispose ();
      RL_UNSETSTATE (RL_STATE_MULTIKEY);
      return r;
    }

  /* Pop this frame and hand its result to the parent context, unless a
     new frame was just added to the chain (r == -3). */
  if (r != -3)			/* magic value that says we added to the chain */
    _rl_kscxt = cxt->ocxt;
  if (_rl_kscxt)
    _rl_kscxt->childval = r;
  if (r != -3)
    _rl_keyseq_cxt_dispose (cxt);

  return r;
}
#endif /* READLINE_CALLBACKS */
/* Do the command associated with KEY in MAP.
   If the associated command is really a keymap, then read
   another key, and dispatch into that map. */
int
_rl_dispatch (key, map)
     register int key;
     Keymap map;
{
  /* Record the map we are dispatching from; _rl_subseq_result reads
     and updates _rl_dispatching_keymap as prefix sequences resolve. */
  _rl_dispatching_keymap = map;
  return _rl_dispatch_subseq (key, map, 0);
}
/* Dispatch KEY in MAP, possibly as part of a multi-key sequence.
   GOT_SUBSEQ is non-zero when we are already inside a subsequence.
   Returns 0 on success; -1 when aborted or when a subsequence failed
   to match; -2 when an overridden ANYOTHERKEY binding should be tried
   by the caller (see _rl_subseq_result); -3 in callback mode when a
   new key-sequence context was pushed. */
int
_rl_dispatch_subseq (key, map, got_subseq)
     register int key;
     Keymap map;
     int got_subseq;
{
  int r, newkey;
  char *macro;
  rl_command_func_t *func;
#if defined (READLINE_CALLBACKS)
  _rl_keyseq_cxt *cxt;
#endif

  /* Translate a meta character into ESC followed by the stripped key
     when conversion is enabled and the map has an ESC submap. */
  if (META_CHAR (key) && _rl_convert_meta_chars_to_ascii)
    {
      if (map[ESC].type == ISKMAP)
	{
	  if (RL_ISSTATE (RL_STATE_MACRODEF))
	    _rl_add_macro_char (ESC);
	  map = FUNCTION_TO_KEYMAP (map, ESC);
	  key = UNMETA (key);
	  rl_key_sequence_length += 2;
	  return (_rl_dispatch (key, map));
	}
      else
	rl_ding ();
      return 0;
    }

  if (RL_ISSTATE (RL_STATE_MACRODEF))
    _rl_add_macro_char (key);

  r = 0;
  switch (map[key].type)
    {
    case ISFUNC:
      func = map[key].function;
      if (func)
	{
	  /* Special case rl_do_lowercase_version (). */
	  if (func == rl_do_lowercase_version)
	    return (_rl_dispatch (_rl_to_lower (key), map));

	  rl_executing_keymap = map;

	  rl_dispatching = 1;
	  RL_SETSTATE(RL_STATE_DISPATCHING);
	  (*map[key].function)(rl_numeric_arg * rl_arg_sign, key);
	  RL_UNSETSTATE(RL_STATE_DISPATCHING);
	  rl_dispatching = 0;

	  /* If we have input pending, then the last command was a prefix
	     command.  Don't change the state of rl_last_func.  Otherwise,
	     remember the last command executed in this variable. */
	  if (rl_pending_input == 0 && map[key].function != rl_digit_argument)
	    rl_last_func = map[key].function;

	  RL_CHECK_SIGNALS ();
	}
      else if (map[ANYOTHERKEY].function)
	{
	  /* OK, there's no function bound in this map, but there is a
	     shadow function that was overridden when the current keymap
	     was created.  Return -2 to note that. */
	  _rl_unget_char (key);
	  return -2;
	}
      else if (got_subseq)
	{
	  /* Return -1 to note that we're in a subsequence, but we don't
	     have a matching key, nor was one overridden.  This means
	     we need to back up the recursion chain and find the last
	     subsequence that is bound to a function. */
	  _rl_unget_char (key);
	  return -1;
	}
      else
	{
#if defined (READLINE_CALLBACKS)
	  RL_UNSETSTATE (RL_STATE_MULTIKEY);
	  _rl_keyseq_chain_dispose ();
#endif
	  _rl_abort_internal ();
	  return -1;
	}
      break;

    case ISKMAP:
      if (map[key].function != 0)
	{
#if defined (VI_MODE)
	  /* The only way this test will be true is if a subsequence has been
	     bound starting with ESC, generally the arrow keys.  What we do is
	     check whether there's input in the queue, which there generally
	     will be if an arrow key has been pressed, and, if there's not,
	     just dispatch to (what we assume is) rl_vi_movement_mode right
	     away.  This is essentially an input test with a zero timeout. */
	  if (rl_editing_mode == vi_mode && key == ESC && map == vi_insertion_keymap
	      && _rl_input_queued (0) == 0)
	    return (_rl_dispatch (ANYOTHERKEY, FUNCTION_TO_KEYMAP (map, key)));
#endif

	  rl_key_sequence_length++;
	  _rl_dispatching_keymap = FUNCTION_TO_KEYMAP (map, key);

	  /* Allocate new context here.  Use linked contexts (linked through
	     cxt->ocxt) to simulate recursion */
#if defined (READLINE_CALLBACKS)
	  if (RL_ISSTATE (RL_STATE_CALLBACK))
	    {
	      /* Return 0 only the first time, to indicate success to
		 _rl_callback_read_char.  The rest of the time, we're called
		 from _rl_dispatch_callback, so we return -3 to indicate
		 special handling is necessary. */
	      r = RL_ISSTATE (RL_STATE_MULTIKEY) ? -3 : 0;
	      cxt = _rl_keyseq_cxt_alloc ();

	      if (got_subseq)
		cxt->flags |= KSEQ_SUBSEQ;
	      cxt->okey = key;
	      cxt->oldmap = map;
	      cxt->dmap = _rl_dispatching_keymap;
	      cxt->subseq_arg = got_subseq || cxt->dmap[ANYOTHERKEY].function;

	      RL_SETSTATE (RL_STATE_MULTIKEY);
	      _rl_kscxt = cxt;

	      return r;		/* don't indicate immediate success */
	    }
#endif

	  /* Synchronous case: read the next key and recurse into the
	     submap, then let _rl_subseq_result interpret the outcome. */
	  newkey = _rl_subseq_getchar (key);
	  if (newkey < 0)
	    {
	      _rl_abort_internal ();
	      return -1;
	    }

	  r = _rl_dispatch_subseq (newkey, _rl_dispatching_keymap, got_subseq || map[ANYOTHERKEY].function);
	  return _rl_subseq_result (r, map, key, got_subseq);
	}
      else
	{
	  _rl_abort_internal ();
	  return -1;
	}
      break;

    case ISMACR:
      if (map[key].function != 0)
	{
	  /* The binding's "function" slot actually holds the macro text. */
	  macro = savestring ((char *)map[key].function);
	  _rl_with_macro_input (macro);
	  return 0;
	}
      break;
    }

#if defined (VI_MODE)
  if (rl_editing_mode == vi_mode && _rl_keymap == vi_movement_keymap &&
      key != ANYOTHERKEY &&
      _rl_vi_textmod_command (key))
    _rl_vi_set_last (key, rl_numeric_arg, rl_arg_sign);
#endif

  return (r);
}
/* Interpret the result R of dispatching the key after prefix KEY in
   MAP (see _rl_dispatch_subseq's return codes).  May re-dispatch via
   an overridden ANYOTHERKEY binding, or push KEY back and return -2/-1
   so an enclosing dispatch level can retry. */
static int
_rl_subseq_result (r, map, key, got_subseq)
     int r;
     Keymap map;
     int key, got_subseq;
{
  Keymap m;
  int type, nt;
  rl_command_func_t *func, *nf;

  if (r == -2)
    /* We didn't match anything, and the keymap we're indexed into
       shadowed a function previously bound to that prefix.  Call
       the function.  The recursive call to _rl_dispatch_subseq has
       already taken care of pushing any necessary input back onto
       the input queue with _rl_unget_char. */
    {
      m = _rl_dispatching_keymap;
      type = m[ANYOTHERKEY].type;
      func = m[ANYOTHERKEY].function;
      if (type == ISFUNC && func == rl_do_lowercase_version)
	r = _rl_dispatch (_rl_to_lower (key), map);
      else if (type == ISFUNC && func == rl_insert)
	{
	  /* If the function that was shadowed was self-insert, we
	     somehow need a keymap with map[key].func == self-insert.
	     Let's use this one.  Temporarily patch the entry, dispatch,
	     and restore it. */
	  nt = m[key].type;
	  nf = m[key].function;

	  m[key].type = type;
	  m[key].function = func;
	  r = _rl_dispatch (key, m);
	  m[key].type = nt;
	  m[key].function = nf;
	}
      else
	r = _rl_dispatch (ANYOTHERKEY, m);
    }
  else if (r && map[ANYOTHERKEY].function)
    {
      /* We didn't match (r is probably -1), so return something to
	 tell the caller that it should try ANYOTHERKEY for an
	 overridden function. */
      _rl_unget_char (key);
      _rl_dispatching_keymap = map;
      return -2;
    }
  else if (r && got_subseq)
    {
      /* OK, back up the chain. */
      _rl_unget_char (key);
      _rl_dispatching_keymap = map;
      return -1;
    }

  return r;
}
/* **************************************************************** */
/* */
/* Initializations */
/* */
/* **************************************************************** */
/* Initialize readline (and terminal if not already).  Global one-time
   initialization happens only on the first call; per-line state is
   reset on every call.  Always returns 0. */
int
rl_initialize ()
{
  /* If we have never been called before, initialize the
     terminal and data structures. */
  if (!rl_initialized)
    {
      RL_SETSTATE(RL_STATE_INITIALIZING);
      readline_initialize_everything ();
      RL_UNSETSTATE(RL_STATE_INITIALIZING);
      rl_initialized++;
      RL_SETSTATE(RL_STATE_INITIALIZED);
    }

  /* Initialize the current line information. */
  _rl_init_line_state ();

  /* We aren't done yet.  We haven't even gotten started yet! */
  rl_done = 0;
  RL_UNSETSTATE(RL_STATE_DONE);

  /* Tell the history routines what is going on. */
  _rl_start_using_history ();

  /* Make the display buffer match the state of the line. */
  rl_reset_line_state ();

  /* No such function typed yet. */
  rl_last_func = (rl_command_func_t *)NULL;

  /* Parsing of key-bindings begins in an enabled state. */
  _rl_parsing_conditionalized_out = 0;

#if defined (VI_MODE)
  if (rl_editing_mode == vi_mode)
    _rl_vi_initialize_line ();
#endif

  /* Each line starts in insert mode (the default). */
  _rl_set_insert_mode (RL_IM_DEFAULT, 1);

  return 0;
}
/* NOTE: this whole block is compiled out (#if 0).  It rebuilt the
   `environ' array on EMX (OS/2) from the process information block;
   kept only for reference. */
#if 0
#if defined (__EMX__)
static void
_emx_build_environ ()
{
  TIB *tibp;
  PIB *pibp;
  char *t, **tp;
  int c;

  DosGetInfoBlocks (&tibp, &pibp);
  t = pibp->pib_pchenv;
  for (c = 1; *t; c++)
    t += strlen (t) + 1;
  tp = environ = (char **)xmalloc ((c + 1) * sizeof (char *));
  t = pibp->pib_pchenv;
  while (*t)
    {
      *tp++ = t;
      t += strlen (t) + 1;
    }
  *tp = 0;
}
#endif /* __EMX__ */
#endif
/* Initialize the entire state of the world.  One-time setup run from
   rl_initialize (): streams, buffers, terminal, default bindings,
   funmap, eight-bit mode, and the user's inputrc file -- in that
   order, since later steps may override earlier ones. */
static void
readline_initialize_everything ()
{
#if 0
#if defined (__EMX__)
  if (environ == 0)
    _emx_build_environ ();
#endif
#endif

#if 0
  /* Find out if we are running in Emacs -- UNUSED. */
  running_in_emacs = sh_get_env_value ("EMACS") != (char *)0;
#endif

  /* Set up input and output if they are not already set up. */
  if (!rl_instream)
    rl_instream = stdin;

  if (!rl_outstream)
    rl_outstream = stdout;

  /* Bind _rl_in_stream and _rl_out_stream immediately.  These values
     may change, but they may also be used before readline_internal ()
     is called. */
  _rl_in_stream = rl_instream;
  _rl_out_stream = rl_outstream;

  /* Allocate data structures. */
  if (rl_line_buffer == 0)
    rl_line_buffer = (char *)xmalloc (rl_line_buffer_len = DEFAULT_BUFFER_SIZE);

  /* Initialize the terminal interface. */
  if (rl_terminal_name == 0)
    rl_terminal_name = sh_get_env_value ("TERM");
  _rl_init_terminal_io (rl_terminal_name);

  /* Bind tty characters to readline functions. */
  readline_default_bindings ();

  /* Initialize the function names. */
  rl_initialize_funmap ();

  /* Decide whether we should automatically go into eight-bit mode. */
  _rl_init_eightbit ();

  /* Read in the init file. */
  rl_read_init_file ((char *)NULL);

  /* XXX -- auto-wrapping terminals lose a column when horizontal
     scroll mode is on. */
  if (_rl_horizontal_scroll_mode && _rl_term_autowrap)
    {
      _rl_screenwidth--;
      _rl_screenchars -= _rl_screenheight;
    }

  /* Override the effect of any `set keymap' assignments in the
     inputrc file. */
  rl_set_keymap_from_edit_mode ();

  /* Try to bind a common arrow key prefix, if not already bound. */
  bind_arrow_keys ();

  /* Enable the meta key, if this terminal has one. */
  if (_rl_enable_meta)
    _rl_enable_meta_key ();

  /* If the completion parser's default word break characters haven't
     been set yet, then do so now. */
  if (rl_completer_word_break_characters == (char *)NULL)
    rl_completer_word_break_characters = (char *)rl_basic_word_break_characters;
}
/* If this system allows us to look at the values of the regular
   input editing characters, then bind them to their readline
   equivalents, iff the characters are not bound to keymaps.
   A no-op when the user has disabled stty-character binding. */
static void
readline_default_bindings ()
{
  if (_rl_bind_stty_chars)
    rl_tty_set_default_bindings (_rl_keymap);
}
/* Reset the default bindings for the terminal special characters we're
   interested in back to rl_insert and read the new ones.  Used when the
   tty settings may have changed after startup. */
static void
reset_default_bindings ()
{
  if (_rl_bind_stty_chars)
    {
      rl_tty_unset_default_bindings (_rl_keymap);
      rl_tty_set_default_bindings (_rl_keymap);
    }
}
/* Bind some common arrow key sequences in MAP. */
static void
bind_arrow_keys_internal (map)
Keymap map;
{
Keymap xkeymap;
xkeymap = _rl_keymap;
_rl_keymap = map;
#if defined (__MSDOS__)
rl_bind_keyseq_if_unbound ("\033[0A", rl_get_previous_history);
rl_bind_keyseq_if_unbound ("\033[0B", rl_backward_char);
rl_bind_keyseq_if_unbound ("\033[0C", rl_forward_char);
rl_bind_keyseq_if_unbound ("\033[0D", rl_get_next_history);
#endif
rl_bind_keyseq_if_unbound ("\033[A", rl_get_previous_history);
rl_bind_keyseq_if_unbound ("\033[B", rl_get_next_history);
rl_bind_keyseq_if_unbound ("\033[C", rl_forward_char);
rl_bind_keyseq_if_unbound ("\033[D", rl_backward_char);
rl_bind_keyseq_if_unbound ("\033[H", rl_beg_of_line);
rl_bind_keyseq_if_unbound ("\033[F", rl_end_of_line);
rl_bind_keyseq_if_unbound ("\033OA", rl_get_previous_history);
rl_bind_keyseq_if_unbound ("\033OB", rl_get_next_history);
rl_bind_keyseq_if_unbound ("\033OC", rl_forward_char);
rl_bind_keyseq_if_unbound ("\033OD", rl_backward_char);
rl_bind_keyseq_if_unbound ("\033OH", rl_beg_of_line);
rl_bind_keyseq_if_unbound ("\033OF", rl_end_of_line);
#if defined (__MINGW32__)
rl_bind_keyseq_if_unbound ("\340H", rl_get_previous_history);
rl_bind_keyseq_if_unbound ("\340P", rl_get_next_history);
rl_bind_keyseq_if_unbound ("\340M", rl_forward_char);
rl_bind_keyseq_if_unbound ("\340K", rl_backward_char);
rl_bind_keyseq_if_unbound ("\340G", rl_beg_of_line);
rl_bind_keyseq_if_unbound ("\340O", rl_end_of_line);
rl_bind_keyseq_if_unbound ("\340S", rl_delete);
rl_bind_keyseq_if_unbound ("\340R", rl_overwrite_mode);
#endif
_rl_keymap = xkeymap;
}
/* Try and bind the common arrow key prefixes after giving termcap and
   the inputrc file a chance to bind them and create `real' keymaps
   for the arrow key prefix.  Applies to the emacs map and, in vi
   builds, both vi maps. */
static void
bind_arrow_keys ()
{
  bind_arrow_keys_internal (emacs_standard_keymap);

#if defined (VI_MODE)
  bind_arrow_keys_internal (vi_movement_keymap);
  /* Unbind vi_movement_keymap[ESC] to allow users to repeatedly hit ESC
     in vi command mode while still allowing the arrow keys to work. */
  if (vi_movement_keymap[ESC].type == ISKMAP)
    rl_bind_keyseq_in_map ("\033", (rl_command_func_t *)NULL, vi_movement_keymap);
  bind_arrow_keys_internal (vi_insertion_keymap);
#endif
}
/* **************************************************************** */
/* */
/* Saving and Restoring Readline's state */
/* */
/* **************************************************************** */
/* Snapshot readline's global state into *SP so it can later be put
   back with rl_restore_state.  Returns 0, or -1 if SP is null. */
int
rl_save_state (sp)
     struct readline_state *sp;
{
  if (sp == 0)
    return -1;

  /* Edit buffer, cursor, and undo information. */
  sp->buffer = rl_line_buffer;
  sp->buflen = rl_line_buffer_len;
  sp->point = rl_point;
  sp->end = rl_end;
  sp->mark = rl_mark;
  sp->ul = rl_undo_list;

  /* Prompt and overall mode flags. */
  sp->prompt = rl_prompt;
  sp->rlstate = rl_readline_state;
  sp->done = rl_done;
  sp->edmode = rl_editing_mode;
  sp->insmode = rl_insert_mode;

  /* Key dispatch and pending input. */
  sp->kmap = _rl_keymap;
  sp->lastfunc = rl_last_func;
  sp->kseqlen = rl_key_sequence_length;
  sp->pendingin = rl_pending_input;
  sp->macro = rl_executing_macro;

  /* Streams and signal-handling options. */
  sp->inf = rl_instream;
  sp->outf = rl_outstream;
  sp->catchsigs = rl_catch_signals;
  sp->catchsigwinch = rl_catch_sigwinch;

  return 0;
}
/* Restore readline's global state from a snapshot previously taken
   with rl_save_state.  Returns 0, or -1 if SP is null. */
int
rl_restore_state (sp)
     struct readline_state *sp;
{
  if (sp == 0)
    return -1;

  /* Edit buffer, cursor, and undo information.  Keep the internal
     the_line alias in sync with the restored buffer. */
  the_line = rl_line_buffer = sp->buffer;
  rl_line_buffer_len = sp->buflen;
  rl_point = sp->point;
  rl_end = sp->end;
  rl_mark = sp->mark;
  rl_undo_list = sp->ul;

  /* Prompt and overall mode flags. */
  rl_prompt = sp->prompt;
  rl_readline_state = sp->rlstate;
  rl_done = sp->done;
  rl_editing_mode = sp->edmode;
  rl_insert_mode = sp->insmode;

  /* Key dispatch and pending input. */
  _rl_keymap = sp->kmap;
  rl_last_func = sp->lastfunc;
  rl_key_sequence_length = sp->kseqlen;
  rl_pending_input = sp->pendingin;
  rl_executing_macro = sp->macro;

  /* Streams and signal-handling options. */
  rl_instream = sp->inf;
  rl_outstream = sp->outf;
  rl_catch_signals = sp->catchsigs;
  rl_catch_sigwinch = sp->catchsigwinch;

  return 0;
}
| gpl-2.0 |
Buckmarble/LunarMax | scripts/selinux/mdp/mdp.c | 384 | 4221 | /*
*
* mdp - make dummy policy
*
* When pointed at a kernel tree, builds a dummy policy for that kernel
* with exactly one type with full rights to itself.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2006
*
* Authors: Serge E. Hallyn <serue@us.ibm.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
/* Print a usage message and exit unsuccessfully.  NAME is argv[0].
   The message goes to stderr so it is not mixed into any redirected
   program output. */
static void usage(char *name)
{
	fprintf(stderr, "usage: %s [-m] policy_file context_file\n", name);
	exit(1);
}
/* Maps a security class name to its permission names.  The perms array
   is sized for one permission per bit of an access-vector word plus a
   terminating NULL entry; iteration stops at the first NULL (see the
   loops over secclass_map in main). */
struct security_class_mapping {
	const char *name;
	const char *perms[sizeof(unsigned) * 8 + 1];
};
#include "classmap.h"
#include "initial_sid_to_string.h"
/* Generate a minimal dummy SELinux policy.  Usage:
     mdp [-m] policy_file context_file
   Writes the policy source to policy_file and a file-contexts file to
   context_file.  The policy declares one type (base_t), one role and
   one user, grants base_t every permission on itself, and labels all
   initial SIDs and common filesystems with user_u:base_r:base_t.
   The -m (MLS) option is accepted but not implemented. */
int main(int argc, char *argv[])
{
	int i, j, mls = 0;
	int initial_sid_to_string_len;
	char **arg, *polout, *ctxout;
	FILE *fout;

	if (argc < 3)
		usage(argv[0]);

	/* Parse the optional -m flag, then the two output file names. */
	arg = argv+1;
	if (argc==4 && strcmp(argv[1], "-m") == 0) {
		mls = 1;
		arg++;
	}
	polout = *arg++;
	ctxout = *arg;

	fout = fopen(polout, "w");
	if (!fout) {
		printf("Could not open %s for writing\n", polout);
		usage(argv[0]);
	}

	/* Class declarations.  secclass_map is NULL-name terminated. */
	for (i = 0; secclass_map[i].name; i++)
		fprintf(fout, "class %s\n", secclass_map[i].name);
	fprintf(fout, "\n");

	/* Initial SID declarations (index 0 of the table is unused). */
	initial_sid_to_string_len = sizeof(initial_sid_to_string) / sizeof (char *);
	for (i = 1; i < initial_sid_to_string_len; i++)
		fprintf(fout, "sid %s\n", initial_sid_to_string[i]);
	fprintf(fout, "\n");

	/* Per-class permission definitions. */
	for (i = 0; secclass_map[i].name; i++) {
		struct security_class_mapping *map = &secclass_map[i];
		fprintf(fout, "class %s\n", map->name);
		fprintf(fout, "{\n");
		for (j = 0; map->perms[j]; j++)
			fprintf(fout, "\t%s\n", map->perms[j]);
		fprintf(fout, "}\n\n");
	}
	fprintf(fout, "\n");

	if (mls) {
		printf("MLS not yet implemented\n");
		exit(1);
	}

	/* The single type, with full access to itself for every class. */
	fprintf(fout, "type base_t;\n");
	fprintf(fout, "role base_r types { base_t };\n");
	for (i = 0; secclass_map[i].name; i++)
		fprintf(fout, "allow base_t base_t:%s *;\n",
			secclass_map[i].name);
	fprintf(fout, "user user_u roles { base_r };\n");
	fprintf(fout, "\n");

	/* Label every initial SID with the single context. */
	for (i = 1; i < initial_sid_to_string_len; i++)
		fprintf(fout, "sid %s user_u:base_r:base_t\n", initial_sid_to_string[i]);
	fprintf(fout, "\n");

	/* Filesystem labeling rules for common filesystem types. */
	fprintf(fout, "fs_use_xattr ext2 user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_xattr ext3 user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_xattr ext4 user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_xattr jfs user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_xattr xfs user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_xattr reiserfs user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_xattr jffs2 user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_xattr gfs2 user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_xattr lustre user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_task eventpollfs user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_task pipefs user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_task sockfs user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_trans mqueue user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_trans devpts user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_trans hugetlbfs user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_trans tmpfs user_u:base_r:base_t;\n");
	fprintf(fout, "fs_use_trans shm user_u:base_r:base_t;\n");
	fprintf(fout, "genfscon proc / user_u:base_r:base_t\n");
	fclose(fout);

	/* Write the file-contexts file labeling everything base_t. */
	fout = fopen(ctxout, "w");
	if (!fout) {
		printf("Wrote policy, but cannot open %s for writing\n", ctxout);
		usage(argv[0]);
	}
	fprintf(fout, "/ user_u:base_r:base_t\n");
	fprintf(fout, "/.* user_u:base_r:base_t\n");
	fclose(fout);

	return 0;
}
| gpl-2.0 |
identisoft-rashid/ec3_kernel | drivers/video/fbdev/pxa168fb.c | 640 | 20904 | /*
* linux/drivers/video/pxa168fb.c -- Marvell PXA168 LCD Controller
*
* Copyright (C) 2008 Marvell International Ltd.
* All rights reserved.
*
* 2009-02-16 adapted from original version for PXA168/910
* Jun Nie <njun@marvell.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <video/pxa168fb.h>
#include "pxa168fb.h"
#define DEFAULT_REFRESH 60 /* Hz */
/* Map a user-supplied fb_var_screeninfo color layout onto one of the
   controller's PIX_FMT_* codes.  The relative order of the red and
   blue offsets selects RGB vs BGR variants.  Returns a PIX_FMT_*
   value, or -EINVAL if the layout matches none of them.  The checks
   are ordered from most to least specific; a sub-case that does not
   match falls through to the next group. */
static int determine_best_pix_fmt(struct fb_var_screeninfo *var)
{
	/*
	 * Pseudocolor mode?
	 */
	if (var->bits_per_pixel == 8)
		return PIX_FMT_PSEUDOCOLOR;

	/*
	 * Check for 565/1555.
	 */
	if (var->bits_per_pixel == 16 && var->red.length <= 5 &&
	    var->green.length <= 6 && var->blue.length <= 5) {
		if (var->transp.length == 0) {
			if (var->red.offset >= var->blue.offset)
				return PIX_FMT_RGB565;
			else
				return PIX_FMT_BGR565;
		}

		if (var->transp.length == 1 && var->green.length <= 5) {
			if (var->red.offset >= var->blue.offset)
				return PIX_FMT_RGB1555;
			else
				return PIX_FMT_BGR1555;
		}

		/* fall through */
	}

	/*
	 * Check for 888/A888.
	 */
	if (var->bits_per_pixel <= 32 && var->red.length <= 8 &&
	    var->green.length <= 8 && var->blue.length <= 8) {
		if (var->bits_per_pixel == 24 && var->transp.length == 0) {
			if (var->red.offset >= var->blue.offset)
				return PIX_FMT_RGB888PACK;
			else
				return PIX_FMT_BGR888PACK;
		}

		if (var->bits_per_pixel == 32 && var->transp.length == 8) {
			if (var->red.offset >= var->blue.offset)
				return PIX_FMT_RGBA888;
			else
				return PIX_FMT_BGRA888;
		} else {
			if (var->red.offset >= var->blue.offset)
				return PIX_FMT_RGB888UNPACK;
			else
				return PIX_FMT_BGR888UNPACK;
		}

		/* fall through */
	}

	return -EINVAL;
}
static void set_pix_fmt(struct fb_var_screeninfo *var, int pix_fmt)
{
switch (pix_fmt) {
case PIX_FMT_RGB565:
var->bits_per_pixel = 16;
var->red.offset = 11; var->red.length = 5;
var->green.offset = 5; var->green.length = 6;
var->blue.offset = 0; var->blue.length = 5;
var->transp.offset = 0; var->transp.length = 0;
break;
case PIX_FMT_BGR565:
var->bits_per_pixel = 16;
var->red.offset = 0; var->red.length = 5;
var->green.offset = 5; var->green.length = 6;
var->blue.offset = 11; var->blue.length = 5;
var->transp.offset = 0; var->transp.length = 0;
break;
case PIX_FMT_RGB1555:
var->bits_per_pixel = 16;
var->red.offset = 10; var->red.length = 5;
var->green.offset = 5; var->green.length = 5;
var->blue.offset = 0; var->blue.length = 5;
var->transp.offset = 15; var->transp.length = 1;
break;
case PIX_FMT_BGR1555:
var->bits_per_pixel = 16;
var->red.offset = 0; var->red.length = 5;
var->green.offset = 5; var->green.length = 5;
var->blue.offset = 10; var->blue.length = 5;
var->transp.offset = 15; var->transp.length = 1;
break;
case PIX_FMT_RGB888PACK:
var->bits_per_pixel = 24;
var->red.offset = 16; var->red.length = 8;
var->green.offset = 8; var->green.length = 8;
var->blue.offset = 0; var->blue.length = 8;
var->transp.offset = 0; var->transp.length = 0;
break;
case PIX_FMT_BGR888PACK:
var->bits_per_pixel = 24;
var->red.offset = 0; var->red.length = 8;
var->green.offset = 8; var->green.length = 8;
var->blue.offset = 16; var->blue.length = 8;
var->transp.offset = 0; var->transp.length = 0;
break;
case PIX_FMT_RGBA888:
var->bits_per_pixel = 32;
var->red.offset = 16; var->red.length = 8;
var->green.offset = 8; var->green.length = 8;
var->blue.offset = 0; var->blue.length = 8;
var->transp.offset = 24; var->transp.length = 8;
break;
case PIX_FMT_BGRA888:
var->bits_per_pixel = 32;
var->red.offset = 0; var->red.length = 8;
var->green.offset = 8; var->green.length = 8;
var->blue.offset = 16; var->blue.length = 8;
var->transp.offset = 24; var->transp.length = 8;
break;
case PIX_FMT_PSEUDOCOLOR:
var->bits_per_pixel = 8;
var->red.offset = 0; var->red.length = 8;
var->green.offset = 0; var->green.length = 8;
var->blue.offset = 0; var->blue.length = 8;
var->transp.offset = 0; var->transp.length = 0;
break;
}
}
/*
 * set_mode() - load geometry, timings and pixel layout from a platform
 * fb_videomode into @var.
 *
 * @ystretch: when non-zero, size yres_virtual so the whole framebuffer
 * allocation is available for vertical panning; otherwise just make sure
 * it covers the visible resolution.
 */
static void set_mode(struct pxa168fb_info *fbi, struct fb_var_screeninfo *var,
		struct fb_videomode *mode, int pix_fmt, int ystretch)
{
	struct fb_info *info = fbi->info;

	/* Fill in the channel offsets/lengths for the chosen format. */
	set_pix_fmt(var, pix_fmt);

	var->xres = mode->xres;
	var->yres = mode->yres;
	/* Never shrink a caller-provided virtual width. */
	var->xres_virtual = max(var->xres, var->xres_virtual);
	if (ystretch)
		/* Use every line the framebuffer memory can hold. */
		var->yres_virtual = info->fix.smem_len /
			(var->xres_virtual * (var->bits_per_pixel >> 3));
	else
		var->yres_virtual = max(var->yres, var->yres_virtual);
	var->grayscale = 0;
	var->accel_flags = FB_ACCEL_NONE;

	/* Copy the timing parameters verbatim from the video mode. */
	var->pixclock = mode->pixclock;
	var->left_margin = mode->left_margin;
	var->right_margin = mode->right_margin;
	var->upper_margin = mode->upper_margin;
	var->lower_margin = mode->lower_margin;
	var->hsync_len = mode->hsync_len;
	var->vsync_len = mode->vsync_len;
	var->sync = mode->sync;
	var->vmode = FB_VMODE_NONINTERLACED;
	var->rotate = FB_ROTATE_UR;
}
/*
 * pxa168fb_check_var() - validate and normalize a requested video mode.
 *
 * Picks the closest hardware pixel format, rewrites @var's channel
 * layout to match, and rejects geometries the controller or the
 * allocated framebuffer cannot handle.  Returns 0 or a negative errno.
 */
static int pxa168fb_check_var(struct fb_var_screeninfo *var,
			struct fb_info *info)
{
	struct pxa168fb_info *fbi = info->par;
	int fmt;

	/* Resolve and latch the hardware pixel format. */
	fmt = determine_best_pix_fmt(var);
	if (fmt < 0)
		return fmt;
	set_pix_fmt(var, fmt);
	fbi->pix_fmt = fmt;

	/* The panned visible area must lie inside the virtual screen. */
	if (var->xoffset + var->xres > var->xres_virtual ||
	    var->yoffset + var->yres > var->yres_virtual)
		return -EINVAL;

	/* Total timing (active + blanking) is limited to 2048 each way. */
	if (var->xres + var->right_margin +
	    var->hsync_len + var->left_margin > 2048)
		return -EINVAL;
	if (var->yres + var->lower_margin +
	    var->vsync_len + var->upper_margin > 2048)
		return -EINVAL;

	/* The virtual screen must fit in the allocated framebuffer. */
	if (var->xres_virtual * var->yres_virtual *
	    (var->bits_per_pixel >> 3) > info->fix.smem_len)
		return -EINVAL;

	return 0;
}
/*
* The hardware clock divider has an integer and a fractional
* stage:
*
* clk2 = clk_in / integer_divider
* clk_out = clk2 * (1 - (fractional_divider >> 12))
*
* Calculate integer and fractional divider for given clk_in
* and clk_out.
*/
/*
 * set_clock_divider() - program LCD_CFG_SCLK_DIV so the pixel clock
 * derived from the controller's source clock approximates m->pixclock.
 *
 * Only the integer divider stage is used here; the fractional stage
 * (see the comment above) is left at zero.
 */
static void set_clock_divider(struct pxa168fb_info *fbi,
			const struct fb_videomode *m)
{
	int divider_int;
	int needed_pixclk;
	u64 div_result;
	u32 x = 0;
	/*
	 * Notice: The field pixclock is used by linux fb
	 * is in pixel second. E.g. struct fb_videomode &
	 * struct fb_var_screeninfo
	 */
	/*
	 * Check input values.
	 */
	if (!m || !m->pixclock || !m->refresh) {
		dev_err(fbi->dev, "Input refresh or pixclock is wrong.\n");
		return;
	}
	/*
	 * Using PLL/AXI clock (bit 31 of the divider register).
	 */
	x = 0x80000000;
	/*
	 * Convert pixclock (picoseconds per pixel) to a frequency in Hz,
	 * then derive the integer divider from the source clock rate.
	 */
	div_result = 1000000000000ll;
	do_div(div_result, m->pixclock);
	needed_pixclk = (u32)div_result;
	divider_int = clk_get_rate(fbi->clk) / needed_pixclk;
	/* check whether divisor is too small. */
	if (divider_int < 2) {
		dev_warn(fbi->dev, "Warning: clock source is too slow."
				"Try smaller resolution\n");
		divider_int = 2;
	}
	/*
	 * Set setting to reg.
	 */
	x |= divider_int;
	writel(x, fbi->reg_base + LCD_CFG_SCLK_DIV);
}
/*
 * set_dma_control0() - configure the graphics path in LCD_SPU_DMA_CTRL0:
 * DMA enable, palette lookup, hardware pixel format and R/B swap.
 */
static void set_dma_control0(struct pxa168fb_info *fbi)
{
	u32 x;
	/*
	 * Set bit to enable graphics DMA.
	 */
	x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
	x &= ~CFG_GRA_ENA_MASK;
	x |= fbi->active ? CFG_GRA_ENA(1) : CFG_GRA_ENA(0);
	/*
	 * If we are in a pseudo-color mode, we need to enable
	 * palette lookup.
	 */
	if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR)
		x |= 0x10000000;
	/*
	 * Configure hardware pixel format.  PIX_FMT_* values encode the
	 * hardware format in their upper bits, hence the >> 1 here.
	 */
	x &= ~(0xF << 16);
	x |= (fbi->pix_fmt >> 1) << 16;
	/*
	 * Check red and blue pixel swap.
	 * 1. source data swap (low bit of pix_fmt selects RGB vs. BGR)
	 * 2. panel output data swap (panel_rbswap from platform data)
	 * The two XOR so a double swap cancels out.
	 */
	x &= ~(1 << 12);
	x |= ((fbi->pix_fmt & 1) ^ (fbi->panel_rbswap)) << 12;
	writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL0);
}
/*
 * set_dma_control1() - set default bits in LCD_SPU_DMA_CTRL1 and select
 * the vsync edge that triggers graphics DMA.
 *
 * @sync: FB_SYNC_* flags from the current video mode.
 */
static void set_dma_control1(struct pxa168fb_info *fbi, int sync)
{
	u32 x;
	/*
	 * Configure default bits: vsync triggers DMA, gated clock
	 * enable, power save enable, configure alpha registers to
	 * display 100% graphics, and set pixel command.
	 */
	x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL1);
	x |= 0x2032ff81;
	/*
	 * We trigger DMA on the falling edge of vsync if vsync is
	 * active low, or on the rising edge if vsync is active high.
	 */
	if (!(sync & FB_SYNC_VERT_HIGH_ACT))
		x |= 0x08000000;
	writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL1);
}
/*
 * set_graphics_start() - point the graphics DMA engine at the pixel
 * (@xoffset, @yoffset) of the virtual framebuffer (used for panning).
 */
static void set_graphics_start(struct fb_info *info, int xoffset, int yoffset)
{
	struct pxa168fb_info *fbi = info->par;
	struct fb_var_screeninfo *var = &info->var;
	int bytes_per_pixel = var->bits_per_pixel >> 3;
	unsigned long addr;

	/* DMA base = framebuffer start + byte offset of the pan origin. */
	addr = fbi->fb_start_dma +
		((yoffset * var->xres_virtual) + xoffset) * bytes_per_pixel;
	writel(addr, fbi->reg_base + LCD_CFG_GRA_START_ADDR0);
}
/*
 * set_dumb_panel_control() - program LCD_SPU_DUMB_CTRL from platform
 * data, the current sync polarities and the blanking state, while
 * preserving the panel-enable bit (bit 0).
 */
static void set_dumb_panel_control(struct fb_info *info)
{
	struct pxa168fb_info *fbi = info->par;
	struct pxa168fb_mach_info *mi = dev_get_platdata(fbi->dev);
	u32 x;
	/*
	 * Preserve enable flag.
	 */
	x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL) & 0x00000001;
	/* Mode 0x7 blanks the panel; otherwise use the platform mode. */
	x |= (fbi->is_blanked ? 0x7 : mi->dumb_mode) << 28;
	x |= mi->gpio_output_data << 20;
	x |= mi->gpio_output_mask << 12;
	x |= mi->panel_rgb_reverse_lanes ? 0x00000080 : 0;
	x |= mi->invert_composite_blank ? 0x00000040 : 0;
	x |= (info->var.sync & FB_SYNC_COMP_HIGH_ACT) ? 0x00000020 : 0;
	x |= mi->invert_pix_val_ena ? 0x00000010 : 0;
	/* Sync polarity bits are active-low enables: set when NOT high-active. */
	x |= (info->var.sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 0x00000008;
	x |= (info->var.sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 0x00000004;
	x |= mi->invert_pixclock ? 0x00000002 : 0;
	writel(x, fbi->reg_base + LCD_SPU_DUMB_CTRL);
}
/*
 * set_dumb_screen_dimensions() - program the total screen size (active
 * area plus blanking) into LCD_SPUT_V_H_TOTAL: height in the upper
 * halfword, width in the lower.
 */
static void set_dumb_screen_dimensions(struct fb_info *info)
{
	struct pxa168fb_info *fbi = info->par;
	struct fb_var_screeninfo *v = &info->var;
	int total_w;
	int total_h;

	total_w = v->xres + v->right_margin + v->hsync_len + v->left_margin;
	total_h = v->yres + v->lower_margin + v->vsync_len + v->upper_margin;

	writel((total_h << 16) | total_w, fbi->reg_base + LCD_SPUT_V_H_TOTAL);
}
/*
 * pxa168fb_set_par() - apply the (already validated) mode in info->var
 * to the hardware: fixed-info bookkeeping, pixel clock, DMA setup,
 * graphics geometry and dumb-panel timings.
 *
 * The panel output is disabled around the reprogramming sequence and
 * re-enabled at the end.  Always returns 0.
 *
 * Fix vs. original: the local `mi` was fetched via dev_get_platdata()
 * but never used anywhere in this function — removed.
 */
static int pxa168fb_set_par(struct fb_info *info)
{
	struct pxa168fb_info *fbi = info->par;
	struct fb_var_screeninfo *var = &info->var;
	struct fb_videomode mode;
	u32 x;

	/*
	 * Set additional mode info.
	 */
	if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR)
		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
	else
		info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8;
	info->fix.ypanstep = var->yres;

	/*
	 * Disable panel output while we setup the display.
	 */
	x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL);
	writel(x & ~1, fbi->reg_base + LCD_SPU_DUMB_CTRL);

	/*
	 * Configure global panel parameters (active area).
	 */
	writel((var->yres << 16) | var->xres,
		fbi->reg_base + LCD_SPU_V_H_ACTIVE);

	/*
	 * Convert var to a video mode and derive the pixel clock divider.
	 */
	fb_var_to_videomode(&mode, &info->var);
	set_clock_divider(fbi, &mode);

	/* Configure dma ctrl regs. */
	set_dma_control0(fbi);
	set_dma_control1(fbi, info->var.sync);

	/*
	 * Configure graphics DMA parameters: line pitch in bytes, then
	 * source and (unzoomed) destination sizes in pixels.
	 */
	x = readl(fbi->reg_base + LCD_CFG_GRA_PITCH);
	x = (x & ~0xFFFF) | ((var->xres_virtual * var->bits_per_pixel) >> 3);
	writel(x, fbi->reg_base + LCD_CFG_GRA_PITCH);
	writel((var->yres << 16) | var->xres,
		fbi->reg_base + LCD_SPU_GRA_HPXL_VLN);
	writel((var->yres << 16) | var->xres,
		fbi->reg_base + LCD_SPU_GZM_HPXL_VLN);

	/*
	 * Configure dumb panel ctrl regs & timings.
	 */
	set_dumb_panel_control(info);
	set_dumb_screen_dimensions(info);

	writel((var->left_margin << 16) | var->right_margin,
			fbi->reg_base + LCD_SPU_H_PORCH);
	writel((var->upper_margin << 16) | var->lower_margin,
			fbi->reg_base + LCD_SPU_V_PORCH);

	/*
	 * Re-enable panel output.
	 */
	x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL);
	writel(x | 1, fbi->reg_base + LCD_SPU_DUMB_CTRL);

	return 0;
}
/*
 * chan_to_field() - reduce a 16-bit color channel value to bf->length
 * bits and shift it into its position within a pixel word.
 */
static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf)
{
	unsigned int field = (chan & 0xffff) >> (16 - bf->length);

	return field << bf->offset;
}
/*
 * to_rgb() - pack the top 8 bits of each 16-bit channel into a
 * 0x00RRGGBB word for the palette SRAM.
 */
static u32 to_rgb(u16 red, u16 green, u16 blue)
{
	u32 r = red >> 8;
	u32 g = green >> 8;
	u32 b = blue >> 8;

	return (r << 16) | (g << 8) | b;
}
/*
 * pxa168fb_setcolreg() - set one color register.
 *
 * Truecolor modes fill the 16-entry software pseudo palette; palette
 * (pseudocolor) modes write the color into the controller's palette
 * SRAM.  Always returns 0.
 */
static int
pxa168fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green,
		 unsigned int blue, unsigned int trans, struct fb_info *info)
{
	struct pxa168fb_info *fbi = info->par;
	u32 val;

	/* Standard ITU-R 601 luminance weighting for grayscale. */
	if (info->var.grayscale)
		red = green = blue = (19595 * red + 38470 * green +
					7471 * blue) >> 16;

	if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16) {
		val =  chan_to_field(red,   &info->var.red);
		val |= chan_to_field(green, &info->var.green);
		val |= chan_to_field(blue , &info->var.blue);
		fbi->pseudo_palette[regno] = val;
	}

	if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR && regno < 256) {
		/* Write data, then issue the SRAM write command for regno. */
		val = to_rgb(red, green, blue);
		writel(val, fbi->reg_base + LCD_SPU_SRAM_WRDAT);
		writel(0x8300 | regno, fbi->reg_base + LCD_SPU_SRAM_CTRL);
	}

	return 0;
}
/*
 * pxa168fb_blank() - blank or unblank the display by reprogramming the
 * dumb-panel control register.  Always returns 0.
 */
static int pxa168fb_blank(int blank, struct fb_info *info)
{
	struct pxa168fb_info *fbi = info->par;

	/* Anything other than UNBLANK counts as blanked. */
	fbi->is_blanked = (blank != FB_BLANK_UNBLANK);
	set_dumb_panel_control(info);

	return 0;
}
/*
 * pxa168fb_pan_display() - pan the display by moving the graphics DMA
 * start address to the requested (xoffset, yoffset).  Always returns 0;
 * offset validation is done earlier in pxa168fb_check_var().
 */
static int pxa168fb_pan_display(struct fb_var_screeninfo *var,
			struct fb_info *info)
{
	set_graphics_start(info, var->xoffset, var->yoffset);

	return 0;
}
/*
 * pxa168fb_handle_irq() - interrupt handler.
 *
 * Acknowledges a graphics-frame-done interrupt by clearing its bit in
 * the ISR; any other interrupt source is not ours (the line is shared),
 * so return IRQ_NONE for it.
 */
static irqreturn_t pxa168fb_handle_irq(int irq, void *dev_id)
{
	struct pxa168fb_info *fbi = dev_id;
	u32 isr = readl(fbi->reg_base + SPU_IRQ_ISR);

	if ((isr & GRA_FRAME_IRQ0_ENA_MASK)) {
		/* Clear only the frame IRQ bit, leaving others pending. */
		writel(isr & (~GRA_FRAME_IRQ0_ENA_MASK),
			fbi->reg_base + SPU_IRQ_ISR);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
/*
 * Framebuffer operations: mode check/set, palette, blanking and
 * panning are implemented here; drawing is delegated to the generic
 * cfb_* software helpers.
 */
static struct fb_ops pxa168fb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= pxa168fb_check_var,
	.fb_set_par	= pxa168fb_set_par,
	.fb_setcolreg	= pxa168fb_setcolreg,
	.fb_blank	= pxa168fb_blank,
	.fb_pan_display	= pxa168fb_pan_display,
	.fb_fillrect	= cfb_fillrect,
	.fb_copyarea	= cfb_copyarea,
	.fb_imageblit	= cfb_imageblit,
};
/*
 * pxa168fb_init_mode() - pick the best video mode from the mode list
 * and derive initial virtual resolution and pixel clock.
 *
 * The pixel clock is recomputed from the total screen size and the
 * DEFAULT_REFRESH rate rather than taken from the mode.  Returns 0.
 */
static int pxa168fb_init_mode(struct fb_info *info,
			      struct pxa168fb_mach_info *mi)
{
	struct pxa168fb_info *fbi = info->par;
	struct fb_var_screeninfo *var = &info->var;
	int ret = 0;
	u32 total_w, total_h, refresh;
	u64 div_result;
	const struct fb_videomode *m;

	/*
	 * Set default value
	 */
	refresh = DEFAULT_REFRESH;

	/* try to find best video mode. */
	m = fb_find_best_mode(&info->var, &info->modelist);
	if (m)
		fb_videomode_to_var(&info->var, m);

	/* Init settings: let yres_virtual span all framebuffer memory. */
	var->xres_virtual = var->xres;
	var->yres_virtual = info->fix.smem_len /
		(var->xres_virtual * (var->bits_per_pixel >> 3));
	dev_dbg(fbi->dev, "pxa168fb: find best mode: res = %dx%d\n",
				var->xres, var->yres);

	/* correct pixclock: 10^12 ps / (total pixels * refresh rate). */
	total_w = var->xres + var->left_margin + var->right_margin +
		  var->hsync_len;
	total_h = var->yres + var->upper_margin + var->lower_margin +
		  var->vsync_len;

	div_result = 1000000000000ll;
	do_div(div_result, total_w * total_h * refresh);
	var->pixclock = (u32)div_result;

	return ret;
}
/*
 * pxa168fb_probe() - bring up one LCD controller instance.
 *
 * Acquires the clock, MMIO region and IRQ, allocates the fb_info and
 * the framebuffer DMA memory, programs an initial mode and registers
 * the framebuffer.  On failure everything acquired so far is released
 * in reverse order via the cascading error labels.
 *
 * Fixes vs. original:
 *  - the error path freed the framebuffer_alloc() allocation with
 *    kfree(); it must use framebuffer_release(),
 *  - memory obtained with dma_alloc_writecombine() was freed with
 *    dma_free_coherent(); use the matching dma_free_writecombine(),
 *  - pointer initializers use NULL instead of 0.
 */
static int pxa168fb_probe(struct platform_device *pdev)
{
	struct pxa168fb_mach_info *mi;
	struct fb_info *info = NULL;
	struct pxa168fb_info *fbi = NULL;
	struct resource *res;
	struct clk *clk;
	int irq, ret;

	mi = dev_get_platdata(&pdev->dev);
	if (mi == NULL) {
		dev_err(&pdev->dev, "no platform data defined\n");
		return -EINVAL;
	}

	clk = clk_get(&pdev->dev, "LCDCLK");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "unable to get LCDCLK");
		return PTR_ERR(clk);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "no IO memory defined\n");
		ret = -ENOENT;
		goto failed_put_clk;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ defined\n");
		ret = -ENOENT;
		goto failed_put_clk;
	}

	info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
	if (info == NULL) {
		ret = -ENOMEM;
		goto failed_put_clk;
	}

	/* Initialize private data */
	fbi = info->par;
	fbi->info = info;
	fbi->clk = clk;
	fbi->dev = info->dev = &pdev->dev;
	fbi->panel_rbswap = mi->panel_rbswap;
	fbi->is_blanked = 0;
	fbi->active = mi->active;

	/*
	 * Initialise static fb parameters.
	 */
	info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK |
		      FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN;
	info->node = -1;
	strlcpy(info->fix.id, mi->id, 16);
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 0;
	info->fix.ypanstep = 0;
	info->fix.ywrapstep = 0;
	info->fix.mmio_start = res->start;
	info->fix.mmio_len = resource_size(res);
	info->fix.accel = FB_ACCEL_NONE;
	info->fbops = &pxa168fb_ops;
	info->pseudo_palette = fbi->pseudo_palette;

	/*
	 * Map LCD controller registers (devm: unmapped automatically).
	 */
	fbi->reg_base = devm_ioremap_nocache(&pdev->dev, res->start,
					     resource_size(res));
	if (fbi->reg_base == NULL) {
		ret = -ENOMEM;
		goto failed_free_info;
	}

	/*
	 * Allocate framebuffer memory.
	 */
	info->fix.smem_len = PAGE_ALIGN(DEFAULT_FB_SIZE);
	info->screen_base = dma_alloc_writecombine(fbi->dev, info->fix.smem_len,
						&fbi->fb_start_dma, GFP_KERNEL);
	if (info->screen_base == NULL) {
		ret = -ENOMEM;
		goto failed_free_info;
	}

	info->fix.smem_start = (unsigned long)fbi->fb_start_dma;
	set_graphics_start(info, 0, 0);

	/*
	 * Set video mode according to platform data.
	 */
	set_mode(fbi, &info->var, mi->modes, mi->pix_fmt, 1);

	fb_videomode_to_modelist(mi->modes, mi->num_modes, &info->modelist);

	/*
	 * init video mode data.
	 */
	pxa168fb_init_mode(info, mi);

	/*
	 * Fill in sane defaults.
	 */
	ret = pxa168fb_check_var(&info->var, info);
	if (ret)
		goto failed_free_fbmem;

	/*
	 * enable controller clock
	 */
	clk_enable(fbi->clk);

	pxa168fb_set_par(info);

	/*
	 * Configure default register values.
	 */
	writel(0, fbi->reg_base + LCD_SPU_BLANKCOLOR);
	writel(mi->io_pin_allocation_mode, fbi->reg_base + SPU_IOPAD_CONTROL);
	writel(0, fbi->reg_base + LCD_CFG_GRA_START_ADDR1);
	writel(0, fbi->reg_base + LCD_SPU_GRA_OVSA_HPXL_VLN);
	writel(0, fbi->reg_base + LCD_SPU_SRAM_PARA0);
	writel(CFG_CSB_256x32(0x1)|CFG_CSB_256x24(0x1)|CFG_CSB_256x8(0x1),
		fbi->reg_base + LCD_SPU_SRAM_PARA1);

	/*
	 * Allocate color map.
	 */
	if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
		ret = -ENOMEM;
		goto failed_free_clk;
	}

	/*
	 * Register irq handler (devm: freed automatically on detach).
	 */
	ret = devm_request_irq(&pdev->dev, irq, pxa168fb_handle_irq,
			       IRQF_SHARED, info->fix.id, fbi);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to request IRQ\n");
		ret = -ENXIO;
		goto failed_free_cmap;
	}

	/*
	 * Enable GFX interrupt
	 */
	writel(GRA_FRAME_IRQ0_ENA(0x1), fbi->reg_base + SPU_IRQ_ENA);

	/*
	 * Register framebuffer.
	 */
	ret = register_framebuffer(info);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to register pxa168-fb: %d\n", ret);
		ret = -ENXIO;
		goto failed_free_cmap;
	}

	platform_set_drvdata(pdev, fbi);
	return 0;

failed_free_cmap:
	fb_dealloc_cmap(&info->cmap);
failed_free_clk:
	clk_disable(fbi->clk);
failed_free_fbmem:
	/* Must match dma_alloc_writecombine() above. */
	dma_free_writecombine(fbi->dev, info->fix.smem_len,
			info->screen_base, fbi->fb_start_dma);
failed_free_info:
	/* framebuffer_alloc() memory must not be kfree()d directly. */
	framebuffer_release(info);
failed_put_clk:
	clk_put(clk);
	dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
	return ret;
}
/*
 * pxa168fb_remove() - tear down one LCD controller instance.
 *
 * Stops graphics DMA, unregisters the framebuffer, masks the frame
 * interrupt and releases cmap, framebuffer memory, clock and fb_info.
 * The IRQ itself was requested with devm_request_irq() and is freed
 * automatically, so the original's unused `irq` local and its dead
 * platform_get_irq() call have been removed.
 */
static int pxa168fb_remove(struct platform_device *pdev)
{
	struct pxa168fb_info *fbi = platform_get_drvdata(pdev);
	struct fb_info *info;
	unsigned int data;

	if (!fbi)
		return 0;

	/* disable DMA transfer */
	data = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0);
	data &= ~CFG_GRA_ENA_MASK;
	writel(data, fbi->reg_base + LCD_SPU_DMA_CTRL0);

	info = fbi->info;

	unregister_framebuffer(info);

	/* Mask the graphics frame interrupt. */
	writel(GRA_FRAME_IRQ0_ENA(0x0), fbi->reg_base + SPU_IRQ_ENA);

	if (info->cmap.len)
		fb_dealloc_cmap(&info->cmap);

	dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
				info->screen_base, info->fix.smem_start);

	clk_disable(fbi->clk);
	clk_put(fbi->clk);

	framebuffer_release(info);

	return 0;
}
/* Platform driver glue: matched by name against the "pxa168-fb" device. */
static struct platform_driver pxa168fb_driver = {
	.driver		= {
		.name	= "pxa168-fb",
	},
	.probe		= pxa168fb_probe,
	.remove		= pxa168fb_remove,
};
module_platform_driver(pxa168fb_driver);
MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com> "
"Green Wan <gwan@marvell.com>");
MODULE_DESCRIPTION("Framebuffer driver for PXA168/910");
MODULE_LICENSE("GPL");
| gpl-2.0 |
MicrochipTech/linux-pic32 | net/netfilter/nft_lookup.c | 896 | 4046 | /*
* Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Development of this code funded by Astaro AG (http://www.astaro.com/)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
/*
 * Per-expression private data for the "lookup" expression.
 * @set:     the set this expression was bound to at init time
 * @sreg:    register holding the key to look up
 * @dreg:    register receiving the mapped data (map sets only)
 * @binding: links this expression into the set's bindings list
 */
struct nft_lookup {
	struct nft_set			*set;
	enum nft_registers		sreg:8;
	enum nft_registers		dreg:8;
	struct nft_set_binding		binding;
};
/*
 * nft_lookup_eval() - runtime evaluation of a set lookup.
 *
 * Looks the key in the source register up in the bound set.  On a match
 * in a map set, the associated data is copied into the destination
 * register; on a miss the rule evaluation is aborted with NFT_BREAK.
 *
 * Fix vs. original: the text contained the mis-encoded character
 * sequence "®s" where "&regs" was intended (an "&reg" HTML-entity
 * collapse), which does not compile; restored to &regs->data[...].
 */
static void nft_lookup_eval(const struct nft_expr *expr,
			    struct nft_regs *regs,
			    const struct nft_pktinfo *pkt)
{
	const struct nft_lookup *priv = nft_expr_priv(expr);
	const struct nft_set *set = priv->set;
	const struct nft_set_ext *ext;

	if (set->ops->lookup(set, &regs->data[priv->sreg], &ext)) {
		if (set->flags & NFT_SET_MAP)
			nft_data_copy(&regs->data[priv->dreg],
				      nft_set_ext_data(ext), set->dlen);
		return;
	}
	regs->verdict.code = NFT_BREAK;
}
/* Netlink attribute validation policy for NFTA_LOOKUP_* attributes. */
static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
	[NFTA_LOOKUP_SET]	= { .type = NLA_STRING },
	[NFTA_LOOKUP_SET_ID]	= { .type = NLA_U32 },
	[NFTA_LOOKUP_SREG]	= { .type = NLA_U32 },
	[NFTA_LOOKUP_DREG]	= { .type = NLA_U32 },
};
/*
 * nft_lookup_init() - parse netlink attributes and bind to the set.
 *
 * Resolves the set by name (falling back to the transaction-local ID),
 * validates source/destination registers against the set's key/data
 * lengths, enforces that a destination register is given iff the set is
 * a map, and finally registers the binding.  Returns 0 or -errno.
 */
static int nft_lookup_init(const struct nft_ctx *ctx,
			   const struct nft_expr *expr,
			   const struct nlattr * const tb[])
{
	struct nft_lookup *priv = nft_expr_priv(expr);
	struct nft_set *set;
	int err;

	if (tb[NFTA_LOOKUP_SET] == NULL ||
	    tb[NFTA_LOOKUP_SREG] == NULL)
		return -EINVAL;

	set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET]);
	if (IS_ERR(set)) {
		/* Name lookup failed: try the transaction-local set ID. */
		if (tb[NFTA_LOOKUP_SET_ID]) {
			set = nf_tables_set_lookup_byid(ctx->net,
							tb[NFTA_LOOKUP_SET_ID]);
		}
		if (IS_ERR(set))
			return PTR_ERR(set);
	}

	/* Sets with an eval hook (e.g. dynamic sets) are not allowed here. */
	if (set->flags & NFT_SET_EVAL)
		return -EOPNOTSUPP;

	priv->sreg = nft_parse_register(tb[NFTA_LOOKUP_SREG]);
	err = nft_validate_register_load(priv->sreg, set->klen);
	if (err < 0)
		return err;

	if (tb[NFTA_LOOKUP_DREG] != NULL) {
		/* A destination register only makes sense for map sets. */
		if (!(set->flags & NFT_SET_MAP))
			return -EINVAL;

		priv->dreg = nft_parse_register(tb[NFTA_LOOKUP_DREG]);
		err = nft_validate_register_store(ctx, priv->dreg, NULL,
						  set->dtype, set->dlen);
		if (err < 0)
			return err;
	} else if (set->flags & NFT_SET_MAP)
		return -EINVAL;

	priv->binding.flags = set->flags & NFT_SET_MAP;

	err = nf_tables_bind_set(ctx, set, &priv->binding);
	if (err < 0)
		return err;

	priv->set = set;
	return 0;
}
/* nft_lookup_destroy() - drop the binding taken in nft_lookup_init(). */
static void nft_lookup_destroy(const struct nft_ctx *ctx,
			       const struct nft_expr *expr)
{
	struct nft_lookup *priv = nft_expr_priv(expr);

	nf_tables_unbind_set(ctx, priv->set, &priv->binding);
}
/*
 * nft_lookup_dump() - serialize the expression back to netlink.
 * The destination register is emitted only for map sets.
 * Returns 0 on success, -1 if the skb ran out of room.
 */
static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_lookup *priv = nft_expr_priv(expr);

	if (nla_put_string(skb, NFTA_LOOKUP_SET, priv->set->name))
		goto nla_put_failure;
	if (nft_dump_register(skb, NFTA_LOOKUP_SREG, priv->sreg))
		goto nla_put_failure;
	if (priv->set->flags & NFT_SET_MAP)
		if (nft_dump_register(skb, NFTA_LOOKUP_DREG, priv->dreg))
			goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}
/* Forward declaration so the ops table can reference its type. */
static struct nft_expr_type nft_lookup_type;
/* Operations implementing the "lookup" expression. */
static const struct nft_expr_ops nft_lookup_ops = {
	.type		= &nft_lookup_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
	.eval		= nft_lookup_eval,
	.init		= nft_lookup_init,
	.destroy	= nft_lookup_destroy,
	.dump		= nft_lookup_dump,
};
/* Expression type registration record for "lookup". */
static struct nft_expr_type nft_lookup_type __read_mostly = {
	.name		= "lookup",
	.ops		= &nft_lookup_ops,
	.policy		= nft_lookup_policy,
	.maxattr	= NFTA_LOOKUP_MAX,
	.owner		= THIS_MODULE,
};
/* Register the "lookup" expression type with the nf_tables core. */
int __init nft_lookup_module_init(void)
{
	return nft_register_expr(&nft_lookup_type);
}
/* Unregister the "lookup" expression type. */
void nft_lookup_module_exit(void)
{
	nft_unregister_expr(&nft_lookup_type);
}
| gpl-2.0 |
Koshu/thinkpad_tablet_ics_kernel | drivers/input/tablet/acecad.c | 896 | 7759 | /*
* Copyright (c) 2001-2005 Edouard TISSERANT <edouard.tisserant@wanadoo.fr>
* Copyright (c) 2004-2005 Stephane VOLTZ <svoltz@numericable.fr>
*
* USB Acecad "Acecad Flair" tablet support
*
* Changelog:
* v3.2 - Added sysfs support
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/usb/input.h>
/*
* Version Information
*/
#define DRIVER_VERSION "v3.2"
#define DRIVER_DESC "USB Acecad Flair tablet driver"
#define DRIVER_LICENSE "GPL"
#define DRIVER_AUTHOR "Edouard TISSERANT <edouard.tisserant@wanadoo.fr>"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE(DRIVER_LICENSE);
#define USB_VENDOR_ID_ACECAD 0x0460
#define USB_DEVICE_ID_FLAIR 0x0004
#define USB_DEVICE_ID_302 0x0008
/*
 * Per-device state for one Acecad tablet.
 * @name/@phys: identity strings reported to the input layer
 * @irq:        interrupt URB polling the tablet's single int-in endpoint
 * @data:       8-byte DMA-coherent report buffer (@data_dma is its bus address)
 */
struct usb_acecad {
	char name[128];
	char phys[64];
	struct usb_device *usbdev;
	struct input_dev *input;
	struct urb *irq;

	unsigned char *data;
	dma_addr_t data_dma;
};
/*
 * usb_acecad_irq() - URB completion handler: decode one tablet report
 * and forward it to the input layer, then resubmit the URB.
 *
 * Report layout (as decoded below): byte 0 holds the proximity, touch
 * and stylus button bits; bytes 1-4 are little-endian X/Y; bytes 5-6
 * are little-endian pressure.
 */
static void usb_acecad_irq(struct urb *urb)
{
	struct usb_acecad *acecad = urb->context;
	unsigned char *data = acecad->data;
	struct input_dev *dev = acecad->input;
	int prox, status;

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* this urb is terminated, clean up */
		dbg("%s - urb shutting down with status: %d", __func__, urb->status);
		return;
	default:
		/* transient error: skip this report but keep polling */
		dbg("%s - nonzero urb status received: %d", __func__, urb->status);
		goto resubmit;
	}

	prox = (data[0] & 0x04) >> 2;
	input_report_key(dev, BTN_TOOL_PEN, prox);

	/* Position/pressure are only meaningful while the pen is in range. */
	if (prox) {
		int x = data[1] | (data[2] << 8);
		int y = data[3] | (data[4] << 8);
		/* Pressure should compute the same way for flair and 302 */
		int pressure = data[5] | (data[6] << 8);
		int touch = data[0] & 0x01;
		int stylus = (data[0] & 0x10) >> 4;
		int stylus2 = (data[0] & 0x20) >> 5;
		input_report_abs(dev, ABS_X, x);
		input_report_abs(dev, ABS_Y, y);
		input_report_abs(dev, ABS_PRESSURE, pressure);
		input_report_key(dev, BTN_TOUCH, touch);
		input_report_key(dev, BTN_STYLUS, stylus);
		input_report_key(dev, BTN_STYLUS2, stylus2);
	}

	/* event termination */
	input_sync(dev);

resubmit:
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status)
		err("can't resubmit intr, %s-%s/input0, status %d",
			acecad->usbdev->bus->bus_name, acecad->usbdev->devpath, status);
}
/*
 * usb_acecad_open() - start polling when the first user opens the
 * input device.  Returns -EIO if the interrupt URB cannot be submitted.
 */
static int usb_acecad_open(struct input_dev *dev)
{
	struct usb_acecad *acecad = input_get_drvdata(dev);

	acecad->irq->dev = acecad->usbdev;
	if (usb_submit_urb(acecad->irq, GFP_KERNEL))
		return -EIO;

	return 0;
}
/* usb_acecad_close() - stop polling when the last user closes the device. */
static void usb_acecad_close(struct input_dev *dev)
{
	struct usb_acecad *acecad = input_get_drvdata(dev);

	usb_kill_urb(acecad->irq);
}
static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_host_interface *interface = intf->cur_altsetting;
struct usb_endpoint_descriptor *endpoint;
struct usb_acecad *acecad;
struct input_dev *input_dev;
int pipe, maxp;
int err;
if (interface->desc.bNumEndpoints != 1)
return -ENODEV;
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -ENODEV;
pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
maxp = usb_maxpacket(dev, pipe, usb_pipeout(pipe));
acecad = kzalloc(sizeof(struct usb_acecad), GFP_KERNEL);
input_dev = input_allocate_device();
if (!acecad || !input_dev) {
err = -ENOMEM;
goto fail1;
}
acecad->data = usb_alloc_coherent(dev, 8, GFP_KERNEL, &acecad->data_dma);
if (!acecad->data) {
err= -ENOMEM;
goto fail1;
}
acecad->irq = usb_alloc_urb(0, GFP_KERNEL);
if (!acecad->irq) {
err = -ENOMEM;
goto fail2;
}
acecad->usbdev = dev;
acecad->input = input_dev;
if (dev->manufacturer)
strlcpy(acecad->name, dev->manufacturer, sizeof(acecad->name));
if (dev->product) {
if (dev->manufacturer)
strlcat(acecad->name, " ", sizeof(acecad->name));
strlcat(acecad->name, dev->product, sizeof(acecad->name));
}
usb_make_path(dev, acecad->phys, sizeof(acecad->phys));
strlcat(acecad->phys, "/input0", sizeof(acecad->phys));
input_dev->name = acecad->name;
input_dev->phys = acecad->phys;
usb_to_input_id(dev, &input_dev->id);
input_dev->dev.parent = &intf->dev;
input_set_drvdata(input_dev, acecad);
input_dev->open = usb_acecad_open;
input_dev->close = usb_acecad_close;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
input_dev->keybit[BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_TOOL_PEN) |
BIT_MASK(BTN_TOUCH) | BIT_MASK(BTN_STYLUS) |
BIT_MASK(BTN_STYLUS2);
switch (id->driver_info) {
case 0:
input_set_abs_params(input_dev, ABS_X, 0, 5000, 4, 0);
input_set_abs_params(input_dev, ABS_Y, 0, 3750, 4, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 512, 0, 0);
if (!strlen(acecad->name))
snprintf(acecad->name, sizeof(acecad->name),
"USB Acecad Flair Tablet %04x:%04x",
le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
break;
case 1:
input_set_abs_params(input_dev, ABS_X, 0, 53000, 4, 0);
input_set_abs_params(input_dev, ABS_Y, 0, 2250, 4, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 1024, 0, 0);
if (!strlen(acecad->name))
snprintf(acecad->name, sizeof(acecad->name),
"USB Acecad 302 Tablet %04x:%04x",
le16_to_cpu(dev->descriptor.idVendor),
le16_to_cpu(dev->descriptor.idProduct));
break;
}
usb_fill_int_urb(acecad->irq, dev, pipe,
acecad->data, maxp > 8 ? 8 : maxp,
usb_acecad_irq, acecad, endpoint->bInterval);
acecad->irq->transfer_dma = acecad->data_dma;
acecad->irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
err = input_register_device(acecad->input);
if (err)
goto fail2;
usb_set_intfdata(intf, acecad);
return 0;
fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma);
fail1: input_free_device(input_dev);
kfree(acecad);
return err;
}
/*
 * usb_acecad_disconnect() - undo probe: unregister the input device
 * (which stops polling via ->close), then free URB, DMA buffer and
 * device state.
 */
static void usb_acecad_disconnect(struct usb_interface *intf)
{
	struct usb_acecad *acecad = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);

	input_unregister_device(acecad->input);
	usb_free_urb(acecad->irq);
	usb_free_coherent(acecad->usbdev, 8, acecad->data, acecad->data_dma);
	kfree(acecad);
}
/* Supported devices; driver_info selects the per-model setup in probe. */
static struct usb_device_id usb_acecad_id_table [] = {
	{ USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_FLAIR), .driver_info = 0 },
	{ USB_DEVICE(USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_302),	 .driver_info = 1 },
	{ }
};

MODULE_DEVICE_TABLE(usb, usb_acecad_id_table);
/* USB driver glue binding the ID table to probe/disconnect. */
static struct usb_driver usb_acecad_driver = {
	.name =		"usb_acecad",
	.probe =	usb_acecad_probe,
	.disconnect =	usb_acecad_disconnect,
	.id_table =	usb_acecad_id_table,
};
/* Module init: register with the USB core and announce the driver. */
static int __init usb_acecad_init(void)
{
	int result = usb_register(&usb_acecad_driver);
	if (result == 0)
		printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
				DRIVER_DESC "\n");
	return result;
}
/* Module exit: unregister from the USB core. */
static void __exit usb_acecad_exit(void)
{
	usb_deregister(&usb_acecad_driver);
}
module_init(usb_acecad_init);
module_exit(usb_acecad_exit);
| gpl-2.0 |
playfulgod/Kernel_AS85-LG-Ignite | drivers/scsi/megaraid/megaraid_mbox.c | 896 | 102609 | /*
*
* Linux MegaRAID device driver
*
* Copyright (c) 2003-2004 LSI Logic Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* FILE : megaraid_mbox.c
* Version : v2.20.5.1 (Nov 16 2006)
*
* Authors:
* Atul Mukker <Atul.Mukker@lsi.com>
* Sreenivas Bagalkote <Sreenivas.Bagalkote@lsi.com>
* Manoj Jose <Manoj.Jose@lsi.com>
* Seokmann Ju
*
* List of supported controllers
*
* OEM Product Name VID DID SSVID SSID
* --- ------------ --- --- ---- ----
* Dell PERC3/QC 101E 1960 1028 0471
* Dell PERC3/DC 101E 1960 1028 0493
* Dell PERC3/SC 101E 1960 1028 0475
* Dell PERC3/Di 1028 1960 1028 0123
* Dell PERC4/SC 1000 1960 1028 0520
* Dell PERC4/DC 1000 1960 1028 0518
* Dell PERC4/QC 1000 0407 1028 0531
* Dell PERC4/Di 1028 000F 1028 014A
* Dell PERC 4e/Si 1028 0013 1028 016c
* Dell PERC 4e/Di 1028 0013 1028 016d
* Dell PERC 4e/Di 1028 0013 1028 016e
* Dell PERC 4e/Di 1028 0013 1028 016f
* Dell PERC 4e/Di 1028 0013 1028 0170
* Dell PERC 4e/DC 1000 0408 1028 0002
* Dell PERC 4e/SC 1000 0408 1028 0001
*
*
* LSI MegaRAID SCSI 320-0 1000 1960 1000 A520
* LSI MegaRAID SCSI 320-1 1000 1960 1000 0520
* LSI MegaRAID SCSI 320-2 1000 1960 1000 0518
* LSI MegaRAID SCSI 320-0X 1000 0407 1000 0530
* LSI MegaRAID SCSI 320-2X 1000 0407 1000 0532
* LSI MegaRAID SCSI 320-4X 1000 0407 1000 0531
* LSI MegaRAID SCSI 320-1E 1000 0408 1000 0001
* LSI MegaRAID SCSI 320-2E 1000 0408 1000 0002
* LSI MegaRAID SATA 150-4 1000 1960 1000 4523
* LSI MegaRAID SATA 150-6 1000 1960 1000 0523
* LSI MegaRAID SATA 300-4X 1000 0409 1000 3004
* LSI MegaRAID SATA 300-8X 1000 0409 1000 3008
*
* INTEL RAID Controller SRCU42X 1000 0407 8086 0532
* INTEL RAID Controller SRCS16 1000 1960 8086 0523
* INTEL RAID Controller SRCU42E 1000 0408 8086 0002
* INTEL RAID Controller SRCZCRX 1000 0407 8086 0530
* INTEL RAID Controller SRCS28X 1000 0409 8086 3008
* INTEL RAID Controller SROMBU42E 1000 0408 8086 3431
* INTEL RAID Controller SROMBU42E 1000 0408 8086 3499
* INTEL RAID Controller SRCU51L 1000 1960 8086 0520
*
* FSC MegaRAID PCI Express ROMB 1000 0408 1734 1065
*
* ACER MegaRAID ROMB-2E 1000 0408 1025 004D
*
* NEC MegaRAID PCI Express ROMB 1000 0408 1033 8287
*
* For history of changes, see Documentation/ChangeLog.megaraid
*/
#include <linux/slab.h>
#include "megaraid_mbox.h"
/* module load/unload entry points */
static int megaraid_init(void);
static void megaraid_exit(void);

/* PCI hotplug hooks */
static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *);
static void megaraid_detach_one(struct pci_dev *);
static void megaraid_mbox_shutdown(struct pci_dev *);

/* SCSI mid-layer attach/detach */
static int megaraid_io_attach(adapter_t *);
static void megaraid_io_detach(adapter_t *);

/* mailbox controller init/teardown and resource management */
static int megaraid_init_mbox(adapter_t *);
static void megaraid_fini_mbox(adapter_t *);
static int megaraid_alloc_cmd_packets(adapter_t *);
static void megaraid_free_cmd_packets(adapter_t *);
static int megaraid_mbox_setup_dma_pools(adapter_t *);
static void megaraid_mbox_teardown_dma_pools(adapter_t *);
static int megaraid_sysfs_alloc_resources(adapter_t *);
static void megaraid_sysfs_free_resources(adapter_t *);

/* SCSI error-handler callbacks */
static int megaraid_abort_handler(struct scsi_cmnd *);
static int megaraid_reset_handler(struct scsi_cmnd *);

/* synchronous (polled) mailbox commands */
static int mbox_post_sync_cmd(adapter_t *, uint8_t []);
static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []);
static int megaraid_busywait_mbox(mraid_device_t *);

/* firmware capability discovery, performed once at init time */
static int megaraid_mbox_product_info(adapter_t *);
static int megaraid_mbox_extended_cdb(adapter_t *);
static int megaraid_mbox_support_ha(adapter_t *, uint16_t *);
static int megaraid_mbox_support_random_del(adapter_t *);
static int megaraid_mbox_get_max_sg(adapter_t *);
static void megaraid_mbox_enum_raid_scsi(adapter_t *);
static void megaraid_mbox_flush_cache(adapter_t *);
static int megaraid_mbox_fire_sync_cmd(adapter_t *);
static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
static void megaraid_mbox_setup_device_map(adapter_t *);

/* IO path: command build, queueing, interrupt and deferred completion */
static int megaraid_queue_command(struct scsi_cmnd *,
		void (*)(struct scsi_cmnd *));
static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
		struct scsi_cmnd *);
static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *,
		struct scsi_cmnd *);
static irqreturn_t megaraid_isr(int, void *);
static void megaraid_mbox_dpc(unsigned long);

/* sysfs attribute show routines */
static ssize_t megaraid_sysfs_show_app_hndl(struct device *, struct device_attribute *attr, char *);
static ssize_t megaraid_sysfs_show_ldnum(struct device *, struct device_attribute *attr, char *);

/* LSI common management module (CMM) interface */
static int megaraid_cmm_register(adapter_t *);
static int megaraid_cmm_unregister(adapter_t *);
static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t);
static int megaraid_mbox_mm_command(adapter_t *, uioc_t *);
static void megaraid_mbox_mm_done(adapter_t *, scb_t *);
static int gather_hbainfo(adapter_t *, mraid_hba_info_t *);
static int wait_till_fw_empty(adapter_t *);
MODULE_AUTHOR("megaraidlinux@lsi.com");
MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGARAID_VERSION);

/*
 * ### modules parameters for driver ###
 */

/*
 * Set to non-zero to expose unconfigured (non-RAID) disks to the kernel.
 */
static int megaraid_expose_unconf_disks = 0;
module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0);
MODULE_PARM_DESC(unconf_disks,
	"Set to expose unconfigured disks to kernel (default=0)");

/*
 * Maximum time (in microseconds) to spin-wait for the adapter's mailbox
 * to become free before giving up on posting a command.
 */
static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT;
module_param_named(busy_wait, max_mbox_busy_wait, int, 0);
MODULE_PARM_DESC(busy_wait,
	"Max wait for mailbox in microseconds if busy (default=10)");

/*
 * Maximum number of sectors per IO command.
 */
static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS;
module_param_named(max_sectors, megaraid_max_sectors, int, 0);
MODULE_PARM_DESC(max_sectors,
	"Maximum number of sectors per IO command (default=128)");

/*
 * Number of queued commands per logical unit. Clamped to
 * MBOX_MAX_SCSI_CMDS in megaraid_init() if set too high.
 */
static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN;
module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0);
MODULE_PARM_DESC(cmd_per_lun,
	"Maximum number of commands per logical unit (default=64)");

/*
 * Fast driver load option, skip scanning for physical devices during load.
 * This would result in non-disk devices being skipped during driver load
 * time. These can be later added though, using /proc/scsi/scsi
 */
static unsigned int megaraid_fast_load = 0;
module_param_named(fast_load, megaraid_fast_load, int, 0);
MODULE_PARM_DESC(fast_load,
	"Faster loading of the driver, skips physical devices! (default=0)");

/*
 * mraid_debug level - threshold for amount of information to be displayed by
 * the driver. This level can be changed through modules parameters, ioctl or
 * sysfs/proc interface. By default, print the announcement messages only.
 */
int mraid_debug_level = CL_ANN;
module_param_named(debug_level, mraid_debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");

/*
 * ### global data ###
 */

/*
 * Mailbox driver version bytes handed to the firmware.
 * NOTE(review): the meaning of the individual bytes is not visible here;
 * presumably a version signature expected by the FW -- confirm against
 * the firmware interface specification before changing.
 */
static uint8_t megaraid_mbox_version[8] =
	{ 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
/*
 * PCI table for all supported controllers. Entries with PCI_ANY_ID for the
 * subsystem IDs match every board based on that device; the Dell/LSI PERC
 * variants are matched on their specific subsystem IDs. See the controller
 * list in the file header comment for the human-readable names.
 */
static struct pci_device_id pci_id_table_g[] =  {
	{
		PCI_VENDOR_ID_DELL,
		PCI_DEVICE_ID_PERC4_DI_DISCOVERY,
		PCI_VENDOR_ID_DELL,
		PCI_SUBSYS_ID_PERC4_DI_DISCOVERY,
	},
	{
		PCI_VENDOR_ID_LSI_LOGIC,
		PCI_DEVICE_ID_PERC4_SC,
		PCI_VENDOR_ID_DELL,
		PCI_SUBSYS_ID_PERC4_SC,
	},
	{
		PCI_VENDOR_ID_LSI_LOGIC,
		PCI_DEVICE_ID_PERC4_DC,
		PCI_VENDOR_ID_DELL,
		PCI_SUBSYS_ID_PERC4_DC,
	},
	{
		PCI_VENDOR_ID_LSI_LOGIC,
		PCI_DEVICE_ID_VERDE,
		PCI_ANY_ID,
		PCI_ANY_ID,
	},
	{
		PCI_VENDOR_ID_DELL,
		PCI_DEVICE_ID_PERC4_DI_EVERGLADES,
		PCI_VENDOR_ID_DELL,
		PCI_SUBSYS_ID_PERC4_DI_EVERGLADES,
	},
	{
		PCI_VENDOR_ID_DELL,
		PCI_DEVICE_ID_PERC4E_SI_BIGBEND,
		PCI_VENDOR_ID_DELL,
		PCI_SUBSYS_ID_PERC4E_SI_BIGBEND,
	},
	{
		PCI_VENDOR_ID_DELL,
		PCI_DEVICE_ID_PERC4E_DI_KOBUK,
		PCI_VENDOR_ID_DELL,
		PCI_SUBSYS_ID_PERC4E_DI_KOBUK,
	},
	{
		PCI_VENDOR_ID_DELL,
		PCI_DEVICE_ID_PERC4E_DI_CORVETTE,
		PCI_VENDOR_ID_DELL,
		PCI_SUBSYS_ID_PERC4E_DI_CORVETTE,
	},
	{
		PCI_VENDOR_ID_DELL,
		PCI_DEVICE_ID_PERC4E_DI_EXPEDITION,
		PCI_VENDOR_ID_DELL,
		PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION,
	},
	{
		PCI_VENDOR_ID_DELL,
		PCI_DEVICE_ID_PERC4E_DI_GUADALUPE,
		PCI_VENDOR_ID_DELL,
		PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE,
	},
	{
		PCI_VENDOR_ID_LSI_LOGIC,
		PCI_DEVICE_ID_DOBSON,
		PCI_ANY_ID,
		PCI_ANY_ID,
	},
	{
		PCI_VENDOR_ID_AMI,
		PCI_DEVICE_ID_AMI_MEGARAID3,
		PCI_ANY_ID,
		PCI_ANY_ID,
	},
	{
		PCI_VENDOR_ID_LSI_LOGIC,
		PCI_DEVICE_ID_AMI_MEGARAID3,
		PCI_ANY_ID,
		PCI_ANY_ID,
	},
	{
		PCI_VENDOR_ID_LSI_LOGIC,
		PCI_DEVICE_ID_LINDSAY,
		PCI_ANY_ID,
		PCI_ANY_ID,
	},
	{0}	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, pci_id_table_g);
/*
 * PCI driver hooks: probe/remove for hotplug, shutdown for a cache flush
 * at system power-off/reboot.
 */
static struct pci_driver megaraid_pci_driver = {
	.name		= "megaraid",
	.id_table	= pci_id_table_g,
	.probe		= megaraid_probe_one,
	.remove		= __devexit_p(megaraid_detach_one),
	.shutdown	= megaraid_mbox_shutdown,
};
// definitions for the device attributes for exporting logical drive number
// for a scsi address (Host, Channel, Id, Lun)

// Per-host sysfs attribute: application handle for this adapter
DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
		NULL);

// Host template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_shost_attrs[] = {
	&dev_attr_megaraid_mbox_app_hndl,
	NULL,
};

// Per-device sysfs attribute: logical drive number for a scsi address
DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);

// SCSI-device template initializer for megaraid mbox sysfs device attributes
static struct device_attribute *megaraid_sdev_attrs[] = {
	&dev_attr_megaraid_mbox_ld,
	NULL,
};
/**
 * megaraid_change_queue_depth - Change the device's queue depth
 * @sdev	: scsi device struct
 * @qdepth	: depth to set
 * @reason	: calling context
 *
 * Clamp the requested depth to the adapter-wide command limit and apply it.
 *
 * Return value:
 * actual depth set, or -EOPNOTSUPP for any context other than the default
 */
static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth,
				       int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	// never allow a single device to queue more than the adapter can take
	scsi_adjust_queue_depth(sdev, 0,
		qdepth > MBOX_MAX_SCSI_CMDS ? MBOX_MAX_SCSI_CMDS : qdepth);

	return sdev->queue_depth;
}
/*
 * Scsi host template for megaraid unified driver.
 * All three reset granularities (device, bus, host) funnel into the same
 * handler; abort has its own. Queue-depth changes are clamped by
 * megaraid_change_queue_depth().
 */
static struct scsi_host_template megaraid_template_g = {
	.module				= THIS_MODULE,
	.name				= "LSI Logic MegaRAID driver",
	.proc_name			= "megaraid",
	.queuecommand			= megaraid_queue_command,
	.eh_abort_handler		= megaraid_abort_handler,
	.eh_device_reset_handler	= megaraid_reset_handler,
	.eh_bus_reset_handler		= megaraid_reset_handler,
	.eh_host_reset_handler		= megaraid_reset_handler,
	.change_queue_depth		= megaraid_change_queue_depth,
	.use_clustering			= ENABLE_CLUSTERING,
	.sdev_attrs			= megaraid_sdev_attrs,
	.shost_attrs			= megaraid_shost_attrs,
};
/**
 * megaraid_init - module load hook
 *
 * Validate module parameters, then register with the PCI subsystem as a
 * hotplug-capable driver and let it discover our adapters.
 */
static int __init
megaraid_init(void)
{
	int err;

	// Announce the driver version
	con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION,
		MEGARAID_EXT_VERSION));

	// clamp out-of-range module parameters
	if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) {
		megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS;
		con_log(CL_ANN, (KERN_WARNING
			"megaraid mailbox: max commands per lun reset to %d\n",
			MBOX_MAX_SCSI_CMDS));
	}

	// register as a PCI hot-plug driver module
	err = pci_register_driver(&megaraid_pci_driver);
	if (err < 0)
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: could not register hotplug support.\n"));

	return err;
}
/**
 * megaraid_exit - driver unload entry point
 *
 * Unregister from the PCI subsystem; this tears down every adapter that
 * megaraid_init() caused to be probed.
 */
static void __exit
megaraid_exit(void)
{
	con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));

	pci_unregister_driver(&megaraid_pci_driver);
}
/**
 * megaraid_probe_one - PCI hotplug entry point
 * @pdev	: handle to this controller's PCI configuration space
 * @id	: pci device id of the class of controllers
 *
 * This routine should be called whenever a new adapter is detected by the
 * PCI hotplug subsystem. It enables the device, allocates and initializes
 * the per-adapter soft state, brings up the mailbox controller, registers
 * with the common management module and finally attaches to the SCSI
 * mid-layer. On any failure, everything acquired so far is unwound via the
 * goto ladder at the bottom.
 *
 * Return value:
 * 0 on success, -ENODEV on any failure
 */
static int __devinit
megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	adapter_t	*adapter;

	// detected a new controller
	con_log(CL_ANN, (KERN_INFO
		"megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
		pdev->vendor, pdev->device, pdev->subsystem_vendor,
		pdev->subsystem_device));

	con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number,
		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)));

	if (pci_enable_device(pdev)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: pci_enable_device failed\n"));

		return -ENODEV;
	}

	// Enable bus-mastering on this controller
	pci_set_master(pdev);

	// Allocate the per driver initialization structure
	adapter = kzalloc(sizeof(adapter_t), GFP_KERNEL);

	if (adapter == NULL) {
		con_log(CL_ANN, (KERN_WARNING
		"megaraid: out of memory, %s %d.\n", __func__, __LINE__));

		goto out_probe_one;
	}

	// set up PCI related soft state and other pre-known parameters
	adapter->unique_id	= pdev->bus->number << 8 | pdev->devfn;
	adapter->irq		= pdev->irq;
	adapter->pdev		= pdev;

	atomic_set(&adapter->being_detached, 0);

	// Setup the default DMA mask. This would be changed later on
	// depending on hardware capabilities
	if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: pci_set_dma_mask failed:%d\n", __LINE__));

		goto out_free_adapter;
	}

	// Initialize the synchronization lock for kernel and LLD
	spin_lock_init(&adapter->lock);

	// Initialize the command queues: the list of free SCBs and the list
	// of pending SCBs.
	INIT_LIST_HEAD(&adapter->kscb_pool);
	spin_lock_init(SCSI_FREE_LIST_LOCK(adapter));

	INIT_LIST_HEAD(&adapter->pend_list);
	spin_lock_init(PENDING_LIST_LOCK(adapter));

	INIT_LIST_HEAD(&adapter->completed_list);
	spin_lock_init(COMPLETED_LIST_LOCK(adapter));

	// Start the mailbox based controller
	if (megaraid_init_mbox(adapter) != 0) {
		// FIX: log message previously read "maibox"
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: mailbox adapter did not initialize\n"));

		goto out_free_adapter;
	}

	// Register with LSI Common Management Module
	if (megaraid_cmm_register(adapter) != 0) {
		con_log(CL_ANN, (KERN_WARNING
		"megaraid: could not register with management module\n"));

		goto out_fini_mbox;
	}

	// setup adapter handle in PCI soft state
	pci_set_drvdata(pdev, adapter);

	// attach with scsi mid-layer
	if (megaraid_io_attach(adapter) != 0) {
		con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n"));

		goto out_cmm_unreg;
	}

	return 0;

	// unwind in the reverse order of acquisition
out_cmm_unreg:
	pci_set_drvdata(pdev, NULL);
	megaraid_cmm_unregister(adapter);
out_fini_mbox:
	megaraid_fini_mbox(adapter);
out_free_adapter:
	kfree(adapter);
out_probe_one:
	pci_disable_device(pdev);

	return -ENODEV;
}
/**
 * megaraid_detach_one - release framework resources and call LLD release routine
 * @pdev	: handle for our PCI configuration space
 *
 * This routine is called during driver unload. We free all the allocated
 * resources and call the corresponding LLD so that it can also release all
 * its resources.
 *
 * This routine is also called from the PCI hotplug system.
 */
static void
megaraid_detach_one(struct pci_dev *pdev)
{
	adapter_t		*adapter;
	struct Scsi_Host	*host;

	// Start a rollback on this adapter
	adapter = pci_get_drvdata(pdev);

	if (!adapter) {
		// drvdata already cleared: nothing to do for this device
		con_log(CL_ANN, (KERN_CRIT
		"megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
			pdev->vendor, pdev->device, pdev->subsystem_vendor,
			pdev->subsystem_device));

		return;
	}
	else {
		con_log(CL_ANN, (KERN_NOTICE
		"megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
			pdev->vendor, pdev->device, pdev->subsystem_vendor,
			pdev->subsystem_device));
	}

	// keep a handle to the Scsi_Host; the adapter soft state (which
	// embeds the reference) is freed before the final scsi_host_put()
	host = adapter->host;

	// do not allow any more requests from the management module for this
	// adapter.
	// FIXME: How do we account for the request which might still be
	// pending with us?
	atomic_set(&adapter->being_detached, 1);

	// detach from the IO sub-system
	megaraid_io_detach(adapter);

	// reset the device state in the PCI structure. We check this
	// condition when we enter here. If the device state is NULL,
	// that would mean the device has already been removed
	pci_set_drvdata(pdev, NULL);

	// Unregister from common management module
	//
	// FIXME: this must return success or failure for conditions if there
	// is a command pending with LLD or not.
	megaraid_cmm_unregister(adapter);

	// finalize the mailbox based controller and release all resources
	megaraid_fini_mbox(adapter);

	kfree(adapter);

	// drop the mid-layer's reference; host memory is released here
	scsi_host_put(host);

	pci_disable_device(pdev);

	return;
}
/**
 * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA
 * @pdev	: generic driver model device
 *
 * Shutdown notification: flush the adapter's cache so no dirty data is
 * lost across the power transition.
 */
static void
megaraid_mbox_shutdown(struct pci_dev *pdev)
{
	static int	counter;
	adapter_t	*adapter = pci_get_drvdata(pdev);

	if (!adapter) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: null device in shutdown\n"));
		return;
	}

	// flush caches now; counter only labels the log line per adapter
	con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...",
		counter++));

	megaraid_mbox_flush_cache(adapter);

	con_log(CL_ANN, ("done\n"));
}
/**
 * megaraid_io_attach - attach a device with the IO subsystem
 * @adapter	: controller's soft state
 *
 * Allocate a Scsi_Host for this adapter, copy the limits discovered during
 * mailbox initialization into it, register it with the mid-layer and kick
 * off a bus scan.
 *
 * Return value:
 * 0 on success, -1 on failure (host already released via scsi_host_put)
 */
static int
megaraid_io_attach(adapter_t *adapter)
{
	struct Scsi_Host	*host;

	// Initialize SCSI Host structure
	host = scsi_host_alloc(&megaraid_template_g, 8);
	if (!host) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid mbox: scsi_register failed\n"));

		return -1;
	}

	// cross-link the host and the adapter soft state
	SCSIHOST2ADAP(host)	= (caddr_t)adapter;
	adapter->host		= host;

	// publish the limits discovered in megaraid_init_mbox()
	host->irq		= adapter->irq;
	host->unique_id		= adapter->unique_id;
	host->can_queue		= adapter->max_cmds;
	host->this_id		= adapter->init_id;
	host->sg_tablesize	= adapter->sglen;
	host->max_sectors	= adapter->max_sectors;
	host->cmd_per_lun	= adapter->cmd_per_lun;
	host->max_channel	= adapter->max_channel;
	host->max_id		= adapter->max_target;
	host->max_lun		= adapter->max_lun;

	// notify mid-layer about the new controller
	if (scsi_add_host(host, &adapter->pdev->dev)) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid mbox: scsi_add_host failed\n"));

		scsi_host_put(host);

		return -1;
	}

	scsi_scan_host(host);

	return 0;
}
/**
 * megaraid_io_detach - detach a device from the IO subsystem
 * @adapter	: controller's soft state
 *
 * Unregister this adapter's Scsi_Host from the mid-layer. The host memory
 * itself is released later by the caller via scsi_host_put().
 */
static void
megaraid_io_detach(adapter_t *adapter)
{
	con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n"));

	scsi_remove_host(adapter->host);
}
/*
 * START: Mailbox Low Level Driver
 *
 * This is section specific to the single mailbox based controllers
 */

/**
 * megaraid_init_mbox - initialize controller
 * @adapter		: our soft state
 *
 * - Allocate 16-byte aligned mailbox memory for firmware handshake
 * - Allocate controller's memory resources
 * - Find out all initialization data
 * - Allocate memory required for all the commands
 * - Use internal library of FW routines, build up complete soft state
 *
 * Return value:
 * 0 on success, -1 on failure (all acquired resources unwound via the
 * goto ladder at the bottom)
 */
static int __devinit
megaraid_init_mbox(adapter_t *adapter)
{
	struct pci_dev		*pdev;
	mraid_device_t		*raid_dev;
	int			i;
	uint32_t		magic64;

	adapter->ito	= MBOX_TIMEOUT;
	pdev		= adapter->pdev;

	/*
	 * Allocate and initialize the init data structure for mailbox
	 * controllers
	 */
	raid_dev = kzalloc(sizeof(mraid_device_t), GFP_KERNEL);
	if (raid_dev == NULL) return -1;

	/*
	 * Attach the adapter soft state to raid device soft state
	 */
	adapter->raid_device	= (caddr_t)raid_dev;
	raid_dev->fast_load	= megaraid_fast_load;

	// our baseport
	raid_dev->baseport = pci_resource_start(pdev, 0);

	if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) {

		con_log(CL_ANN, (KERN_WARNING
				"megaraid: mem region busy\n"));

		goto out_free_raid_dev;
	}

	// map only the first 128 bytes of BAR 0 -- the mailbox registers
	raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128);

	if (!raid_dev->baseaddr) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid: could not map hba memory\n") );

		goto out_release_regions;
	}

	/* initialize the mutual exclusion lock for the mailbox */
	spin_lock_init(&raid_dev->mailbox_lock);

	/* allocate memory required for commands */
	if (megaraid_alloc_cmd_packets(adapter) != 0)
		goto out_iounmap;

	/*
	 * Issue SYNC cmd to flush the pending cmds in the adapter
	 * and initialize its internal state
	 */
	if (megaraid_mbox_fire_sync_cmd(adapter))
		con_log(CL_ANN, ("megaraid: sync cmd failed\n"));

	/*
	 * Setup the rest of the soft state using the library of
	 * FW routines
	 */

	/* request IRQ and register the interrupt service routine */
	if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid",
		adapter)) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid: Couldn't register IRQ %d!\n", adapter->irq));

		goto out_alloc_cmds;
	}

	// Product info
	if (megaraid_mbox_product_info(adapter) != 0)
		goto out_free_irq;

	// Do we support extended CDBs
	adapter->max_cdb_sz = 10;
	if (megaraid_mbox_extended_cdb(adapter) == 0) {
		adapter->max_cdb_sz = 16;
	}

	/*
	 * Do we support cluster environment, if we do, what is the initiator
	 * id.
	 * NOTE: In a non-cluster aware firmware environment, the LLD should
	 * return 7 as initiator id.
	 */
	adapter->ha		= 0;
	adapter->init_id	= -1;
	if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) {
		adapter->ha = 1;
	}

	/*
	 * Prepare the device ids array to have the mapping between the kernel
	 * device address and megaraid device address.
	 * We export the physical devices on their actual addresses. The
	 * logical drives are exported on a virtual SCSI channel
	 */
	megaraid_mbox_setup_device_map(adapter);

	// If the firmware supports random deletion, update the device id map
	if (megaraid_mbox_support_random_del(adapter)) {

		// Change the logical drives numbers in device_ids array one
		// slot in device_ids is reserved for target id, that's why
		// "<=" below
		for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) {
			adapter->device_ids[adapter->max_channel][i] += 0x80;
		}
		adapter->device_ids[adapter->max_channel][adapter->init_id] =
			0xFF;

		raid_dev->random_del_supported = 1;
	}

	/*
	 * find out the maximum number of scatter-gather elements supported by
	 * this firmware
	 */
	adapter->sglen = megaraid_mbox_get_max_sg(adapter);

	// enumerate RAID and SCSI channels so that all devices on SCSI
	// channels can later be exported, including disk devices
	megaraid_mbox_enum_raid_scsi(adapter);

	/*
	 * Other parameters required by upper layer
	 *
	 * maximum number of sectors per IO command
	 */
	adapter->max_sectors = megaraid_max_sectors;

	/*
	 * number of queued commands per LUN.
	 */
	adapter->cmd_per_lun = megaraid_cmd_per_lun;

	/*
	 * Allocate resources required to issue FW calls, when sysfs is
	 * accessed
	 */
	if (megaraid_sysfs_alloc_resources(adapter) != 0)
		goto out_free_irq;

	// Set the DMA mask to 64-bit. All supported controllers as capable of
	// DMA in this range
	pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);

	// 64-bit DMA is enabled either by the 64-bit signature in config
	// space (excluding the SATA 150-4/150-6 boards) or by an explicit
	// whitelist of known 64-bit capable device IDs
	if (((magic64 == HBA_SIGNATURE_64_BIT) &&
		((adapter->pdev->subsystem_device !=
		PCI_SUBSYS_ID_MEGARAID_SATA_150_6) &&
		(adapter->pdev->subsystem_device !=
		PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
		adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
		adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
		adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
		(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
		adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
		(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
		adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
		if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
			con_log(CL_ANN, (KERN_WARNING
				"megaraid: DMA mask for 64-bit failed\n"));

			// fall back to the 32-bit mask set during probe
			if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
				con_log(CL_ANN, (KERN_WARNING
					"megaraid: 32-bit DMA mask failed\n"));
				goto out_free_sysfs_res;
			}
		}
	}

	// setup tasklet for DPC
	tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc,
			(unsigned long)adapter);

	con_log(CL_DLEVEL1, (KERN_INFO
		"megaraid mbox hba successfully initialized\n"));

	return 0;

	// unwind in reverse order of acquisition
out_free_sysfs_res:
	megaraid_sysfs_free_resources(adapter);
out_free_irq:
	free_irq(adapter->irq, adapter);
out_alloc_cmds:
	megaraid_free_cmd_packets(adapter);
out_iounmap:
	iounmap(raid_dev->baseaddr);
out_release_regions:
	pci_release_regions(pdev);
out_free_raid_dev:
	kfree(raid_dev);

	return -1;
}
/**
 * megaraid_fini_mbox - undo controller initialization
 * @adapter		: our soft state
 *
 * Reverse of megaraid_init_mbox(): flush the adapter cache, then release
 * every resource in the opposite order of acquisition.
 */
static void
megaraid_fini_mbox(adapter_t *adapter)
{
	mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);

	// flush all caches
	megaraid_mbox_flush_cache(adapter);

	// stop deferred completion processing before freeing its resources
	tasklet_kill(&adapter->dpc_h);

	megaraid_sysfs_free_resources(adapter);

	megaraid_free_cmd_packets(adapter);

	free_irq(adapter->irq, adapter);

	iounmap(raid_dev->baseaddr);

	pci_release_regions(adapter->pdev);

	kfree(raid_dev);

	return;
}
/**
 * megaraid_alloc_cmd_packets - allocate shared mailbox
 * @adapter		: soft state of the raid controller
 *
 * Allocate and align the shared mailbox. This mailbox is used to issue
 * all the commands. For IO based controllers, the mailbox is also registered
 * with the FW. Allocate memory for all commands as well.
 * This is our big allocator.
 *
 * Return value:
 * 0 on success, -1 on failure (partial allocations freed via goto ladder)
 */
static int
megaraid_alloc_cmd_packets(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	struct pci_dev		*pdev;
	unsigned long		align;
	scb_t			*scb;
	mbox_ccb_t		*ccb;
	struct mraid_pci_blk	*epthru_pci_blk;
	struct mraid_pci_blk	*sg_pci_blk;
	struct mraid_pci_blk	*mbox_pci_blk;
	int			i;

	pdev = adapter->pdev;

	/*
	 * Setup the mailbox
	 * Allocate the common 16-byte aligned memory for the handshake
	 * mailbox.
	 */
	raid_dev->una_mbox64 = pci_alloc_consistent(adapter->pdev,
			sizeof(mbox64_t), &raid_dev->una_mbox64_dma);

	if (!raid_dev->una_mbox64) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: out of memory, %s %d\n", __func__,
			__LINE__));
		return -1;
	}
	memset(raid_dev->una_mbox64, 0, sizeof(mbox64_t));

	/*
	 * Align the mailbox at 16-byte boundary
	 */
	raid_dev->mbox	= &raid_dev->una_mbox64->mbox32;

	// round the virtual address up to the next 16-byte boundary
	raid_dev->mbox	= (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) &
			(~0UL ^ 0xFUL));

	// the 64-bit mailbox header sits 8 bytes before the aligned mbox
	raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8);

	// apply the same offset to the DMA address so CPU and device views
	// of the aligned mailbox stay consistent
	align = ((void *)raid_dev->mbox -
			((void *)&raid_dev->una_mbox64->mbox32));

	raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 +
			align;

	// Allocate memory for commands issued internally
	adapter->ibuf = pci_alloc_consistent(pdev, MBOX_IBUF_SIZE,
				&adapter->ibuf_dma_h);
	if (!adapter->ibuf) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid: out of memory, %s %d\n", __func__,
			__LINE__));

		goto out_free_common_mbox;
	}
	memset(adapter->ibuf, 0, MBOX_IBUF_SIZE);

	// Allocate memory for our SCSI Command Blocks and their associated
	// memory

	/*
	 * Allocate memory for the base list of scb. Later allocate memory for
	 * CCBs and embedded components of each CCB and point the pointers in
	 * scb to the allocated components
	 * NOTE: The code to allocate SCB will be duplicated in all the LLD
	 * since the calling routine does not yet know the number of available
	 * commands.
	 */
	adapter->kscb_list = kcalloc(MBOX_MAX_SCSI_CMDS, sizeof(scb_t), GFP_KERNEL);

	if (adapter->kscb_list == NULL) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: out of memory, %s %d\n", __func__,
			__LINE__));
		goto out_free_ibuf;
	}

	// memory allocation for our command packets
	if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: out of memory, %s %d\n", __func__,
			__LINE__));
		goto out_free_scb_list;
	}

	// Adjust the scb pointers and link in the free pool
	epthru_pci_blk	= raid_dev->epthru_pool;
	sg_pci_blk	= raid_dev->sg_pool;
	mbox_pci_blk	= raid_dev->mbox_pool;

	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		scb			= adapter->kscb_list + i;
		ccb			= raid_dev->ccb_list + i;

		// per-command mailbox: the 64-bit header starts at offset 8,
		// the 32-bit mailbox (16-byte aligned) at offset 16
		ccb->mbox	= (mbox_t *)(mbox_pci_blk[i].vaddr + 16);
		ccb->raw_mbox	= (uint8_t *)ccb->mbox;
		ccb->mbox64	= (mbox64_t *)(mbox_pci_blk[i].vaddr + 8);
		ccb->mbox_dma_h	= (unsigned long)mbox_pci_blk[i].dma_addr + 16;

		// make sure the mailbox is aligned properly
		if (ccb->mbox_dma_h & 0x0F) {
			con_log(CL_ANN, (KERN_CRIT
				"megaraid mbox: not aligned on 16-bytes\n"));

			goto out_teardown_dma_pools;
		}

		// passthru piggybacks on the (larger) extended passthru block
		ccb->epthru	= (mraid_epassthru_t *)
						epthru_pci_blk[i].vaddr;
		ccb->epthru_dma_h	= epthru_pci_blk[i].dma_addr;
		ccb->pthru		= (mraid_passthru_t *)ccb->epthru;
		ccb->pthru_dma_h	= ccb->epthru_dma_h;

		// 32-bit SGL shares the memory allocated for the 64-bit SGL
		ccb->sgl64		= (mbox_sgl64 *)sg_pci_blk[i].vaddr;
		ccb->sgl_dma_h		= sg_pci_blk[i].dma_addr;
		ccb->sgl32		= (mbox_sgl32 *)ccb->sgl64;

		scb->ccb		= (caddr_t)ccb;
		scb->gp			= 0;

		scb->sno		= i;	// command index

		scb->scp		= NULL;
		scb->state		= SCB_FREE;
		scb->dma_direction	= PCI_DMA_NONE;
		scb->dma_type		= MRAID_DMA_NONE;
		scb->dev_channel	= -1;
		scb->dev_target		= -1;

		// put scb in the free pool
		list_add_tail(&scb->list, &adapter->kscb_pool);
	}

	return 0;

out_teardown_dma_pools:
	megaraid_mbox_teardown_dma_pools(adapter);
out_free_scb_list:
	kfree(adapter->kscb_list);
out_free_ibuf:
	pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
		adapter->ibuf_dma_h);
out_free_common_mbox:
	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);

	return -1;
}
/**
 * megaraid_free_cmd_packets - free memory
 * @adapter		: soft state of the raid controller
 *
 * Release memory resources allocated for commands: the DMA pools, the scb
 * list, the internal command buffer and the unaligned common mailbox --
 * the reverse of megaraid_alloc_cmd_packets().
 */
static void
megaraid_free_cmd_packets(adapter_t *adapter)
{
	mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);

	megaraid_mbox_teardown_dma_pools(adapter);

	kfree(adapter->kscb_list);

	pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
		(void *)adapter->ibuf, adapter->ibuf_dma_h);

	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
	return;
}
/**
 * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
 * @adapter		: HBA soft state
 *
 * Setup the dma pools for mailbox, passthru and extended passthru structures,
 * and scatter-gather lists. One block of each kind is pre-allocated per
 * command slot. On any failure, everything allocated so far is released
 * through megaraid_mbox_teardown_dma_pools().
 *
 * Return value:
 * 0 on success, -1 on failure
 */
static int
megaraid_mbox_setup_dma_pools(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	struct mraid_pci_blk	*epthru_pci_blk;
	struct mraid_pci_blk	*sg_pci_blk;
	struct mraid_pci_blk	*mbox_pci_blk;
	int			i;

	// Allocate memory for 16-bytes aligned mailboxes
	// (extra 16 bytes leave room for the alignment adjustment done in
	// megaraid_alloc_cmd_packets())
	raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
						adapter->pdev,
						sizeof(mbox64_t) + 16,
						16, 0);

	if (raid_dev->mbox_pool_handle == NULL) {
		goto fail_setup_dma_pool;
	}

	mbox_pci_blk = raid_dev->mbox_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		mbox_pci_blk[i].vaddr = pci_pool_alloc(
						raid_dev->mbox_pool_handle,
						GFP_KERNEL,
						&mbox_pci_blk[i].dma_addr);
		if (!mbox_pci_blk[i].vaddr) {
			goto fail_setup_dma_pool;
		}
	}

	/*
	 * Allocate memory for each embedded passthru structure pointer
	 * Request for a 128 bytes aligned structure for each passthru command
	 * structure
	 * Since passthru and extended passthru commands are exclusive, they
	 * share common memory pool. Passthru structures piggyback on memory
	 * allocated to extended passthru since passthru is smaller of the two
	 */
	raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
			adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);

	if (raid_dev->epthru_pool_handle == NULL) {
		goto fail_setup_dma_pool;
	}

	epthru_pci_blk = raid_dev->epthru_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		epthru_pci_blk[i].vaddr = pci_pool_alloc(
						raid_dev->epthru_pool_handle,
						GFP_KERNEL,
						&epthru_pci_blk[i].dma_addr);
		if (!epthru_pci_blk[i].vaddr) {
			goto fail_setup_dma_pool;
		}
	}

	// Allocate memory for each scatter-gather list. Request for 512 bytes
	// alignment for each sg list
	raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
					adapter->pdev,
					sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
					512, 0);

	if (raid_dev->sg_pool_handle == NULL) {
		goto fail_setup_dma_pool;
	}

	sg_pci_blk = raid_dev->sg_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		sg_pci_blk[i].vaddr = pci_pool_alloc(
						raid_dev->sg_pool_handle,
						GFP_KERNEL,
						&sg_pci_blk[i].dma_addr);
		if (!sg_pci_blk[i].vaddr) {
			goto fail_setup_dma_pool;
		}
	}

	return 0;

fail_setup_dma_pool:
	megaraid_mbox_teardown_dma_pools(adapter);
	return -1;
}
/**
 * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
 * @adapter	: HBA soft state
 *
 * Teardown the dma pool for mailbox, passthru and extended passthru
 * structures, and scatter-gather lists. Safe to call on a partially set-up
 * state: each loop stops at the first NULL vaddr and each pool handle is
 * checked before destruction, so this doubles as the failure path for
 * megaraid_mbox_setup_dma_pools().
 */
static void
megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	struct mraid_pci_blk	*epthru_pci_blk;
	struct mraid_pci_blk	*sg_pci_blk;
	struct mraid_pci_blk	*mbox_pci_blk;
	int			i;

	sg_pci_blk = raid_dev->sg_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
		pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
			sg_pci_blk[i].dma_addr);
	}
	if (raid_dev->sg_pool_handle)
		pci_pool_destroy(raid_dev->sg_pool_handle);


	epthru_pci_blk = raid_dev->epthru_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
		pci_pool_free(raid_dev->epthru_pool_handle,
			epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
	}
	if (raid_dev->epthru_pool_handle)
		pci_pool_destroy(raid_dev->epthru_pool_handle);


	mbox_pci_blk = raid_dev->mbox_pool;
	for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
		pci_pool_free(raid_dev->mbox_pool_handle,
			mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
	}
	if (raid_dev->mbox_pool_handle)
		pci_pool_destroy(raid_dev->mbox_pool_handle);

	return;
}
/**
 * megaraid_alloc_scb - detach and return a scb from the free list
 * @adapter	: controller's soft state
 * @scp		: pointer to the scsi command to be executed
 *
 * Take an scb off the head of the free pool, mark it active and bind the
 * scsi command to it. Returns %NULL if the pool is exhausted.
 */
static scb_t *
megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
{
	scb_t		*scb;
	unsigned long	flags;

	// pull one scb off the head of the free pool, if any
	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);

	if (list_empty(&adapter->kscb_pool)) {
		spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
		return NULL;
	}

	scb = list_entry(adapter->kscb_pool.next, scb_t, list);
	list_del_init(&scb->list);

	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);

	// mark it busy and attach the scsi command; no DMA mapping yet
	scb->state	= SCB_ACTIVE;
	scb->scp	= scp;
	scb->dma_type	= MRAID_DMA_NONE;

	return scb;
}
/**
 * megaraid_dealloc_scb - return the scb to the free pool
 * @adapter	: controller's soft state
 * @scb		: scb to be freed
 *
 * Return the scb back to the free list of scbs. The caller must 'flush' the
 * SCB before calling us. E.g., performing pci_unmap and/or pci_sync etc.
 * NOTE NOTE: Make sure the scb is not on any list before calling this
 * routine.
 */
static inline void
megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
{
	unsigned long	flags;

	// reset the slot before it becomes visible on the free list
	scb->state	= SCB_FREE;
	scb->scp	= NULL;

	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
	list_add(&scb->list, &adapter->kscb_pool);
	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
}
/**
 * megaraid_mbox_mksgl - make the scatter-gather list
 * @adapter	: controller's soft state
 * @scb		: scsi control block
 *
 * DMA-map the command's data buffer and copy each segment into the scb's
 * 64-bit SGL.
 *
 * Return value:
 * number of scatter-gather entries (0 when the command moves no data)
 */
static int
megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
{
	struct scsi_cmnd	*scp	= scb->scp;
	mbox_ccb_t		*ccb	= (mbox_ccb_t *)scb->ccb;
	struct scatterlist	*sgl;
	int			sgcnt;
	int			i;

	sgcnt = scsi_dma_map(scp);
	BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen);

	// no mapping required if no data to be transferred
	if (!sgcnt)
		return 0;

	scb->dma_type = MRAID_DMA_WSG;

	// mirror the mapped segments into the hardware SGL
	scsi_for_each_sg(scp, sgl, sgcnt, i) {
		ccb->sgl64[i].address	= sg_dma_address(sgl);
		ccb->sgl64[i].length	= sg_dma_len(sgl);
	}

	// Return count of SG nodes
	return sgcnt;
}
/**
 * mbox_post_cmd - issue a mailbox command
 * @adapter	: controller's soft state
 * @scb	: command to be issued
 *
 * Post the command to the controller if mailbox is available. Returns 0 on
 * success, -1 if the mailbox stayed busy for the whole wait window (the
 * caller should retry later).
 */
static int
mbox_post_cmd(adapter_t *adapter, scb_t *scb)
{
	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
	mbox64_t	*mbox64;
	mbox_t		*mbox;
	mbox_ccb_t	*ccb;
	unsigned long	flags;
	unsigned int	i = 0;

	ccb	= (mbox_ccb_t *)scb->ccb;
	mbox	= raid_dev->mbox;
	mbox64	= raid_dev->mbox64;

	/*
	 * Check for busy mailbox. If it is, return failure - the caller
	 * should retry later.
	 */
	spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);

	if (unlikely(mbox->busy)) {
		// poll the FW-owned busy flag for up to max_mbox_busy_wait us
		do {
			udelay(1);
			i++;
			rmb();	// re-read mbox->busy from memory each pass
		} while(mbox->busy && (i < max_mbox_busy_wait));

		if (mbox->busy) {
			spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
			return -1;
		}
	}

	// Copy this command's mailbox data into "adapter's" mailbox
	// NOTE(review): 22 bytes presumably covers the mbox64 header plus
	// the 16-byte legacy mailbox prefix — confirm against mbox64_t layout
	memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22);
	mbox->cmdid = scb->sno;

	adapter->outstanding_cmds++;

	// flush CPU-written data to the device before the doorbell ring
	if (scb->dma_direction == PCI_DMA_TODEVICE)
		pci_dma_sync_sg_for_device(adapter->pdev,
					   scsi_sglist(scb->scp),
					   scsi_sg_count(scb->scp),
					   PCI_DMA_TODEVICE);

	mbox->busy	= 1;	// Set busy
	mbox->poll	= 0;
	mbox->ack	= 0;
	wmb();			// mailbox contents visible before doorbell

	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);

	spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);

	return 0;
}
/**
* megaraid_queue_command - generic queue entry point for all LLDs
* @scp : pointer to the scsi command to be executed
* @done : callback routine to be called after the cmd has be completed
*
* Queue entry point for mailbox based controllers.
*/
static int
megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
{
adapter_t *adapter;
scb_t *scb;
int if_busy;
adapter = SCP2ADAPTER(scp);
scp->scsi_done = done;
scp->result = 0;
/*
* Allocate and build a SCB request
* if_busy flag will be set if megaraid_mbox_build_cmd() command could
* not allocate scb. We will return non-zero status in that case.
* NOTE: scb can be null even though certain commands completed
* successfully, e.g., MODE_SENSE and TEST_UNIT_READY, it would
* return 0 in that case, and we would do the callback right away.
*/
if_busy = 0;
scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy);
if (!scb) { // command already completed
done(scp);
return 0;
}
megaraid_mbox_runpendq(adapter, scb);
return if_busy;
}
/**
 * megaraid_mbox_build_cmd - transform the mid-layer scsi commands
 * @adapter	: controller's soft state
 * @scp		: mid-layer scsi command pointer
 * @busy	: set if request could not be completed because of lack of
 *		resources
 *
 * Transform the mid-layer scsi command to megaraid firmware lingua.
 * Convert the command issued by mid-layer to format understood by megaraid
 * firmware. We also complete certain commands without sending them to firmware.
 *
 * Returns the prepared SCB, or NULL when the command was completed (or
 * failed) locally; in the NULL case scp->result is already set, and *busy
 * is set when the failure was lack of free SCBs.
 */
static scb_t *
megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
{
	mraid_device_t		*rdev = ADAP2RAIDDEV(adapter);
	int			channel;
	int			target;
	int			islogical;
	mbox_ccb_t		*ccb;
	mraid_passthru_t	*pthru;
	mbox64_t		*mbox64;
	mbox_t			*mbox;
	scb_t			*scb;
	char			skip[] = "skipping";
	char			scan[] = "scanning";
	char			*ss;

	/*
	 * Get the appropriate device map for the device this command is
	 * intended for
	 */
	MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);

	/*
	 * Logical drive commands
	 */
	if (islogical) {
		switch (scp->cmnd[0]) {
		case TEST_UNIT_READY:
			/*
			 * Do we support clustering and is the support enabled
			 * If no, return success always
			 */
			if (!adapter->ha) {
				scp->result = (DID_OK << 16);
				return NULL;
			}

			// clustered: query the reservation status via FW
			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
				scp->result = (DID_ERROR << 16);
				*busy = 1;
				return NULL;
			}

			scb->dma_direction	= scp->sc_data_direction;
			scb->dev_channel	= 0xFF;
			scb->dev_target		= target;
			ccb			= (mbox_ccb_t *)scb->ccb;

			/*
			 * The command id will be provided by the command
			 * issuance routine
			 */
			ccb->raw_mbox[0]	= CLUSTER_CMD;
			ccb->raw_mbox[2]	= RESERVATION_STATUS;
			ccb->raw_mbox[3]	= target;

			return scb;

		case MODE_SENSE:
		{
			// completed locally: return an all-zero mode page
			struct scatterlist	*sgl;
			caddr_t			vaddr;

			sgl = scsi_sglist(scp);
			if (sg_page(sgl)) {
				vaddr = (caddr_t) sg_virt(&sgl[0]);

				memset(vaddr, 0, scp->cmnd[4]);
			}
			else {
				con_log(CL_ANN, (KERN_WARNING
						 "megaraid mailbox: invalid sg:%d\n",
						 __LINE__));
			}
		}
			scp->result = (DID_OK << 16);
			return NULL;

		case INQUIRY:
			/*
			 * Display the channel scan for logical drives
			 * Do not display scan for a channel if already done.
			 */
			if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
				con_log(CL_ANN, (KERN_INFO
					"scsi[%d]: scanning scsi channel %d",
					adapter->host->host_no,
					SCP2CHANNEL(scp)));

				con_log(CL_ANN, (
					" [virtual] for logical drives\n"));

				rdev->last_disp |= (1L << SCP2CHANNEL(scp));
			}

			// EVPD pages are not supported: report ILLEGAL REQUEST
			if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
				scp->sense_buffer[0] = 0x70;
				scp->sense_buffer[2] = ILLEGAL_REQUEST;
				scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
				scp->result = CHECK_CONDITION << 1;
				return NULL;
			}

			/* Fall through */

		case READ_CAPACITY:
			/*
			 * Do not allow LUN > 0 for logical drives and
			 * requests for more than 40 logical drives
			 */
			if (SCP2LUN(scp)) {
				scp->result = (DID_BAD_TARGET << 16);
				return NULL;
			}
			if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) {
				scp->result = (DID_BAD_TARGET << 16);
				return NULL;
			}


			/* Allocate a SCB and initialize passthru */
			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
				scp->result = (DID_ERROR << 16);
				*busy = 1;
				return NULL;
			}

			ccb			= (mbox_ccb_t *)scb->ccb;
			scb->dev_channel	= 0xFF;
			scb->dev_target		= target;
			pthru			= ccb->pthru;
			mbox			= ccb->mbox;
			mbox64			= ccb->mbox64;

			pthru->timeout		= 0;
			pthru->ars		= 1;	// auto request sense
			pthru->reqsenselen	= 14;
			pthru->islogical	= 1;
			pthru->logdrv		= target;
			pthru->cdblen		= scp->cmd_len;
			memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);

			mbox->cmd		= MBOXCMD_PASSTHRU64;
			scb->dma_direction	= scp->sc_data_direction;

			pthru->dataxferlen	= scsi_bufflen(scp);
			pthru->dataxferaddr	= ccb->sgl_dma_h;
			pthru->numsge		= megaraid_mbox_mksgl(adapter,
							scb);

			// data goes through the pthru packet, not the mbox
			mbox->xferaddr		= 0xFFFFFFFF;
			mbox64->xferaddr_lo	= (uint32_t )ccb->pthru_dma_h;
			mbox64->xferaddr_hi	= 0;

			return scb;

		case READ_6:
		case WRITE_6:
		case READ_10:
		case WRITE_10:
		case READ_12:
		case WRITE_12:

			/*
			 * Allocate a SCB and initialize mailbox
			 */
			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
				scp->result = (DID_ERROR << 16);
				*busy = 1;
				return NULL;
			}
			ccb			= (mbox_ccb_t *)scb->ccb;
			scb->dev_channel	= 0xFF;
			scb->dev_target		= target;
			mbox			= ccb->mbox;
			mbox64			= ccb->mbox64;
			mbox->logdrv		= target;

			/*
			 * A little HACK: 2nd bit is zero for all scsi read
			 * commands and is set for all scsi write commands
			 */
			mbox->cmd = (scp->cmnd[0] & 0x02) ? MBOXCMD_LWRITE64:
					MBOXCMD_LREAD64 ;

			/*
			 * 6-byte READ(0x08) or WRITE(0x0A) cdb
			 */
			if (scp->cmd_len == 6) {
				mbox->numsectors = (uint32_t)scp->cmnd[4];
				mbox->lba =
					((uint32_t)scp->cmnd[1] << 16)	|
					((uint32_t)scp->cmnd[2] << 8)	|
					(uint32_t)scp->cmnd[3];

				// 6-byte CDBs carry only a 21-bit LBA
				mbox->lba &= 0x1FFFFF;
			}

			/*
			 * 10-byte READ(0x28) or WRITE(0x2A) cdb
			 */
			else if (scp->cmd_len == 10) {
				mbox->numsectors =
					(uint32_t)scp->cmnd[8] |
					((uint32_t)scp->cmnd[7] << 8);
				mbox->lba =
					((uint32_t)scp->cmnd[2] << 24) |
					((uint32_t)scp->cmnd[3] << 16) |
					((uint32_t)scp->cmnd[4] << 8) |
					(uint32_t)scp->cmnd[5];
			}

			/*
			 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
			 */
			else if (scp->cmd_len == 12) {
				mbox->lba =
					((uint32_t)scp->cmnd[2] << 24) |
					((uint32_t)scp->cmnd[3] << 16) |
					((uint32_t)scp->cmnd[4] << 8) |
					(uint32_t)scp->cmnd[5];

				mbox->numsectors =
					((uint32_t)scp->cmnd[6] << 24) |
					((uint32_t)scp->cmnd[7] << 16) |
					((uint32_t)scp->cmnd[8] << 8) |
					(uint32_t)scp->cmnd[9];
			}
			else {
				con_log(CL_ANN, (KERN_WARNING
					"megaraid: unsupported CDB length\n"));

				megaraid_dealloc_scb(adapter, scb);

				scp->result = (DID_ERROR << 16);
				return NULL;
			}

			scb->dma_direction = scp->sc_data_direction;

			// Calculate Scatter-Gather info
			mbox64->xferaddr_lo	= (uint32_t )ccb->sgl_dma_h;
			mbox->numsge		= megaraid_mbox_mksgl(adapter,
							scb);
			mbox->xferaddr		= 0xFFFFFFFF;
			mbox64->xferaddr_hi	= 0;

			return scb;

		case RESERVE:
		case RELEASE:
			/*
			 * Do we support clustering and is the support enabled
			 */
			if (!adapter->ha) {
				scp->result = (DID_BAD_TARGET << 16);
				return NULL;
			}

			/*
			 * Allocate a SCB and initialize mailbox
			 */
			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
				scp->result = (DID_ERROR << 16);
				*busy = 1;
				return NULL;
			}

			ccb			= (mbox_ccb_t *)scb->ccb;
			scb->dev_channel	= 0xFF;
			scb->dev_target		= target;
			ccb->raw_mbox[0]	= CLUSTER_CMD;
			ccb->raw_mbox[2]	= (scp->cmnd[0] == RESERVE) ?
						RESERVE_LD : RELEASE_LD;

			ccb->raw_mbox[3]	= target;
			scb->dma_direction	= scp->sc_data_direction;

			return scb;

		default:
			scp->result = (DID_BAD_TARGET << 16);
			return NULL;
		}
	}
	else { // Passthru device commands

		// Do not allow access to target id > 15 or LUN > 7
		if (target > 15 || SCP2LUN(scp) > 7) {
			scp->result = (DID_BAD_TARGET << 16);
			return NULL;
		}

		// if fast load option was set and scan for last device is
		// over, reset the fast_load flag so that during a possible
		// next scan, devices can be made available
		if (rdev->fast_load && (target == 15) &&
			(SCP2CHANNEL(scp) == adapter->max_channel -1)) {

			con_log(CL_ANN, (KERN_INFO
			"megaraid[%d]: physical device scan re-enabled\n",
				adapter->host->host_no));
			rdev->fast_load = 0;
		}

		/*
		 * Display the channel scan for physical devices
		 */
		if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
			ss = rdev->fast_load ? skip : scan;

			con_log(CL_ANN, (KERN_INFO
				"scsi[%d]: %s scsi channel %d [Phy %d]",
				adapter->host->host_no, ss, SCP2CHANNEL(scp),
				channel));

			con_log(CL_ANN, (
				" for non-raid devices\n"));

			rdev->last_disp |= (1L << SCP2CHANNEL(scp));
		}

		// disable channel sweep if fast load option given
		if (rdev->fast_load) {
			scp->result = (DID_BAD_TARGET << 16);
			return NULL;
		}

		// Allocate a SCB and initialize passthru
		if (!(scb = megaraid_alloc_scb(adapter, scp))) {
			scp->result = (DID_ERROR << 16);
			*busy = 1;
			return NULL;
		}

		ccb			= (mbox_ccb_t *)scb->ccb;
		scb->dev_channel	= channel;
		scb->dev_target		= target;
		scb->dma_direction	= scp->sc_data_direction;
		mbox			= ccb->mbox;
		mbox64			= ccb->mbox64;

		// Does this firmware support extended CDBs
		if (adapter->max_cdb_sz == 16) {
			mbox->cmd		= MBOXCMD_EXTPTHRU;

			megaraid_mbox_prepare_epthru(adapter, scb, scp);

			mbox64->xferaddr_lo	= (uint32_t)ccb->epthru_dma_h;
			mbox64->xferaddr_hi	= 0;
			mbox->xferaddr		= 0xFFFFFFFF;
		}
		else {
			mbox->cmd = MBOXCMD_PASSTHRU64;

			megaraid_mbox_prepare_pthru(adapter, scb, scp);

			mbox64->xferaddr_lo	= (uint32_t)ccb->pthru_dma_h;
			mbox64->xferaddr_hi	= 0;
			mbox->xferaddr		= 0xFFFFFFFF;
		}
		return scb;
	}

	// NOT REACHED
}
/**
 * megaraid_mbox_runpendq - execute commands queued in the pending queue
 * @adapter	: controller's soft state
 * @scb_q	: SCB to be queued in the pending list
 *
 * Scan the pending list for commands which are not yet issued and try to
 * post to the controller. The SCB can be a null pointer, which would indicate
 * no SCB to be queue, just try to execute the ones in the pending list.
 *
 * NOTE: We do not actually traverse the pending list. The SCBs are plucked
 * out from the head of the pending list. If it is successfully issued, the
 * next SCB is at the head now.
 */
static void
megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
{
	scb_t			*scb;
	unsigned long		flags;

	spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);

	if (scb_q) {
		scb_q->state = SCB_PENDQ;
		list_add_tail(&scb_q->list, &adapter->pend_list);
	}

	// if the adapter in not in quiescent mode, post the commands to FW
	if (adapter->quiescent) {
		spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
		return;
	}

	while (!list_empty(&adapter->pend_list)) {

		assert_spin_locked(PENDING_LIST_LOCK(adapter));

		scb = list_entry(adapter->pend_list.next, scb_t, list);

		// remove the scb from the pending list and try to
		// issue. If we are unable to issue it, put back in
		// the pending list and return
		list_del_init(&scb->list);

		// drop the lock across the post: mbox_post_cmd takes the
		// mailbox lock and may busy-wait
		spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);

		// if mailbox was busy, return SCB back to pending
		// list. Make sure to add at the head, since that's
		// where it would have been removed from
		scb->state = SCB_ISSUED;

		if (mbox_post_cmd(adapter, scb) != 0) {

			spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);

			scb->state = SCB_PENDQ;

			list_add(&scb->list, &adapter->pend_list);

			spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
				flags);

			return;
		}

		// re-take the lock before re-testing the list head
		spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
	}

	spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);


	return;
}
/**
 * megaraid_mbox_prepare_pthru - prepare a command for physical devices
 * @adapter	: pointer to controller's soft state
 * @scb	: scsi control block
 * @scp	: scsi command from the mid-layer
 *
 * Fill the passthru packet for a command headed to a scsi physical device.
 */
static void
megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
		struct scsi_cmnd *scp)
{
	mbox_ccb_t		*ccb	= (mbox_ccb_t *)scb->ccb;
	mraid_passthru_t	*pthru	= ccb->pthru;
	uint8_t			ch	= scb->dev_channel;
	uint8_t			tgt	= scb->dev_target;

	// 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
	pthru->timeout		= 4;
	pthru->ars		= 1;
	pthru->islogical	= 0;
	pthru->channel		= 0;
	pthru->target		= (ch << 4) | tgt;
	pthru->logdrv		= SCP2LUN(scp);
	pthru->reqsenselen	= 14;
	pthru->cdblen		= scp->cmd_len;

	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);

	if (!scsi_bufflen(scp)) {
		// no data phase for this command
		pthru->dataxferaddr	= 0;
		pthru->dataxferlen	= 0;
		pthru->numsge		= 0;
		return;
	}

	pthru->dataxferlen	= scsi_bufflen(scp);
	pthru->dataxferaddr	= ccb->sgl_dma_h;
	pthru->numsge		= megaraid_mbox_mksgl(adapter, scb);
}
/**
 * megaraid_mbox_prepare_epthru - prepare a command for physical devices
 * @adapter	: pointer to controller's soft state
 * @scb	: scsi control block
 * @scp	: scsi command from the mid-layer
 *
 * Fill the extended passthru packet for a scsi physical device. Used for
 * firmware which accepts extended CDBs (>10 bytes).
 */
static void
megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
		struct scsi_cmnd *scp)
{
	mbox_ccb_t		*ccb	= (mbox_ccb_t *)scb->ccb;
	mraid_epassthru_t	*epthru	= ccb->epthru;
	uint8_t			ch	= scb->dev_channel;
	uint8_t			tgt	= scb->dev_target;

	// 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
	epthru->timeout		= 4;
	epthru->ars		= 1;
	epthru->islogical	= 0;
	epthru->channel		= 0;
	epthru->target		= (ch << 4) | tgt;
	epthru->logdrv		= SCP2LUN(scp);
	epthru->reqsenselen	= 14;
	epthru->cdblen		= scp->cmd_len;

	memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);

	if (!scsi_bufflen(scp)) {
		// no data phase for this command
		epthru->dataxferaddr	= 0;
		epthru->dataxferlen	= 0;
		epthru->numsge		= 0;
		return;
	}

	epthru->dataxferlen	= scsi_bufflen(scp);
	epthru->dataxferaddr	= ccb->sgl_dma_h;
	epthru->numsge		= megaraid_mbox_mksgl(adapter, scb);
}
/**
 * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs
 * @adapter	: controller's soft state
 *
 * Interrupt acknowledgement sequence for memory mapped HBAs. Find out the
 * completed command and put them on the completed list for later processing.
 *
 * Returns:	1 if the interrupt is valid, 0 otherwise
 */
static int
megaraid_ack_sequence(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	mbox_t			*mbox;
	scb_t			*scb;
	uint8_t			nstatus;
	uint8_t			completed[MBOX_MAX_FIRMWARE_STATUS];
	struct list_head	clist;
	int			handled;
	uint32_t		dword;
	unsigned long		flags;
	int			i, j;


	mbox	= raid_dev->mbox;

	// move the SCBs from the firmware completed array to our local list
	INIT_LIST_HEAD(&clist);

	// loop till F/W has more commands for us to complete
	handled = 0;
	spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
	do {
		/*
		 * Check if a valid interrupt is pending. If found, force the
		 * interrupt line low.
		 */
		dword = RDOUTDOOR(raid_dev);
		// 0x10001234 is the FW's "interrupt valid" magic value
		if (dword != 0x10001234) break;

		handled = 1;

		// write the magic back to de-assert the interrupt line
		WROUTDOOR(raid_dev, 0x10001234);

		nstatus = 0;
		// wait for valid numstatus to post; 0xFF means "not yet"
		for (i = 0; i < 0xFFFFF; i++) {
			if (mbox->numstatus != 0xFF) {
				nstatus = mbox->numstatus;
				break;
			}
			rmb();	// force re-read of FW-written field
		}
		// re-arm numstatus for the next batch of completions
		mbox->numstatus = 0xFF;

		adapter->outstanding_cmds -= nstatus;

		for (i = 0; i < nstatus; i++) {

			// wait for valid command index to post
			for (j = 0; j < 0xFFFFF; j++) {
				if (mbox->completed[i] != 0xFF) break;
				rmb();
			}
			completed[i]		= mbox->completed[i];
			mbox->completed[i]	= 0xFF;

			if (completed[i] == 0xFF) {
				con_log(CL_ANN, (KERN_CRIT
				"megaraid: command posting timed out\n"));

				BUG();
				continue;
			}

			// Get SCB associated with this command id
			if (completed[i] >= MBOX_MAX_SCSI_CMDS) {
				// a cmm command
				scb = adapter->uscb_list + (completed[i] -
						MBOX_MAX_SCSI_CMDS);
			}
			else {
				// an os command
				scb = adapter->kscb_list + completed[i];
			}

			scb->status = mbox->status;
			list_add_tail(&scb->list, &clist);
		}

		// Acknowledge interrupt
		WRINDOOR(raid_dev, 0x02);

	} while(1);

	spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);


	// put the completed commands in the completed list. DPC would
	// complete these commands later
	spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);

	list_splice(&clist, &adapter->completed_list);

	spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);


	// schedule the DPC if there is some work for it
	if (handled)
		tasklet_schedule(&adapter->dpc_h);

	return handled;
}
/**
 * megaraid_isr - isr for memory based mailbox based controllers
 * @irq	: irq
 * @devp	: pointer to our soft state
 *
 * Interrupt service routine for memory-mapped mailbox controllers. Drains
 * the firmware completion array, then kicks the pending queue.
 */
static irqreturn_t
megaraid_isr(int irq, void *devp)
{
	adapter_t	*adapter = devp;
	int		handled = megaraid_ack_sequence(adapter);

	/* Loop through any pending requests */
	if (!adapter->quiescent)
		megaraid_mbox_runpendq(adapter, NULL);

	return IRQ_RETVAL(handled);
}
/**
 * megaraid_mbox_sync_scb - sync kernel buffers
 * @adapter	: controller's soft state
 * @scb	: pointer to the resource packet
 *
 * DMA sync if required: for device-to-host transfers, sync the SG list for
 * CPU access before unmapping. Note: the unused local `ccb` from the
 * original implementation has been removed — it was assigned and never read.
 */
static void
megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
{
	// sync data written by the device before the CPU reads it
	if (scb->dma_direction == PCI_DMA_FROMDEVICE)
		pci_dma_sync_sg_for_cpu(adapter->pdev,
					scsi_sglist(scb->scp),
					scsi_sg_count(scb->scp),
					PCI_DMA_FROMDEVICE);

	scsi_dma_unmap(scb->scp);
}
/**
 * megaraid_mbox_dpc - the tasklet to complete the commands from completed list
 * @devp	: pointer to HBA soft state
 *
 * Pick up the commands from the completed list and send back to the owners.
 * This is a reentrant function and does not assume any locks are held while
 * it is being called.
 */
static void
megaraid_mbox_dpc(unsigned long devp)
{
	adapter_t		*adapter = (adapter_t *)devp;
	mraid_device_t		*raid_dev;
	struct list_head	clist;
	struct scatterlist	*sgl;
	scb_t			*scb;
	scb_t			*tmp;
	struct scsi_cmnd	*scp;
	mraid_passthru_t	*pthru;
	mraid_epassthru_t	*epthru;
	mbox_ccb_t		*ccb;
	int			islogical;
	int			pdev_index;
	int			pdev_state;
	mbox_t			*mbox;
	unsigned long		flags;
	uint8_t			c;
	int			status;
	uioc_t			*kioc;


	if (!adapter) return;

	raid_dev = ADAP2RAIDDEV(adapter);

	// move the SCBs from the completed list to our local list
	INIT_LIST_HEAD(&clist);

	spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);

	list_splice_init(&adapter->completed_list, &clist);

	spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);


	list_for_each_entry_safe(scb, tmp, &clist, list) {

		status		= scb->status;
		scp		= scb->scp;
		ccb		= (mbox_ccb_t *)scb->ccb;
		pthru		= ccb->pthru;
		epthru		= ccb->epthru;
		mbox		= ccb->mbox;

		// Make sure f/w has completed a valid command
		if (scb->state != SCB_ISSUED) {
			con_log(CL_ANN, (KERN_CRIT
			"megaraid critical err: invalid command %d:%d:%p\n",
				scb->sno, scb->state, scp));
			BUG();
			continue;	// Must never happen!
		}

		// check for the management command and complete it right away
		if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
			scb->state	= SCB_FREE;
			scb->status	= status;

			// remove from local clist
			list_del_init(&scb->list);

			kioc			= (uioc_t *)scb->gp;
			kioc->status		= 0;

			megaraid_mbox_mm_done(adapter, scb);

			continue;
		}

		// Was an abort issued for this command earlier
		if (scb->state & SCB_ABORT) {
			con_log(CL_ANN, (KERN_NOTICE
			"megaraid: aborted cmd %lx[%x] completed\n",
				scp->serial_number, scb->sno));
		}

		/*
		 * If the inquiry came of a disk drive which is not part of
		 * any RAID array, expose it to the kernel. For this to be
		 * enabled, user must set the "megaraid_expose_unconf_disks"
		 * flag to 1 by specifying it on module parameter list.
		 * This would enable data migration off drives from other
		 * configurations.
		 */
		islogical = MRAID_IS_LOGICAL(adapter, scp);
		if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
				&& IS_RAID_CH(raid_dev, scb->dev_channel)) {

			sgl = scsi_sglist(scp);
			if (sg_page(sgl)) {
				// first byte of INQUIRY data is the
				// peripheral device type
				c = *(unsigned char *) sg_virt(&sgl[0]);
			} else {
				con_log(CL_ANN, (KERN_WARNING
						 "megaraid mailbox: invalid sg:%d\n",
						 __LINE__));
				c = 0;
			}

			if ((c & 0x1F ) == TYPE_DISK) {
				pdev_index = (scb->dev_channel * 16) +
					scb->dev_target;
				pdev_state =
					raid_dev->pdrv_state[pdev_index] & 0x0F;

				// hide configured/unexposed disks by forcing
				// a failed status
				if (pdev_state == PDRV_ONLINE		||
					pdev_state == PDRV_FAILED	||
					pdev_state == PDRV_RBLD		||
					pdev_state == PDRV_HOTSPARE	||
					megaraid_expose_unconf_disks == 0) {

					status = 0xF0;
				}
			}
		}

		// Convert MegaRAID status to Linux error code
		switch (status) {

		case 0x00:

			scp->result = (DID_OK << 16);
			break;

		case 0x02:

			/* set sense_buffer and result fields */
			if (mbox->cmd == MBOXCMD_PASSTHRU ||
				mbox->cmd == MBOXCMD_PASSTHRU64) {

				memcpy(scp->sense_buffer, pthru->reqsensearea,
						14);

				scp->result = DRIVER_SENSE << 24 |
					DID_OK << 16 | CHECK_CONDITION << 1;
			}
			else {
				if (mbox->cmd == MBOXCMD_EXTPTHRU) {

					memcpy(scp->sense_buffer,
						epthru->reqsensearea, 14);

					scp->result = DRIVER_SENSE << 24 |
						DID_OK << 16 |
						CHECK_CONDITION << 1;
				} else {
					// no sense data from FW: synthesize
					// an ABORTED COMMAND sense
					scp->sense_buffer[0] = 0x70;
					scp->sense_buffer[2] = ABORTED_COMMAND;
					scp->result = CHECK_CONDITION << 1;
				}
			}
			break;

		case 0x08:

			scp->result = DID_BUS_BUSY << 16 | status;
			break;

		default:

			/*
			 * If TEST_UNIT_READY fails, we know RESERVATION_STATUS
			 * failed
			 */
			if (scp->cmnd[0] == TEST_UNIT_READY) {
				scp->result = DID_ERROR << 16 |
					RESERVATION_CONFLICT << 1;
			}
			else
			/*
			 * Error code returned is 1 if Reserve or Release
			 * failed or the input parameter is invalid
			 */
			if (status == 1 && (scp->cmnd[0] == RESERVE ||
					 scp->cmnd[0] == RELEASE)) {

				scp->result = DID_ERROR << 16 |
					RESERVATION_CONFLICT << 1;
			}
			else {
				scp->result = DID_BAD_TARGET << 16 | status;
			}
		}

		// print a debug message for all failed commands
		if (status) {
			megaraid_mbox_display_scb(adapter, scb);
		}

		// Free our internal resources and call the mid-layer callback
		// routine
		megaraid_mbox_sync_scb(adapter, scb);

		// remove from local clist
		list_del_init(&scb->list);

		// put back in free list
		megaraid_dealloc_scb(adapter, scb);

		// send the scsi packet back to kernel
		scp->scsi_done(scp);
	}

	return;
}
/**
 * megaraid_abort_handler - abort the scsi command
 * @scp	: command to be aborted
 *
 * Abort a previous SCSI request. Only commands on the pending list can be
 * aborted. All the commands issued to the F/W must complete. The command is
 * searched in three places in order: the completed list (race with the DPC),
 * the pending list (still driver-owned), and finally the full SCB array
 * (firmware-owned — cannot be aborted, return FAILED).
 **/
static int
megaraid_abort_handler(struct scsi_cmnd *scp)
{
	adapter_t		*adapter;
	mraid_device_t		*raid_dev;
	scb_t			*scb;
	scb_t			*tmp;
	int			found;
	unsigned long		flags;
	int			i;


	adapter		= SCP2ADAPTER(scp);
	raid_dev	= ADAP2RAIDDEV(adapter);

	con_log(CL_ANN, (KERN_WARNING
		"megaraid: aborting-%ld cmd=%x <c=%d t=%d l=%d>\n",
		scp->serial_number, scp->cmnd[0], SCP2CHANNEL(scp),
		SCP2TARGET(scp), SCP2LUN(scp)));

	// If FW has stopped responding, simply return failure
	if (raid_dev->hw_error) {
		con_log(CL_ANN, (KERN_NOTICE
			"megaraid: hw error, not aborting\n"));
		return FAILED;
	}

	// There might a race here, where the command was completed by the
	// firmware and now it is on the completed list. Before we could
	// complete the command to the kernel in dpc, the abort came.
	// Find out if this is the case to avoid the race.
	scb = NULL;
	spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
	list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) {

		if (scb->scp == scp) {	// Found command

			list_del_init(&scb->list);	// from completed list

			con_log(CL_ANN, (KERN_WARNING
			"megaraid: %ld:%d[%d:%d], abort from completed list\n",
				scp->serial_number, scb->sno,
				scb->dev_channel, scb->dev_target));

			scp->result = (DID_ABORT << 16);
			scp->scsi_done(scp);

			megaraid_dealloc_scb(adapter, scb);

			spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter),
				flags);

			return SUCCESS;
		}
	}
	spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);


	// Find out if this command is still on the pending list. If it is and
	// was never issued, abort and return success. If the command is owned
	// by the firmware, we must wait for it to complete by the FW.
	spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
	list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {

		if (scb->scp == scp) {	// Found command

			list_del_init(&scb->list);	// from pending list

			ASSERT(!(scb->state & SCB_ISSUED));

			con_log(CL_ANN, (KERN_WARNING
				"megaraid abort: %ld[%d:%d], driver owner\n",
				scp->serial_number, scb->dev_channel,
				scb->dev_target));

			scp->result = (DID_ABORT << 16);
			scp->scsi_done(scp);

			megaraid_dealloc_scb(adapter, scb);

			spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
				flags);

			return SUCCESS;
		}
	}
	spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);


	// Check do we even own this command, in which case this would be
	// owned by the firmware. The only way to locate the FW scb is to
	// traverse through the list of all SCB, since driver does not
	// maintain these SCBs on any list
	found = 0;
	spin_lock_irq(&adapter->lock);
	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
		scb = adapter->kscb_list + i;

		if (scb->scp == scp) {

			found = 1;

			if (!(scb->state & SCB_ISSUED)) {
				con_log(CL_ANN, (KERN_WARNING
				"megaraid abort: %ld%d[%d:%d], invalid state\n",
				scp->serial_number, scb->sno, scb->dev_channel,
				scb->dev_target));
				BUG();
			}
			else {
				con_log(CL_ANN, (KERN_WARNING
				"megaraid abort: %ld:%d[%d:%d], fw owner\n",
				scp->serial_number, scb->sno, scb->dev_channel,
				scb->dev_target));
			}
		}
	}
	spin_unlock_irq(&adapter->lock);

	if (!found) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid abort: scsi cmd:%ld, do now own\n",
			scp->serial_number));

		// FIXME: Should there be a callback for this command?
		return SUCCESS;
	}

	// We cannot actually abort a command owned by firmware, return
	// failure and wait for reset. In host reset handler, we will find out
	// if the HBA is still live
	return FAILED;
}
/**
 * megaraid_reset_handler - device reset handler for mailbox based driver
 * @scp	: reference command
 *
 * Reset handler for the mailbox based controller. First try to find out if
 * the FW is still live, in which case the outstanding commands counter must go
 * down to 0. If that happens, also issue the reservation reset command to
 * relinquish (possible) reservations on the logical drives connected to this
 * host.
 *
 * Returns SUCCESS or FAILED. NOTE(review): the function takes
 * &adapter->lock with spin_lock() at the bottom but releases it with
 * spin_unlock_irq() at `out:` — presumably intentional given the msleep-based
 * recovery loop runs with interrupts enabled; confirm against callers.
 **/
static int
megaraid_reset_handler(struct scsi_cmnd *scp)
{
	adapter_t	*adapter;
	scb_t		*scb;
	scb_t		*tmp;
	mraid_device_t	*raid_dev;
	unsigned long	flags;
	uint8_t		raw_mbox[sizeof(mbox_t)];
	int		rval;
	int		recovery_window;
	int		recovering;
	int		i;
	uioc_t		*kioc;

	adapter		= SCP2ADAPTER(scp);
	raid_dev	= ADAP2RAIDDEV(adapter);

	// return failure if adapter is not responding
	if (raid_dev->hw_error) {
		con_log(CL_ANN, (KERN_NOTICE
			"megaraid: hw error, cannot reset\n"));
		return FAILED;
	}


	// Under exceptional conditions, FW can take up to 3 minutes to
	// complete command processing. Wait for additional 2 minutes for the
	// pending commands counter to go down to 0. If it doesn't, let the
	// controller be marked offline
	// Also, reset all the commands currently owned by the driver
	spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
	list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
		list_del_init(&scb->list);	// from pending list

		if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
			// management (ioctl) command: fail it back through
			// the common-module completion path
			con_log(CL_ANN, (KERN_WARNING
			"megaraid: IOCTL packet with %d[%d:%d] being reset\n",
				scb->sno, scb->dev_channel, scb->dev_target));

			scb->status = -1;

			kioc			= (uioc_t *)scb->gp;
			kioc->status		= -EFAULT;

			megaraid_mbox_mm_done(adapter, scb);
		} else {
			if (scb->scp == scp) {	// Found command
				con_log(CL_ANN, (KERN_WARNING
				"megaraid: %ld:%d[%d:%d], reset from pending list\n",
					scp->serial_number, scb->sno,
					scb->dev_channel, scb->dev_target));
			} else {
				con_log(CL_ANN, (KERN_WARNING
				"megaraid: IO packet with %d[%d:%d] being reset\n",
					scb->sno, scb->dev_channel, scb->dev_target));
			}

			scb->scp->result = (DID_RESET << 16);
			scb->scp->scsi_done(scb->scp);

			megaraid_dealloc_scb(adapter, scb);
		}
	}
	spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);

	if (adapter->outstanding_cmds) {
		con_log(CL_ANN, (KERN_NOTICE
			"megaraid: %d outstanding commands. Max wait %d sec\n",
			adapter->outstanding_cmds,
			(MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT)));
	}

	recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;

	recovering = adapter->outstanding_cmds;

	// poll, one second per iteration, for FW to drain its commands
	for (i = 0; i < recovery_window; i++) {

		megaraid_ack_sequence(adapter);

		// print a message once every 5 seconds only
		if (!(i % 5)) {
			con_log(CL_ANN, (
			"megaraid mbox: Wait for %d commands to complete:%d\n",
				adapter->outstanding_cmds,
				(MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
		}

		// bailout if no recovery happended in reset time
		if (adapter->outstanding_cmds == 0) {
			break;
		}

		msleep(1000);
	}

	spin_lock(&adapter->lock);

	// If still outstanding commands, bail out
	if (adapter->outstanding_cmds) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid mbox: critical hardware error!\n"));

		raid_dev->hw_error = 1;

		rval = FAILED;
		goto out;
	}
	else {
		con_log(CL_ANN, (KERN_NOTICE
		"megaraid mbox: reset sequence completed successfully\n"));
	}


	// If the controller supports clustering, reset reservations
	if (!adapter->ha) {
		rval = SUCCESS;
		goto out;
	}

	// clear reservations if any
	raw_mbox[0] = CLUSTER_CMD;
	raw_mbox[2] = RESET_RESERVATIONS;

	rval = SUCCESS;
	if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) {
		con_log(CL_ANN,
			(KERN_INFO "megaraid: reservation reset\n"));
	}
	else {
		rval = FAILED;
		con_log(CL_ANN, (KERN_WARNING
				"megaraid: reservation reset failed\n"));
	}

 out:
	spin_unlock_irq(&adapter->lock);
	return rval;
}
/*
* START: internal commands library
*
* This section of the driver has the common routine used by the driver and
* also has all the FW routines
*/
/**
 * mbox_post_sync_cmd() - blocking command to the mailbox based controllers
 * @adapter	: controller's soft state
 * @raw_mbox	: the mailbox
 *
 * Issue a scb in synchronous and non-interrupt mode for mailbox based
 * controllers. Returns the firmware status byte on success, -1 on any
 * timeout (mailbox blocked, no status, no poll semaphore, no ack).
 */
static int
mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
{
	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
	mbox64_t	*mbox64;
	mbox_t		*mbox;
	uint8_t		status;
	int		i;


	mbox64	= raid_dev->mbox64;
	mbox	= raid_dev->mbox;

	/*
	 * Wait until mailbox is free
	 */
	if (megaraid_busywait_mbox(raid_dev) != 0)
		goto blocked_mailbox;

	/*
	 * Copy mailbox data into host structure
	 */
	memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
	mbox->cmdid		= 0xFE;	// sentinel id for synchronous commands
	mbox->busy		= 1;
	mbox->poll		= 0;
	mbox->ack		= 0;
	// 0xFF marks these fields as "not yet written by FW"
	mbox->numstatus		= 0xFF;
	mbox->status		= 0xFF;

	wmb();	// mailbox contents visible before the doorbell ring
	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);

	// wait for maximum 1 second for status to post. If the status is not
	// available within 1 second, assume FW is initializing and wait
	// for an extended amount of time
	if (mbox->numstatus == 0xFF) {	// status not yet available
		udelay(25);

		for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) {
			rmb();
			msleep(1);
		}


		if (i == 1000) {
			// FW may still be booting: wait up to
			// MBOX_RESET_WAIT more seconds, with a countdown
			// shown on the console
			con_log(CL_ANN, (KERN_NOTICE
				"megaraid mailbox: wait for FW to boot      "));

			for (i = 0; (mbox->numstatus == 0xFF) &&
					(i < MBOX_RESET_WAIT); i++) {
				rmb();
				con_log(CL_ANN, ("\b\b\b\b\b[%03d]",
							MBOX_RESET_WAIT - i));
				msleep(1000);
			}

			if (i == MBOX_RESET_WAIT) {

				con_log(CL_ANN, (
				"\nmegaraid mailbox: status not available\n"));

				return -1;
			}
			con_log(CL_ANN, ("\b\b\b\b\b[ok] \n"));
		}
	}

	// wait for maximum 1 second for poll semaphore
	if (mbox->poll != 0x77) {	// 0x77: FW's "command consumed" marker
		udelay(25);

		for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) {
			rmb();
			msleep(1);
		}

		if (i == 1000) {
			con_log(CL_ANN, (KERN_WARNING
			"megaraid mailbox: could not get poll semaphore\n"));
			return -1;
		}
	}

	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
	wmb();

	// wait for maximum 1 second for acknowledgement
	if (RDINDOOR(raid_dev) & 0x2) {
		udelay(25);

		for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) {
			rmb();
			msleep(1);
		}

		if (i == 1000) {
			con_log(CL_ANN, (KERN_WARNING
				"megaraid mailbox: could not acknowledge\n"));
			return -1;
		}
	}
	mbox->poll	= 0;
	mbox->ack	= 0x77;	// tell FW the status was consumed

	status = mbox->status;

	// invalidate the completed command id array. After command
	// completion, firmware would write the valid id.
	mbox->numstatus	= 0xFF;
	mbox->status	= 0xFF;
	for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) {
		mbox->completed[i] = 0xFF;
	}

	return status;

blocked_mailbox:

	con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") );
	return -1;
}
/**
* mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers
* @adapter : controller's soft state
* @raw_mbox : the mailbox
*
* Issue a scb in synchronous and non-interrupt mode for mailbox based
* controllers. This is a faster version of the synchronous command and
* therefore can be called in interrupt-context as well.
*/
static int
mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
{
	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
	mbox_t		*mbox;
	long		i;

	mbox = raid_dev->mbox;

	// return immediately if the mailbox is busy
	if (mbox->busy) return -1;

	// Copy mailbox data into host structure
	memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14);

	mbox->cmdid	= 0xFE;		// magic id marking a driver-internal command
	mbox->busy	= 1;
	mbox->poll	= 0;
	mbox->ack	= 0;
	mbox->numstatus	= 0xFF;		// 0xFF == "status not yet posted by FW"
	mbox->status	= 0xFF;

	wmb();				// mailbox must be visible before the doorbell rings
	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);

	// busy-wait only (no msleep): this variant may run in interrupt context
	for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
		if (mbox->numstatus != 0xFF) break;
		rmb();
		udelay(MBOX_SYNC_DELAY_200);
	}

	if (i == MBOX_SYNC_WAIT_CNT) {
		// We may need to re-calibrate the counter
		con_log(CL_ANN, (KERN_CRIT
			"megaraid: fast sync command timed out\n"));
	}

	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);	// ack completion
	wmb();

	// NOTE(review): status is returned even on timeout — presumably stale
	// 0xFF in that case; callers treat non-zero as failure
	return mbox->status;
}
/**
* megaraid_busywait_mbox() - Wait until the controller's mailbox is available
* @raid_dev : RAID device (HBA) soft state
*
* Wait until the controller's mailbox is available to accept more commands.
* Wait for at most 1 second.
*/
static int
megaraid_busywait_mbox(mraid_device_t *raid_dev)
{
	mbox_t	*mbox = raid_dev->mbox;
	int	wait = 0;

	/* Fast path: mailbox already free, no delay needed */
	if (!mbox->busy)
		return 0;

	/* Short settle delay, then poll once a millisecond for up to 1s */
	udelay(25);
	while (mbox->busy && wait < 1000) {
		msleep(1);
		wait++;
	}

	return (wait < 1000) ? 0 : -1;
}
/**
* megaraid_mbox_product_info - some static information about the controller
* @adapter : our soft state
*
* Issue commands to the controller to grab some parameters required by our
* caller.
*/
static int
megaraid_mbox_product_info(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	mbox_t			*mbox;
	uint8_t			raw_mbox[sizeof(mbox_t)];
	mraid_pinfo_t		*pinfo;
	dma_addr_t		pinfo_dma_h;
	mraid_inquiry3_t	*mraid_inq3;
	int			i;

	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
	mbox = (mbox_t *)raw_mbox;

	/*
	 * Issue an ENQUIRY3 command to find out certain adapter parameters,
	 * e.g., max channels, max commands etc.
	 */
	pinfo = pci_alloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
			&pinfo_dma_h);

	if (pinfo == NULL) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: out of memory, %s %d\n", __func__,
			__LINE__));
		return -1;
	}
	memset(pinfo, 0, sizeof(mraid_pinfo_t));

	// ENQUIRY3 reply is DMA-ed into the adapter's internal buffer
	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);

	raw_mbox[0] = FC_NEW_CONFIG;
	raw_mbox[2] = NC_SUBOP_ENQUIRY3;
	raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;

	// Issue the command
	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
		con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));

		pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
			pinfo, pinfo_dma_h);

		return -1;
	}

	/*
	 * Collect information about state of each physical drive
	 * attached to the controller. We will expose all the disks
	 * which are not part of RAID
	 */
	mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf;
	for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) {
		raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i];
	}

	/*
	 * Get product info for information like number of channels,
	 * maximum commands supported.
	 */
	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
	mbox->xferaddr = (uint32_t)pinfo_dma_h;	// reply goes to pinfo this time

	raw_mbox[0] = FC_NEW_CONFIG;
	raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;

	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: product info failed\n"));

		pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
			pinfo, pinfo_dma_h);

		return -1;
	}

	/*
	 * Setup some parameters for host, as required by our caller
	 */
	adapter->max_channel = pinfo->nchannels;

	/*
	 * we will export all the logical drives on a single channel.
	 * Add 1 since inquires do not come for initiator ID
	 */
	adapter->max_target = MAX_LOGICAL_DRIVES_40LD + 1;

	adapter->max_lun = 8;	// up to 8 LUNs for non-disk devices

	/*
	 * These are the maximum outstanding commands for the scsi-layer
	 */
	adapter->max_cmds = MBOX_MAX_SCSI_CMDS;

	memset(adapter->fw_version, 0, VERSION_SIZE);
	memset(adapter->bios_version, 0, VERSION_SIZE);

	// keep only the first 4 characters of each version string, and
	// explicitly NUL-terminate
	memcpy(adapter->fw_version, pinfo->fw_version, 4);
	adapter->fw_version[4] = 0;

	memcpy(adapter->bios_version, pinfo->bios_version, 4);
	adapter->bios_version[4] = 0;

	con_log(CL_ANN, (KERN_NOTICE
		"megaraid: fw version:[%s] bios version:[%s]\n",
		adapter->fw_version, adapter->bios_version));

	pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
			pinfo_dma_h);

	return 0;
}
/**
* megaraid_mbox_extended_cdb - check for support for extended CDBs
* @adapter : soft state for the controller
*
* This routine check whether the controller in question supports extended
* ( > 10 bytes ) CDBs.
*/
static int
megaraid_mbox_extended_cdb(adapter_t *adapter)
{
	uint8_t		raw_mbox[sizeof(mbox_t)];
	mbox_t		*mbox = (mbox_t *)raw_mbox;

	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));

	/* FW replies into the adapter's internal buffer */
	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);

	raw_mbox[0] = MAIN_MISC_OPCODE;
	raw_mbox[2] = SUPPORT_EXT_CDB;

	/*
	 * Issue the command: it succeeds iff the firmware supports
	 * extended (> 10 byte) CDBs.
	 */
	return (mbox_post_sync_cmd(adapter, raw_mbox) == 0) ? 0 : -1;
}
/**
* megaraid_mbox_support_ha - Do we support clustering
* @adapter : soft state for the controller
* @init_id : ID of the initiator
*
* Determine if the firmware supports clustering and the ID of the initiator.
*/
static int
megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
{
	uint8_t		raw_mbox[sizeof(mbox_t)];
	mbox_t		*mbox = (mbox_t *)raw_mbox;

	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));

	/* FW replies into the adapter's internal buffer */
	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);

	raw_mbox[0] = GET_TARGET_ID;

	/* Default initiator ID in case the firmware rejects the query */
	*init_id = 7;

	// Issue the command
	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0)
		return -1;

	/* First byte of the reply buffer carries the initiator ID */
	*init_id = *(uint8_t *)adapter->ibuf;

	con_log(CL_ANN, (KERN_INFO
		"megaraid: cluster firmware, initiator ID: %d\n",
		*init_id));

	return 0;
}
/**
* megaraid_mbox_support_random_del - Do we support random deletion
* @adapter : soft state for the controller
*
* Determine if the firmware supports random deletion.
* Return: 1 is operation supported, 0 otherwise
*/
static int
megaraid_mbox_support_random_del(adapter_t *adapter)
{
	mbox_t		*mbox;
	uint8_t		raw_mbox[sizeof(mbox_t)];
	int		rval;

	/*
	 * Newer firmware on Dell CERC expect a different
	 * random deletion handling, so disable it.
	 */
	// Condition: exact AMI MEGARAID3 / Dell CERC ATA100 4CH board AND
	// firmware version string (e.g. "6.62") lexically above "6.61":
	// major > '6', or major=='6' with minor digit > '6', or "6.6x"
	// with x > '1'. fw_version layout: [0]=major, [2][3]=minor digits.
	if (adapter->pdev->vendor == PCI_VENDOR_ID_AMI &&
	    adapter->pdev->device == PCI_DEVICE_ID_AMI_MEGARAID3 &&
	    adapter->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    adapter->pdev->subsystem_device == PCI_SUBSYS_ID_CERC_ATA100_4CH &&
	    (adapter->fw_version[0] > '6' ||
	     (adapter->fw_version[0] == '6' &&
	      adapter->fw_version[2] > '6') ||
	     (adapter->fw_version[0] == '6'
	      && adapter->fw_version[2] == '6'
	      && adapter->fw_version[3] > '1'))) {
		con_log(CL_DLEVEL1, ("megaraid: disable random deletion\n"));
		return 0;
	}

	mbox = (mbox_t *)raw_mbox;
	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));

	raw_mbox[0] = FC_DEL_LOGDRV;
	raw_mbox[2] = OP_SUP_DEL_LOGDRV;

	// Issue the command; success means the feature is supported
	rval = 0;
	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {

		con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n"));

		rval =  1;
	}

	return rval;
}
/**
* megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware
* @adapter : soft state for the controller
*
* Find out the maximum number of scatter-gather elements supported by the
* firmware.
*/
static int
megaraid_mbox_get_max_sg(adapter_t *adapter)
{
	uint8_t		raw_mbox[sizeof(mbox_t)];
	mbox_t		*mbox = (mbox_t *)raw_mbox;
	int		nsg;

	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));

	/* FW replies into the adapter's internal buffer */
	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);

	raw_mbox[0] = MAIN_MISC_OPCODE;
	raw_mbox[2] = GET_MAX_SG_SUPPORT;

	/* On failure fall back to a conservative default SG count */
	nsg = (mbox_post_sync_cmd(adapter, raw_mbox) == 0) ?
		*(uint8_t *)adapter->ibuf : MBOX_DEFAULT_SG_SIZE;

	/* Clamp to what the driver's data structures can hold */
	if (nsg > MBOX_MAX_SG_SIZE)
		nsg = MBOX_MAX_SG_SIZE;

	return nsg;
}
/**
* megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels
* @adapter : soft state for the controller
*
* Enumerate the RAID and SCSI channels for ROMB platforms so that channels
* can be exported as regular SCSI channels.
*/
static void
megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
{
	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
	uint8_t		raw_mbox[sizeof(mbox_t)];
	mbox_t		*mbox = (mbox_t *)raw_mbox;

	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));

	/* FW replies into the adapter's internal buffer */
	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);

	raw_mbox[0] = CHNL_CLASS;
	raw_mbox[2] = GET_CHNL_CLASS;

	/*
	 * Default: treat every channel as a RAID channel. On success the
	 * single reply byte from the firmware overrides this bitmap.
	 */
	raid_dev->channel_class = 0xFF;
	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0)
		raid_dev->channel_class = *(uint8_t *)adapter->ibuf;
}
/**
* megaraid_mbox_flush_cache - flush adapter and disks cache
* @adapter : soft state for the controller
*
* Flush adapter cache followed by disks cache.
*/
/*
 * Fix: drop the local `mbox` variable — it was assigned from raw_mbox but
 * never read (the raw byte array is what both commands use); also drop the
 * redundant trailing `return;`.
 */
static void
megaraid_mbox_flush_cache(adapter_t *adapter)
{
	uint8_t	raw_mbox[sizeof(mbox_t)];

	/*
	 * One zeroed mailbox serves both commands; only byte 0 (the opcode)
	 * changes between them.
	 */
	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));

	raw_mbox[0] = FLUSH_ADAPTER;
	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
		con_log(CL_ANN, ("megaraid: flush adapter failed\n"));
	}

	raw_mbox[0] = FLUSH_SYSTEM;
	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
		con_log(CL_ANN, ("megaraid: flush disks cache failed\n"));
	}
}
/**
* megaraid_mbox_fire_sync_cmd - fire the sync cmd
* @adapter : soft state for the controller
*
* Clears the pending cmds in FW and reinits its RAID structs.
*/
static int
megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
{
	mbox_t	*mbox;
	uint8_t	raw_mbox[sizeof(mbox_t)];
	mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
	mbox64_t *mbox64;
	int	status = 0;
	int i;
	uint32_t dword;

	mbox = (mbox_t *)raw_mbox;

	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));

	raw_mbox[0] = 0xFF;		// special "reset/reinit" opcode

	mbox64	= raid_dev->mbox64;
	// NOTE: mbox is re-pointed from the local raw_mbox to the shared
	// host/FW mailbox here; raw_mbox now only serves as the source of
	// the memcpy below
	mbox = raid_dev->mbox;

	/* Wait until mailbox is free */
	if (megaraid_busywait_mbox(raid_dev) != 0) {
		status = 1;
		goto blocked_mailbox;
	}

	/* Copy mailbox data into host structure */
	memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
	mbox->cmdid		= 0xFE;	// driver-internal command id
	mbox->busy		= 1;
	mbox->poll		= 0;
	mbox->ack		= 0;
	mbox->numstatus		= 0;
	mbox->status		= 0;

	wmb();				// mailbox visible before doorbell
	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);

	/* Wait for maximum 1 min for status to post.
	 * If the Firmware SUPPORTS the ABOVE COMMAND,
	 * mbox->cmd will be set to 0
	 * else
	 * the firmware will reject the command with
	 * mbox->numstatus set to 1
	 */

	i = 0;
	status = 0;
	while (!mbox->numstatus && mbox->cmd == 0xFF) {
		rmb();
		msleep(1);
		i++;
		if (i > 1000 * 60) {	// ~60 s at 1 ms per iteration
			status = 1;
			break;
		}
	}
	if (mbox->numstatus == 1)
		status = 1; /*cmd not supported*/

	/* Check for interrupt line; clear any pending interrupt by writing
	 * the outdoor value back, then ack via the indoor register */
	dword = RDOUTDOOR(raid_dev);
	WROUTDOOR(raid_dev, dword);
	WRINDOOR(raid_dev,2);

	return status;

blocked_mailbox:
	con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n"));
	return status;
}
/**
 * megaraid_mbox_display_scb - display SCB information, mostly debug purposes
 * @adapter : controller's soft state
 * @scb : SCB to be displayed
 *
 * Display information about the given SCB iff the current debug level is
 * verbose.
 */
static void
megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
{
	mbox_ccb_t		*ccb	= (mbox_ccb_t *)scb->ccb;
	struct scsi_cmnd	*scp	= scb->scp;
	mbox_t			*mbox	= ccb->mbox;
	int			dbg_level = CL_DLEVEL3;
	int			idx;

	/* Dump mailbox-level state first */
	con_log(dbg_level, (KERN_NOTICE
		"megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status,
		mbox->cmd, scb->sno));

	con_log(dbg_level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n",
		mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv,
		mbox->numsge));

	/* Internal commands carry no scsi_cmnd; nothing more to show */
	if (!scp)
		return;

	/* Hex dump of the CDB bytes */
	con_log(dbg_level, (KERN_NOTICE "scsi cmnd: "));
	for (idx = 0; idx < scp->cmd_len; idx++)
		con_log(dbg_level, ("%#2.02x ", scp->cmnd[idx]));

	con_log(dbg_level, ("\n"));
}
/**
 * megaraid_mbox_setup_device_map - manage device ids
 * @adapter : Driver's soft state
 *
 * Manage the device ids to have an appropriate mapping between the kernel
 * scsi addresses and megaraid scsi and logical drive addresses. We export
 * scsi devices on their actual addresses, whereas the logical drives are
 * exported on a virtual scsi channel.
 */
static void
megaraid_mbox_setup_device_map(adapter_t *adapter)
{
	uint8_t		ch;
	uint8_t		tgt;

	/*
	 * Logical drives live on the virtual channel just past the last
	 * physical one. Targets above the initiator's own ID shift down by
	 * one because no inquiry arrives for the initiator itself.
	 */
	for (tgt = 0; tgt < LSI_MAX_LOGICAL_DRIVES_64LD; tgt++) {
		adapter->device_ids[adapter->max_channel][tgt] =
			(tgt < adapter->init_id) ? tgt : tgt - 1;
	}

	/* The initiator's own slot is marked invalid */
	adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF;

	/*
	 * Physical devices keep their hardware address, encoded as
	 * (channel << 8) | target.
	 */
	for (ch = 0; ch < adapter->max_channel; ch++) {
		for (tgt = 0; tgt < LSI_MAX_LOGICAL_DRIVES_64LD; tgt++) {
			adapter->device_ids[ch][tgt] = (ch << 8) | tgt;
		}
	}
}
/*
* END: internal commands library
*/
/*
* START: Interface for the common management module
*
* This is the module, which interfaces with the common management module to
* provide support for ioctl and sysfs
*/
/**
* megaraid_cmm_register - register with the management module
* @adapter : HBA soft state
*
* Register with the management module, which allows applications to issue
* ioctl calls to the drivers. This interface is used by the management module
* to setup sysfs support as well.
*/
static int
megaraid_cmm_register(adapter_t *adapter)
{
	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
	mraid_mmadp_t	adp;
	scb_t		*scb;
	mbox_ccb_t	*ccb;
	int		rval;
	int		i;

	// Allocate memory for the base list of scb for management module.
	adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);

	if (adapter->uscb_list == NULL) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: out of memory, %s %d\n", __func__,
			__LINE__));
		return -1;
	}

	// Initialize the synchronization parameters for resources for
	// commands for management module
	INIT_LIST_HEAD(&adapter->uscb_pool);

	spin_lock_init(USER_FREE_LIST_LOCK(adapter));

	// link all the packets. Note, CCB for commands, coming from the
	// commom management module, mailbox physical address are already
	// setup by it. We just need placeholder for that in our local command
	// control blocks
	for (i = 0; i < MBOX_MAX_USER_CMDS; i++) {

		scb			= adapter->uscb_list + i;
		ccb			= raid_dev->uccb_list + i;

		scb->ccb		= (caddr_t)ccb;
		ccb->mbox64		= raid_dev->umbox64 + i;
		ccb->mbox		= &ccb->mbox64->mbox32;
		ccb->raw_mbox		= (uint8_t *)ccb->mbox;

		scb->gp			= 0;

		// COMMAND ID 0 - (MBOX_MAX_SCSI_CMDS-1) ARE RESERVED FOR
		// COMMANDS COMING FROM IO SUBSYSTEM (MID-LAYER)
		scb->sno		= i + MBOX_MAX_SCSI_CMDS;

		scb->scp		= NULL;
		scb->state		= SCB_FREE;
		scb->dma_direction	= PCI_DMA_NONE;
		scb->dma_type		= MRAID_DMA_NONE;
		scb->dev_channel	= -1;	// no device bound yet
		scb->dev_target		= -1;

		// put scb in the free pool
		list_add_tail(&scb->list, &adapter->uscb_pool);
	}

	// describe this adapter to the common management module
	adp.unique_id		= adapter->unique_id;
	adp.drvr_type		= DRVRTYPE_MBOX;
	adp.drvr_data		= (unsigned long)adapter;
	adp.pdev		= adapter->pdev;
	adp.issue_uioc		= megaraid_mbox_mm_handler;
	adp.timeout		= MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
	adp.max_kioc		= MBOX_MAX_USER_CMDS;

	if ((rval = mraid_mm_register_adp(&adp)) != 0) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid mbox: did not register with CMM\n"));

		// registration failed: release the scb list again
		kfree(adapter->uscb_list);
	}

	return rval;
}
/**
* megaraid_cmm_unregister - un-register with the management module
* @adapter : HBA soft state
*
* Un-register with the management module.
* FIXME: mgmt module must return failure for unregister if it has pending
* commands in LLD.
*/
/*
 * Fix: NULL the freed pointer so a stray second call (or a late error path
 * that frees uscb_list again) cannot double-free.
 */
static int
megaraid_cmm_unregister(adapter_t *adapter)
{
	kfree(adapter->uscb_list);
	adapter->uscb_list = NULL;

	mraid_mm_unregister_adp(adapter->unique_id);

	return 0;
}
/**
* megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD
* @drvr_data : LLD specific data
* @kioc : CMM interface packet
* @action : command action
*
* This routine is invoked whenever the Common Management Module (CMM) has a
* command for us. The 'action' parameter specifies if this is a new command
* or otherwise.
*/
static int
megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action)
{
	adapter_t *adapter;

	// only command issuance is supported through this entry point
	if (action != IOCTL_ISSUE) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: unsupported management action:%#2x\n",
			action));
		return (-ENOTSUPP);
	}

	adapter = (adapter_t *)drvr_data;

	// make sure this adapter is not being detached right now.
	if (atomic_read(&adapter->being_detached)) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid: reject management request, detaching\n"));
		return (-ENODEV);
	}

	switch (kioc->opcode) {

	case GET_ADAP_INFO:

		// synchronous: fill caller's buffer and complete immediately
		kioc->status = gather_hbainfo(adapter, (mraid_hba_info_t *)
					(unsigned long)kioc->buf_vaddr);

		kioc->done(kioc);

		return kioc->status;

	case MBOX_CMD:

		// asynchronous: completion is signalled via kioc->done later
		return megaraid_mbox_mm_command(adapter, kioc);

	default:
		kioc->status = (-EINVAL);
		kioc->done(kioc);
		return (-EINVAL);
	}

	return 0;	// not reached
}
/**
* megaraid_mbox_mm_command - issues commands routed through CMM
* @adapter : HBA soft state
* @kioc : management command packet
*
* Issues commands, which are routed through the management module.
*/
static int
megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
{
	struct list_head	*head = &adapter->uscb_pool;
	mbox64_t		*mbox64;
	uint8_t			*raw_mbox;
	scb_t			*scb;
	mbox_ccb_t		*ccb;
	unsigned long		flags;

	// detach one scb from free pool
	spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);

	if (list_empty(head)) {	// should never happen because of CMM

		con_log(CL_ANN, (KERN_WARNING
			"megaraid mbox: bug in cmm handler, lost resources\n"));

		spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);

		return (-EINVAL);
	}

	scb = list_entry(head->next, scb_t, list);
	list_del_init(&scb->list);

	spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);

	scb->state		= SCB_ACTIVE;
	scb->dma_type		= MRAID_DMA_NONE;
	scb->dma_direction	= PCI_DMA_NONE;

	ccb		= (mbox_ccb_t *)scb->ccb;
	mbox64		= (mbox64_t *)(unsigned long)kioc->cmdbuf;
	raw_mbox	= (uint8_t *)&mbox64->mbox32;

	// snapshot the caller's mailbox into our command control block
	memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t));

	scb->gp		= (unsigned long)kioc;	// recover kioc at completion

	/*
	 * If it is a logdrv random delete operation, we have to wait till
	 * there are no outstanding cmds at the fw and then issue it directly
	 */
	if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {

		if (wait_till_fw_empty(adapter)) {
			con_log(CL_ANN, (KERN_NOTICE
				"megaraid mbox: LD delete, timed out\n"));

			kioc->status = -ETIME;

			scb->status = -1;

			// return scb to pool and complete the kioc with error
			megaraid_mbox_mm_done(adapter, scb);

			return (-ETIME);
		}

		INIT_LIST_HEAD(&scb->list);

		scb->state = SCB_ISSUED;
		if (mbox_post_cmd(adapter, scb) != 0) {

			con_log(CL_ANN, (KERN_NOTICE
				"megaraid mbox: LD delete, mailbox busy\n"));

			kioc->status = -EBUSY;

			scb->status = -1;

			megaraid_mbox_mm_done(adapter, scb);

			return (-EBUSY);
		}

		return 0;
	}

	// put the command on the pending list and execute
	megaraid_mbox_runpendq(adapter, scb);

	return 0;
}
/**
 * wait_till_fw_empty - wait for firmware to finish outstanding commands
 * @adapter	: controller's soft state
 *
 * Raise the quiescent flag (blocking new command issue) and poll, once per
 * second for up to a minute, until the firmware has no commands pending.
 * Returns the number of commands still outstanding (0 == drained).
 */
static int
wait_till_fw_empty(adapter_t *adapter)
{
	unsigned long	flags = 0;
	int		retry;

	/* Stop new commands from reaching the firmware */
	spin_lock_irqsave(&adapter->lock, flags);
	adapter->quiescent++;
	spin_unlock_irqrestore(&adapter->lock, flags);

	/* Poll for at most 60 seconds */
	for (retry = 0; retry < 60 && adapter->outstanding_cmds; retry++) {
		con_log(CL_DLEVEL1, (KERN_INFO
			"megaraid: FW has %d pending commands\n",
			adapter->outstanding_cmds));

		msleep(1000);
	}

	return adapter->outstanding_cmds;
}
/**
* megaraid_mbox_mm_done - callback for CMM commands
* @adapter : HBA soft state
* @scb : completed command
*
* Callback routine for internal commands originated from the management
* module.
*/
static void
megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
{
	uioc_t			*kioc;
	mbox64_t		*mbox64;
	uint8_t			*raw_mbox;
	unsigned long		flags;

	kioc			= (uioc_t *)scb->gp;	// stashed at issue time
	mbox64			= (mbox64_t *)(unsigned long)kioc->cmdbuf;
	mbox64->mbox32.status	= scb->status;		// propagate FW status
	raw_mbox		= (uint8_t *)&mbox64->mbox32;


	// put scb in the free pool
	scb->state	= SCB_FREE;
	scb->scp	= NULL;

	spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);

	list_add(&scb->list, &adapter->uscb_pool);

	spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);

	// if a delete logical drive operation succeeded, restart the
	// controller (undo the quiescent state set by wait_till_fw_empty)
	if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {

		adapter->quiescent--;

		megaraid_mbox_runpendq(adapter, NULL);
	}

	kioc->done(kioc);

	return;
}
/**
 * gather_hbainfo - HBA characteristics for the applications
 * @adapter : HBA soft state
 * @hinfo : pointer to the caller's host info structure
 */
/*
 * Fix: removed the local `dmajor`, which was assigned from
 * megaraid_mbox_version[0] but never used.
 */
static int
gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
{
	/* PCI identity of the controller */
	hinfo->pci_vendor_id		= adapter->pdev->vendor;
	hinfo->pci_device_id		= adapter->pdev->device;
	hinfo->subsys_vendor_id		= adapter->pdev->subsystem_vendor;
	hinfo->subsys_device_id		= adapter->pdev->subsystem_device;

	/* PCI topology */
	hinfo->pci_bus			= adapter->pdev->bus->number;
	hinfo->pci_dev_fn		= adapter->pdev->devfn;
	hinfo->pci_slot			= PCI_SLOT(adapter->pdev->devfn);
	hinfo->irq			= adapter->host->irq;
	hinfo->baseport			= ADAP2RAIDDEV(adapter)->baseport;

	/* unique_id encodes bus number and devfn */
	hinfo->unique_id		= (hinfo->pci_bus << 8) |
						adapter->pdev->devfn;

	hinfo->host_no			= adapter->host->host_no;

	return 0;
}
/*
* END: Interface for the common management module
*/
/**
* megaraid_sysfs_alloc_resources - allocate sysfs related resources
* @adapter : controller's soft state
*
* Allocate packets required to issue FW calls whenever the sysfs attributes
* are read. These attributes would require up-to-date information from the
* FW. Also set up resources for mutual exclusion to share these resources and
* the wait queue.
*
* Return 0 on success.
* Return -ERROR_CODE on failure.
*/
static int
megaraid_sysfs_alloc_resources(adapter_t *adapter)
{
	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
	int		rval = 0;

	// one uioc + mailbox + one DMA page, shared by all sysfs reads
	// (serialized by sysfs_mtx below)
	raid_dev->sysfs_uioc = kmalloc(sizeof(uioc_t), GFP_KERNEL);

	raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);

	raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
			PAGE_SIZE, &raid_dev->sysfs_buffer_dma);

	if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
		!raid_dev->sysfs_buffer) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid: out of memory, %s %d\n", __func__,
			__LINE__));

		rval = -ENOMEM;

		// free whichever of the three allocations succeeded
		megaraid_sysfs_free_resources(adapter);
	}

	mutex_init(&raid_dev->sysfs_mtx);

	init_waitqueue_head(&raid_dev->sysfs_wait_q);

	return rval;
}
/**
* megaraid_sysfs_free_resources - free sysfs related resources
* @adapter : controller's soft state
*
* Free packets allocated for sysfs FW commands
*/
static void
megaraid_sysfs_free_resources(adapter_t *adapter)
{
	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);

	// kfree(NULL) is a no-op, so the kmalloc'd pair needs no guard
	kfree(raid_dev->sysfs_uioc);
	kfree(raid_dev->sysfs_mbox64);

	// the DMA buffer does need one: pci_free_consistent must only be
	// called for a successful allocation
	if (raid_dev->sysfs_buffer) {
		pci_free_consistent(adapter->pdev, PAGE_SIZE,
			raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
	}
}
/**
* megaraid_sysfs_get_ldmap_done - callback for get ldmap
* @uioc : completed packet
*
* Callback routine called in the ISR/tasklet context for get ldmap call
*/
static void
megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
{
	// buf_vaddr is repurposed by megaraid_sysfs_get_ldmap() to carry the
	// adapter pointer, not a data buffer
	adapter_t	*adapter = (adapter_t *)uioc->buf_vaddr;
	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);

	uioc->status = 0;	// success; waiter tests status != -ENODATA

	wake_up(&raid_dev->sysfs_wait_q);
}
/**
* megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap
* @data : timed out packet
*
* Timeout routine to recover and return to application, in case the adapter
* has stopped responding. A timeout of 60 seconds for this command seems like
* a good value.
*/
static void
megaraid_sysfs_get_ldmap_timeout(unsigned long data)
{
	uioc_t		*uioc = (uioc_t *)data;
	// buf_vaddr carries the adapter pointer (see megaraid_sysfs_get_ldmap)
	adapter_t	*adapter = (adapter_t *)uioc->buf_vaddr;
	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);

	uioc->status = -ETIME;	// mark timeout so the waiter can distinguish it

	wake_up(&raid_dev->sysfs_wait_q);
}
/**
* megaraid_sysfs_get_ldmap - get update logical drive map
* @adapter : controller's soft state
*
* This routine will be called whenever user reads the logical drive
* attributes, go get the current logical drive mapping table from the
* firmware. We use the management API's to issue commands to the controller.
*
 * NOTE: The command issuance functionality is not generalized and
 * implemented in context of "get ld map" command only. If required, the
 * command issuance logic can be trivially pulled out and implemented as a
 * standalone library. For now, this should suffice since there is no other
 * user of this interface.
*
* Return 0 on success.
* Return -1 on failure.
*/
static int
megaraid_sysfs_get_ldmap(adapter_t *adapter)
{
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	uioc_t			*uioc;
	mbox64_t		*mbox64;
	mbox_t			*mbox;
	char			*raw_mbox;
	struct timer_list	sysfs_timer;
	struct timer_list	*timerp;
	caddr_t			ldmap;
	int			rval = 0;

	/*
	 * Allow only one read at a time to go through the sysfs attributes
	 */
	mutex_lock(&raid_dev->sysfs_mtx);

	uioc	= raid_dev->sysfs_uioc;
	mbox64	= raid_dev->sysfs_mbox64;
	ldmap	= raid_dev->sysfs_buffer;

	memset(uioc, 0, sizeof(uioc_t));
	memset(mbox64, 0, sizeof(mbox64_t));
	memset(ldmap, 0, sizeof(raid_dev->curr_ldmap));

	mbox		= &mbox64->mbox32;
	raw_mbox	= (char *)mbox;
	uioc->cmdbuf    = (uint64_t)(unsigned long)mbox64;
	// buf_vaddr is repurposed to carry the adapter pointer for the
	// done/timeout callbacks
	uioc->buf_vaddr	= (caddr_t)adapter;
	uioc->status	= -ENODATA;	// sentinel: "no completion yet"
	uioc->done	= megaraid_sysfs_get_ldmap_done;

	/*
	 * Prepare the mailbox packet to get the current logical drive mapping
	 * table
	 */
	mbox->xferaddr = (uint32_t)raid_dev->sysfs_buffer_dma;

	raw_mbox[0] = FC_DEL_LOGDRV;
	raw_mbox[2] = OP_GET_LDID_MAP;

	/*
	 * Setup a timer to recover from a non-responding controller
	 */
	timerp	= &sysfs_timer;
	init_timer(timerp);

	timerp->function	= megaraid_sysfs_get_ldmap_timeout;
	timerp->data		= (unsigned long)uioc;
	timerp->expires		= jiffies + 60 * HZ;	// 60 s timeout

	add_timer(timerp);

	/*
	 * Send the command to the firmware
	 */
	rval = megaraid_mbox_mm_command(adapter, uioc);

	if (rval == 0) {	// command successfully issued
		// sleep until the done or the timeout callback fires
		wait_event(raid_dev->sysfs_wait_q, (uioc->status != -ENODATA));

		/*
		 * Check if the command timed out
		 */
		if (uioc->status == -ETIME) {
			con_log(CL_ANN, (KERN_NOTICE
				"megaraid: sysfs get ld map timed out\n"));

			rval = -ETIME;
		}
		else {
			rval = mbox->status;
		}

		if (rval == 0) {
			// commit the freshly fetched map into the soft state
			memcpy(raid_dev->curr_ldmap, ldmap,
				sizeof(raid_dev->curr_ldmap));
		}
		else {
			con_log(CL_ANN, (KERN_NOTICE
				"megaraid: get ld map failed with %x\n", rval));
		}
	}
	else {
		con_log(CL_ANN, (KERN_NOTICE
			"megaraid: could not issue ldmap command:%x\n", rval));
	}


	del_timer_sync(timerp);

	mutex_unlock(&raid_dev->sysfs_mtx);

	return rval;
}
/**
 * megaraid_sysfs_show_app_hndl - display application handle for this adapter
 * @dev : device object representation for the host
 * @attr : device attribute being shown
 * @buf : buffer to send data to
 *
 * Display the handle used by the applications while executing management
 * tasks on the adapter. We invoke a management module API to get the adapter
 * handle, since we do not interface with applications directly.
 */
static ssize_t
megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct Scsi_Host	*shost	 = class_to_shost(dev);
	adapter_t		*adapter = (adapter_t *)SCSIHOST2ADAP(shost);

	/* The management module owns the handle; just format it */
	return snprintf(buf, 8, "%u\n",
			mraid_mm_adapter_app_handle(adapter->unique_id));
}
/**
* megaraid_sysfs_show_ldnum - display the logical drive number for this device
* @dev : device object representation for the scsi device
* @attr : device attribute to show
* @buf : buffer to send data to
*
* Display the logical drive number for the device in question, if it a valid
* logical drive. For physical devices, "-1" is returned.
*
* The logical drive number is displayed in following format:
*
* <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE>
*
* <int> <int> <int> <int>
*/
static ssize_t
megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_device	*sdev = to_scsi_device(dev);
	adapter_t		*adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host);
	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
	int			scsi_id = -1;		// -1 == not a logical drive
	int			logical_drv = -1;
	int			ldid_map = -1;
	uint32_t		app_hndl = 0;
	int			mapped_sdev_id;
	int			rval;
	int			i;

	// only meaningful for logical drives on FW that supports random
	// deletion; physical devices report "-1 -1 -1 0"
	if (raid_dev->random_del_supported &&
			MRAID_IS_LOGICAL_SDEV(adapter, sdev)) {

		rval = megaraid_sysfs_get_ldmap(adapter);
		if (rval == 0) {

			for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++) {

				// undo the initiator-ID hole in target
				// numbering (see device map setup)
				mapped_sdev_id = sdev->id;

				if (sdev->id > adapter->init_id) {
					mapped_sdev_id -= 1;
				}

				if (raid_dev->curr_ldmap[i] == mapped_sdev_id) {

					scsi_id = sdev->id;

					logical_drv = i;

					ldid_map = raid_dev->curr_ldmap[i];

					app_hndl = mraid_mm_adapter_app_handle(
							adapter->unique_id);

					break;
				}
			}
		}
		else {
			con_log(CL_ANN, (KERN_NOTICE
				"megaraid: sysfs get ld map failed: %x\n",
				rval));
		}
	}

	return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv,
			ldid_map, app_hndl);
}
/*
* END: Mailbox Low Level Driver
*/
module_init(megaraid_init);
module_exit(megaraid_exit);
/* vim: set ts=8 sw=8 tw=78 ai si: */
| gpl-2.0 |
Altaf-Mahdi/i9100 | drivers/media/rc/keymaps/rc-pctv-sedna.c | 2944 | 2110 | /* pctv-sedna.h - Keytable for pctv_sedna Remote Controller
*
* keymap imported from ir-keymaps.c
*
* Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
/* Mapping for the 28 key remote control as seen at
http://www.sednacomputer.com/photo/cardbus-tv.jpg
Pavel Mihaylov <bin@bash.info>
Also for the remote bundled with Kozumi KTV-01C card */
/* Scancode -> Linux input keycode mapping */
static struct rc_map_table pctv_sedna[] = {
	{ 0x00, KEY_0 },
	{ 0x01, KEY_1 },
	{ 0x02, KEY_2 },
	{ 0x03, KEY_3 },
	{ 0x04, KEY_4 },
	{ 0x05, KEY_5 },
	{ 0x06, KEY_6 },
	{ 0x07, KEY_7 },
	{ 0x08, KEY_8 },
	{ 0x09, KEY_9 },
	{ 0x0a, KEY_AGAIN },	/* Recall */
	{ 0x0b, KEY_CHANNELUP },
	{ 0x0c, KEY_VOLUMEUP },
	{ 0x0d, KEY_MODE },	/* Stereo */
	{ 0x0e, KEY_STOP },
	{ 0x0f, KEY_PREVIOUSSONG },
	{ 0x10, KEY_ZOOM },
	{ 0x11, KEY_VIDEO },	/* Source */
	{ 0x12, KEY_POWER },
	{ 0x13, KEY_MUTE },
	{ 0x15, KEY_CHANNELDOWN },
	{ 0x18, KEY_VOLUMEDOWN },
	{ 0x19, KEY_CAMERA },	/* Snapshot */
	{ 0x1a, KEY_NEXTSONG },
	{ 0x1b, KEY_TIME },	/* Time Shift */
	{ 0x1c, KEY_RADIO },	/* FM Radio */
	{ 0x1d, KEY_RECORD },
	{ 0x1e, KEY_PAUSE },
	/* additional codes for Kozumi's remote */
	{ 0x14, KEY_INFO },	/* OSD */
	{ 0x16, KEY_OK },	/* OK */
	{ 0x17, KEY_DIGITS },	/* Plus */
	{ 0x1f, KEY_PLAY },	/* Play */
};
/* Wrapper used to (un)register the table with the rc-core keymap list */
static struct rc_map_list pctv_sedna_map = {
	.map = {
		.scan    = pctv_sedna,
		.size    = ARRAY_SIZE(pctv_sedna),
		.rc_type = RC_TYPE_UNKNOWN,	/* Legacy IR type */
		.name    = RC_MAP_PCTV_SEDNA,
	}
};
/* Module init: make the keymap available to rc-core */
static int __init init_rc_map_pctv_sedna(void)
{
	return rc_map_register(&pctv_sedna_map);
}
/* Module exit: withdraw the keymap from rc-core */
static void __exit exit_rc_map_pctv_sedna(void)
{
	rc_map_unregister(&pctv_sedna_map);
}
module_init(init_rc_map_pctv_sedna)
module_exit(exit_rc_map_pctv_sedna)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
| gpl-2.0 |
TeamJB/kernel_htc_m7 | arch/arm/mach-omap2/board-flash.c | 4736 | 6212 | /*
* board-flash.c
* Modified from mach-omap2/board-3430sdp-flash.c
*
* Copyright (C) 2009 Nokia Corporation
* Copyright (C) 2009 Texas Instruments
*
* Vimal Singh <vimalsingh@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/io.h>
#include <plat/irqs.h>
#include <plat/gpmc.h>
#include <plat/nand.h>
#include <plat/onenand.h>
#include <plat/tc.h>
#include "board-flash.h"
#define REG_FPGA_REV 0x10
#define REG_FPGA_DIP_SWITCH_INPUT2 0x60
#define MAX_SUPPORTED_GPMC_CONFIG 3
#define DEBUG_BASE 0x08000000 /* debug board */
/* various memory sizes */
#define FLASH_SIZE_SDPV1 SZ_64M /* NOR flash (64 Meg aligned) */
#define FLASH_SIZE_SDPV2 SZ_128M /* NOR flash (256 Meg aligned) */
/* Platform data, MMIO resource and platform device for the NOR flash
 * behind the generic "physmap-flash" map driver.  The resource start/end
 * are filled in at runtime by board_nor_init() once a GPMC chip-select
 * window has been granted. */
static struct physmap_flash_data board_nor_data = {
	.width		= 2,	/* 16-bit data bus */
};
static struct resource board_nor_resource = {
	.flags		= IORESOURCE_MEM,
};
static struct platform_device board_nor_device = {
	.name		= "physmap-flash",
	.id		= 0,
	.dev		= {
		.platform_data = &board_nor_data,
	},
	.num_resources	= 1,
	.resource	= &board_nor_resource,
};
/*
 * board_nor_init - request a GPMC chip-select window and register the
 * NOR flash platform device.
 * @nor_parts: MTD partition table for the NOR chip
 * @nr_parts:  number of entries in @nor_parts
 * @cs:        GPMC chip-select the NOR flash is wired to
 *
 * The mapped window size depends on the SDP board revision: 128M on
 * OMAP3430 ES1.0 and later, 64M on earlier boards.
 */
static void
__init board_nor_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
{
	int err;
	unsigned long flash_size;

	board_nor_data.parts = nor_parts;
	board_nor_data.nr_parts = nr_parts;

	/* Configure start address and size of NOR device */
	flash_size = (omap_rev() >= OMAP3430_REV_ES1_0) ?
			FLASH_SIZE_SDPV2 : FLASH_SIZE_SDPV1;
	err = gpmc_cs_request(cs, flash_size - 1,
			(unsigned long *)&board_nor_resource.start);
	if (err < 0) {
		pr_err("NOR: Can't request GPMC CS\n");
		return;
	}
	board_nor_resource.end = board_nor_resource.start + flash_size - 1;

	if (platform_device_register(&board_nor_device) < 0) {
		pr_err("Unable to register NOR device\n");
		/* don't leak the chip-select on registration failure */
		gpmc_cs_free(cs);
	}
}
#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
		defined(CONFIG_MTD_ONENAND_OMAP2_MODULE)
/* OneNAND sits behind the GPMC; DMA is disabled in the driver. */
static struct omap_onenand_platform_data board_onenand_data = {
	.dma_channel	= -1,	/* disable DMA in OMAP OneNAND driver */
};
/*
 * board_onenand_init - pass partition table and chip-select to the
 * gpmc-onenand glue, which registers the actual device.
 */
static void
__init board_onenand_init(struct mtd_partition *onenand_parts,
				u8 nr_parts, u8 cs)
{
	board_onenand_data.cs = cs;
	board_onenand_data.parts = onenand_parts;
	board_onenand_data.nr_parts = nr_parts;
	gpmc_onenand_init(&board_onenand_data);
}
#else
/* Stub used when the OneNAND driver is not configured in. */
static void
__init board_onenand_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
{
}
#endif /* CONFIG_MTD_ONENAND_OMAP2 || CONFIG_MTD_ONENAND_OMAP2_MODULE */
#if defined(CONFIG_MTD_NAND_OMAP2) || \
		defined(CONFIG_MTD_NAND_OMAP2_MODULE)
/* Note that all values in this struct are in nanoseconds */
static struct gpmc_timings nand_timings = {
	.sync_clk = 0,
	.cs_on = 0,
	.cs_rd_off = 36,
	.cs_wr_off = 36,
	.adv_on = 6,
	.adv_rd_off = 24,
	.adv_wr_off = 36,
	.we_off = 30,
	.oe_off = 48,
	.access = 54,
	.rd_cycle = 72,
	.wr_cycle = 72,
	.wr_access = 30,
	.wr_data_mux_bus = 0,
};
static struct omap_nand_platform_data board_nand_data = {
	.gpmc_t = &nand_timings,
};
/*
 * board_nand_init - fill in the NAND platform data (chip-select,
 * partitions, bus width, default Hamming ECC, per-CS GPMC interrupt)
 * and register the device through the gpmc-nand glue.
 */
void
__init board_nand_init(struct mtd_partition *nand_parts,
		u8 nr_parts, u8 cs, int nand_type)
{
	board_nand_data.cs = cs;
	board_nand_data.parts = nand_parts;
	board_nand_data.nr_parts = nr_parts;
	board_nand_data.devsize = nand_type;	/* x8 or x16 device */
	board_nand_data.ecc_opt = OMAP_ECC_HAMMING_CODE_DEFAULT;
	board_nand_data.gpmc_irq = OMAP_GPMC_IRQ_BASE + cs;
	gpmc_nand_init(&board_nand_data);
}
#endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */
/**
 * get_gpmc0_type - Reads the FPGA DIP_SWITCH_INPUT_REGISTER2 to get
 * the various cs values.
 *
 * Returns the chip-select configuration index chosen by the debug-board
 * DIP switches, or 0 if no debug FPGA is present.
 * NOTE(review): -ENOMEM is truncated by the u8 return type (becomes
 * 244); callers only survive because they range-check the result
 * against MAX_SUPPORTED_GPMC_CONFIG -- confirm before relying on the
 * errno value.
 */
static u8 get_gpmc0_type(void)
{
	u8 cs = 0;
	void __iomem *fpga_map_addr;
	fpga_map_addr = ioremap(DEBUG_BASE, 4096);
	if (!fpga_map_addr)
		return -ENOMEM;
	/* a zero revision register means the debug FPGA is absent */
	if (!(__raw_readw(fpga_map_addr + REG_FPGA_REV)))
		/* we dont have an DEBUG FPGA??? */
		/* Depend on #defines!! default to strata boot return param */
		goto unmap;
	/* S8-DIP-OFF = 1, S8-DIP-ON = 0 */
	cs = __raw_readw(fpga_map_addr + REG_FPGA_DIP_SWITCH_INPUT2) & 0xf;
	/* ES2.0 SDP's onwards 4 dip switches are provided for CS */
	if (omap_rev() >= OMAP3430_REV_ES1_0)
		/* change (S8-1:4=DS-2:0) to (S8-4:1=DS-2:0) */
		cs = ((cs & 8) >> 3) | ((cs & 4) >> 1) |
			((cs & 2) << 1) | ((cs & 1) << 3);
	else
		/* change (S8-1:3=DS-2:0) to (S8-3:1=DS-2:0) */
		cs = ((cs & 4) >> 2) | (cs & 2) | ((cs & 1) << 2);
unmap:
	iounmap(fpga_map_addr);
	return cs;
}
/**
 * board_flash_init - Identify devices connected to GPMC and register.
 * @partition_info: partition tables, indexed 0=NOR, 1=OneNAND, 2=NAND
 * @chip_sel_board: per-configuration table mapping each GPMC chip-select
 *                  to a device type (PDC_NOR/PDC_NAND/PDC_ONENAND)
 * @nand_type:      NAND bus width, passed through to board_nand_init()
 *
 * Picks the chip-select layout from the debug-board DIP switches, then
 * registers each flash device on the first chip-select assigned to it.
 *
 * @return - void.
 */
void __init board_flash_init(struct flash_partitions partition_info[],
		char chip_sel_board[][GPMC_CS_NUM], int nand_type)
{
	u8 cs = 0;
	u8 norcs = GPMC_CS_NUM + 1;	/* "not found" sentinels */
	u8 nandcs = GPMC_CS_NUM + 1;
	u8 onenandcs = GPMC_CS_NUM + 1;
	u8 idx;
	unsigned char *config_sel = NULL;

	/* REVISIT: Is this return correct idx for 2430 SDP?
	 * for which cs configuration matches for 2430 SDP?
	 */
	idx = get_gpmc0_type();
	if (idx >= MAX_SUPPORTED_GPMC_CONFIG) {
		/* report the offending index, not 'cs' (still 0 here) */
		pr_err("%s: Invalid chip select: %d\n", __func__, idx);
		return;
	}
	config_sel = (unsigned char *)(chip_sel_board[idx]);

	/* remember the first chip-select wired to each device type */
	while (cs < GPMC_CS_NUM) {
		switch (config_sel[cs]) {
		case PDC_NOR:
			if (norcs > GPMC_CS_NUM)
				norcs = cs;
			break;
		case PDC_NAND:
			if (nandcs > GPMC_CS_NUM)
				nandcs = cs;
			break;
		case PDC_ONENAND:
			if (onenandcs > GPMC_CS_NUM)
				onenandcs = cs;
			break;
		}
		cs++;
	}

	if (norcs > GPMC_CS_NUM)
		pr_err("NOR: Unable to find configuration in GPMC\n");
	else
		board_nor_init(partition_info[0].parts,
				partition_info[0].nr_parts, norcs);

	if (onenandcs > GPMC_CS_NUM)
		pr_err("OneNAND: Unable to find configuration in GPMC\n");
	else
		board_onenand_init(partition_info[1].parts,
				partition_info[1].nr_parts, onenandcs);

	if (nandcs > GPMC_CS_NUM)
		pr_err("NAND: Unable to find configuration in GPMC\n");
	else
		board_nand_init(partition_info[2].parts,
				partition_info[2].nr_parts, nandcs, nand_type);
}
| gpl-2.0 |
friedrich420/S4-AEL-Kernel--early-versions- | arch/arm/mach-imx/mach-pcm043.c | 4736 | 10521 | /*
* Copyright (C) 2009 Sascha Hauer, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/plat-ram.h>
#include <linux/memory.h>
#include <linux/gpio.h>
#include <linux/smc911x.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/iomux-mx35.h>
#include <mach/ulpi.h>
#include "devices-imx35.h"
/* Selectable LCD timings for the mx3fb framebuffer; the Sharp panel is
 * the default (see mx3fb_pdata.name below). */
static const struct fb_videomode fb_modedb[] = {
	{
		/* 240x320 @ 60 Hz */
		.name		= "Sharp-LQ035Q7",
		.refresh	= 60,
		.xres		= 240,
		.yres		= 320,
		.pixclock	= 185925,
		.left_margin	= 9,
		.right_margin	= 16,
		.upper_margin	= 7,
		.lower_margin	= 9,
		.hsync_len	= 1,
		.vsync_len	= 1,
		.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE | FB_SYNC_CLK_INVERT | FB_SYNC_CLK_IDLE_EN,
		.vmode		= FB_VMODE_NONINTERLACED,
		.flag		= 0,
	}, {
		/* 240x320 @ 60 Hz */
		.name		= "TX090",
		.refresh	= 60,
		.xres		= 240,
		.yres		= 320,
		.pixclock	= 38255,
		.left_margin	= 144,
		.right_margin	= 0,
		.upper_margin	= 7,
		.lower_margin	= 40,
		.hsync_len	= 96,
		.vsync_len	= 1,
		.sync		= FB_SYNC_VERT_HIGH_ACT | FB_SYNC_OE_ACT_HIGH,
		.vmode		= FB_VMODE_NONINTERLACED,
		.flag		= 0,
	},
};
/* IPU core platform data: base of the IRQ range it may allocate. */
static const struct ipu_platform_data mx3_ipu_data __initconst = {
	.irq_base = MXC_IPU_IRQ_START,
};
/* Framebuffer platform data; .name selects the default fb_modedb entry. */
static struct mx3fb_platform_data mx3fb_pdata __initdata = {
	.name		= "Sharp-LQ035Q7",
	.mode		= fb_modedb,
	.num_modes	= ARRAY_SIZE(fb_modedb),
};
/* 32 MiB NOR flash mapped at 0xa0000000, driven by physmap-flash. */
static struct physmap_flash_data pcm043_flash_data = {
	.width  = 2,	/* 16-bit data bus */
};
static struct resource pcm043_flash_resource = {
	.start	= 0xa0000000,
	.end	= 0xa1ffffff,
	.flags	= IORESOURCE_MEM,
};
static struct platform_device pcm043_flash = {
	.name	= "physmap-flash",
	.id	= 0,
	.dev	= {
		.platform_data  = &pcm043_flash_data,
	},
	.resource = &pcm043_flash_resource,
	.num_resources = 1,
};
/* Both UARTs use hardware RTS/CTS flow control. */
static const struct imxuart_platform_data uart_pdata __initconst = {
	.flags = IMXUART_HAVE_RTSCTS,
};
static const struct imxi2c_platform_data pcm043_i2c0_data __initconst = {
	.bitrate = 50000,
};
/* 4 KiB EEPROM with 16-bit addressing, 32-byte pages. */
static struct at24_platform_data board_eeprom = {
	.byte_len = 4096,
	.page_size = 32,
	.flags = AT24_FLAG_ADDR16,
};
/* Devices on I2C bus 0: EEPROM and PCF8563 RTC. */
static struct i2c_board_info pcm043_i2c_devices[] = {
	{
		I2C_BOARD_INFO("at24", 0x52), /* E0=0, E1=1, E2=0 */
		.platform_data = &board_eeprom,
	}, {
		I2C_BOARD_INFO("pcf8563", 0x51),
	},
};
/* Plain platform devices registered in one go from pcm043_init(). */
static struct platform_device *devices[] __initdata = {
	&pcm043_flash,
};
/* Static pin multiplexing for the whole board; applied once at init via
 * mxc_iomux_v3_setup_multiple_pads(). Grouped by peripheral below. */
static iomux_v3_cfg_t pcm043_pads[] = {
	/* UART1 */
	MX35_PAD_CTS1__UART1_CTS,
	MX35_PAD_RTS1__UART1_RTS,
	MX35_PAD_TXD1__UART1_TXD_MUX,
	MX35_PAD_RXD1__UART1_RXD_MUX,
	/* UART2 */
	MX35_PAD_CTS2__UART2_CTS,
	MX35_PAD_RTS2__UART2_RTS,
	MX35_PAD_TXD2__UART2_TXD_MUX,
	MX35_PAD_RXD2__UART2_RXD_MUX,
	/* FEC */
	MX35_PAD_FEC_TX_CLK__FEC_TX_CLK,
	MX35_PAD_FEC_RX_CLK__FEC_RX_CLK,
	MX35_PAD_FEC_RX_DV__FEC_RX_DV,
	MX35_PAD_FEC_COL__FEC_COL,
	MX35_PAD_FEC_RDATA0__FEC_RDATA_0,
	MX35_PAD_FEC_TDATA0__FEC_TDATA_0,
	MX35_PAD_FEC_TX_EN__FEC_TX_EN,
	MX35_PAD_FEC_MDC__FEC_MDC,
	MX35_PAD_FEC_MDIO__FEC_MDIO,
	MX35_PAD_FEC_TX_ERR__FEC_TX_ERR,
	MX35_PAD_FEC_RX_ERR__FEC_RX_ERR,
	MX35_PAD_FEC_CRS__FEC_CRS,
	MX35_PAD_FEC_RDATA1__FEC_RDATA_1,
	MX35_PAD_FEC_TDATA1__FEC_TDATA_1,
	MX35_PAD_FEC_RDATA2__FEC_RDATA_2,
	MX35_PAD_FEC_TDATA2__FEC_TDATA_2,
	MX35_PAD_FEC_RDATA3__FEC_RDATA_3,
	MX35_PAD_FEC_TDATA3__FEC_TDATA_3,
	/* I2C1 */
	MX35_PAD_I2C1_CLK__I2C1_SCL,
	MX35_PAD_I2C1_DAT__I2C1_SDA,
	/* Display */
	MX35_PAD_LD0__IPU_DISPB_DAT_0,
	MX35_PAD_LD1__IPU_DISPB_DAT_1,
	MX35_PAD_LD2__IPU_DISPB_DAT_2,
	MX35_PAD_LD3__IPU_DISPB_DAT_3,
	MX35_PAD_LD4__IPU_DISPB_DAT_4,
	MX35_PAD_LD5__IPU_DISPB_DAT_5,
	MX35_PAD_LD6__IPU_DISPB_DAT_6,
	MX35_PAD_LD7__IPU_DISPB_DAT_7,
	MX35_PAD_LD8__IPU_DISPB_DAT_8,
	MX35_PAD_LD9__IPU_DISPB_DAT_9,
	MX35_PAD_LD10__IPU_DISPB_DAT_10,
	MX35_PAD_LD11__IPU_DISPB_DAT_11,
	MX35_PAD_LD12__IPU_DISPB_DAT_12,
	MX35_PAD_LD13__IPU_DISPB_DAT_13,
	MX35_PAD_LD14__IPU_DISPB_DAT_14,
	MX35_PAD_LD15__IPU_DISPB_DAT_15,
	MX35_PAD_LD16__IPU_DISPB_DAT_16,
	MX35_PAD_LD17__IPU_DISPB_DAT_17,
	MX35_PAD_D3_HSYNC__IPU_DISPB_D3_HSYNC,
	MX35_PAD_D3_FPSHIFT__IPU_DISPB_D3_CLK,
	MX35_PAD_D3_DRDY__IPU_DISPB_D3_DRDY,
	MX35_PAD_CONTRAST__IPU_DISPB_CONTR,
	MX35_PAD_D3_VSYNC__IPU_DISPB_D3_VSYNC,
	MX35_PAD_D3_REV__IPU_DISPB_D3_REV,
	MX35_PAD_D3_CLS__IPU_DISPB_D3_CLS,
	/* gpio */
	MX35_PAD_ATA_CS0__GPIO2_6,
	/* USB host */
	MX35_PAD_I2C2_CLK__USB_TOP_USBH2_PWR,
	MX35_PAD_I2C2_DAT__USB_TOP_USBH2_OC,
	/* SSI */
	MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS,
	MX35_PAD_STXD4__AUDMUX_AUD4_TXD,
	MX35_PAD_SRXD4__AUDMUX_AUD4_RXD,
	MX35_PAD_SCK4__AUDMUX_AUD4_TXC,
	/* CAN2 */
	MX35_PAD_TX5_RX0__CAN2_TXCAN,
	MX35_PAD_TX4_RX1__CAN2_RXCAN,
	/* esdhc */
	MX35_PAD_SD1_CMD__ESDHC1_CMD,
	MX35_PAD_SD1_CLK__ESDHC1_CLK,
	MX35_PAD_SD1_DATA0__ESDHC1_DAT0,
	MX35_PAD_SD1_DATA1__ESDHC1_DAT1,
	MX35_PAD_SD1_DATA2__ESDHC1_DAT2,
	MX35_PAD_SD1_DATA3__ESDHC1_DAT3,
	MX35_PAD_ATA_DATA10__GPIO2_23, /* WriteProtect */
	MX35_PAD_ATA_DATA11__GPIO2_24, /* CardDetect */
};
#define AC97_GPIO_TXFS IMX_GPIO_NR(2, 31)
#define AC97_GPIO_TXD IMX_GPIO_NR(2, 28)
#define AC97_GPIO_RESET IMX_GPIO_NR(2, 0)
#define SD1_GPIO_WP IMX_GPIO_NR(2, 23)
#define SD1_GPIO_CD IMX_GPIO_NR(2, 24)
/*
 * AC97 warm reset: temporarily drive the TXFS pin as a GPIO, pulse it
 * high then low, then hand the pad back to the audmux function.
 */
static void pcm043_ac97_warm_reset(struct snd_ac97 *ac97)
{
	iomux_v3_cfg_t txfs_gpio = MX35_PAD_STXFS4__GPIO2_31;
	iomux_v3_cfg_t txfs = MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS;
	int ret;

	ret = gpio_request(AC97_GPIO_TXFS, "SSI");
	if (ret) {
		/* use pr_err: bare printk() has no log level */
		pr_err("failed to get GPIO_TXFS: %d\n", ret);
		return;
	}

	mxc_iomux_v3_setup_pad(txfs_gpio);

	/* warm reset */
	gpio_direction_output(AC97_GPIO_TXFS, 1);
	udelay(2);
	gpio_set_value(AC97_GPIO_TXFS, 0);

	gpio_free(AC97_GPIO_TXFS);
	mxc_iomux_v3_setup_pad(txfs);
}
/*
 * AC97 cold reset: drive TXFS, TXD and the codec RESET line as GPIOs,
 * hold TXFS/TXD low while pulsing RESET low for 10us, then return the
 * pads to their audmux bus function.  Error labels unwind the GPIO
 * requests in reverse order; success falls through them as well.
 */
static void pcm043_ac97_cold_reset(struct snd_ac97 *ac97)
{
	iomux_v3_cfg_t txfs_gpio = MX35_PAD_STXFS4__GPIO2_31;
	iomux_v3_cfg_t txfs = MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS;
	iomux_v3_cfg_t txd_gpio = MX35_PAD_STXD4__GPIO2_28;
	iomux_v3_cfg_t txd = MX35_PAD_STXD4__AUDMUX_AUD4_TXD;
	iomux_v3_cfg_t reset_gpio = MX35_PAD_SD2_CMD__GPIO2_0;
	int ret;

	ret = gpio_request(AC97_GPIO_TXFS, "SSI");
	if (ret)
		goto err1;

	ret = gpio_request(AC97_GPIO_TXD, "SSI");
	if (ret)
		goto err2;

	ret = gpio_request(AC97_GPIO_RESET, "SSI");
	if (ret)
		goto err3;

	mxc_iomux_v3_setup_pad(txfs_gpio);
	mxc_iomux_v3_setup_pad(txd_gpio);
	mxc_iomux_v3_setup_pad(reset_gpio);

	gpio_direction_output(AC97_GPIO_TXFS, 0);
	gpio_direction_output(AC97_GPIO_TXD, 0);

	/* cold reset */
	gpio_direction_output(AC97_GPIO_RESET, 0);
	udelay(10);
	gpio_direction_output(AC97_GPIO_RESET, 1);

	mxc_iomux_v3_setup_pad(txd);
	mxc_iomux_v3_setup_pad(txfs);

	gpio_free(AC97_GPIO_RESET);
err3:
	gpio_free(AC97_GPIO_TXD);
err2:
	gpio_free(AC97_GPIO_TXFS);
err1:
	/* ret == 0 on the success path; only log real failures.
	 * use pr_err: bare printk() has no log level */
	if (ret)
		pr_err("%s failed with %d\n", __func__, ret);
	mdelay(1);
}
/* SSI runs in AC97 mode; reset callbacks bit-bang the codec lines. */
static const struct imx_ssi_platform_data pcm043_ssi_pdata __initconst = {
	.ac97_reset = pcm043_ac97_cold_reset,
	.ac97_warm_reset = pcm043_ac97_warm_reset,
	.flags = IMX_SSI_USE_AC97,
};
/* 8-bit NAND with hardware ECC. */
static const struct mxc_nand_platform_data
pcm037_nand_board_info __initconst = {
	.width = 1,
	.hw_ecc = 1,
};
/* OTG port init hook: differential unidirectional ULPI interface. */
static int pcm043_otg_init(struct platform_device *pdev)
{
	return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
}
static struct mxc_usbh_platform_data otg_pdata __initdata = {
	.init	= pcm043_otg_init,
	.portsc	= MXC_EHCI_MODE_UTMI,
};
/* Host port 1 init hook: single-ended, internal PHY, pull-up enabled. */
static int pcm043_usbh1_init(struct platform_device *pdev)
{
	return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_SINGLE_UNI |
			MXC_EHCI_INTERNAL_PHY | MXC_EHCI_IPPUE_DOWN);
}
static const struct mxc_usbh_platform_data usbh1_pdata __initconst = {
	.init	= pcm043_usbh1_init,
	.portsc	= MXC_EHCI_MODE_SERIAL,
};
/* Gadget-mode platform data for the OTG port (UTMI PHY). */
static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
	.operating_mode	= FSL_USB2_DR_DEVICE,
	.phy_mode	= FSL_USB2_PHY_UTMI,
};
/* "otg_mode=host|device" kernel parameter: selects whether the OTG
 * port acts as an EHCI host or as a USB gadget (the default). */
static int otg_mode_host;

static int __init pcm043_otg_mode(char *options)
{
	if (strcmp(options, "host") == 0) {
		otg_mode_host = 1;
	} else if (strcmp(options, "device") == 0) {
		otg_mode_host = 0;
	} else {
		pr_info("otg_mode neither \"host\" nor \"device\". "
			"Defaulting to device\n");
	}
	return 0;
}
__setup("otg_mode=", pcm043_otg_mode);
/* SD card slot: write-protect and card-detect wired to GPIOs. */
static struct esdhc_platform_data sd1_pdata = {
	.wp_gpio = SD1_GPIO_WP,
	.cd_gpio = SD1_GPIO_CD,
	.wp_type = ESDHC_WP_GPIO,
	.cd_type = ESDHC_CD_GPIO,
};
/*
 * Board specific initialization.
 * Sets up pinmux, then registers every on-board peripheral; the OTG
 * port is registered as host or gadget depending on the otg_mode=
 * kernel parameter.
 */
static void __init pcm043_init(void)
{
	imx35_soc_init();
	mxc_iomux_v3_setup_multiple_pads(pcm043_pads, ARRAY_SIZE(pcm043_pads));
	imx35_add_fec(NULL);
	platform_add_devices(devices, ARRAY_SIZE(devices));
	imx35_add_imx2_wdt(NULL);
	imx35_add_imx_uart0(&uart_pdata);
	imx35_add_mxc_nand(&pcm037_nand_board_info);
	imx35_add_imx_ssi(0, &pcm043_ssi_pdata);
	imx35_add_imx_uart1(&uart_pdata);
	/* I2C devices must be declared before the bus is added */
	i2c_register_board_info(0, pcm043_i2c_devices,
			ARRAY_SIZE(pcm043_i2c_devices));
	imx35_add_imx_i2c0(&pcm043_i2c0_data);
	imx35_add_ipu_core(&mx3_ipu_data);
	imx35_add_mx3_sdc_fb(&mx3fb_pdata);
	/* host mode needs an external ULPI transceiver for VBUS */
	if (otg_mode_host) {
		otg_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS |
				ULPI_OTG_DRVVBUS_EXT);
		if (otg_pdata.otg)
			imx35_add_mxc_ehci_otg(&otg_pdata);
	}
	imx35_add_mxc_ehci_hs(&usbh1_pdata);
	if (!otg_mode_host)
		imx35_add_fsl_usb2_udc(&otg_device_pdata);
	imx35_add_flexcan1(NULL);
	imx35_add_sdhci_esdhc_imx(0, &sd1_pdata);
}
/* System timer hook: just bring up the i.MX35 clock tree. */
static void __init pcm043_timer_init(void)
{
	mx35_clocks_init();
}
struct sys_timer pcm043_timer = {
	.init	= pcm043_timer_init,
};
/* Machine descriptor tying all the hooks above together. */
MACHINE_START(PCM043, "Phytec Phycore pcm043")
	/* Maintainer: Pengutronix */
	.atag_offset = 0x100,
	.map_io = mx35_map_io,
	.init_early = imx35_init_early,
	.init_irq = mx35_init_irq,
	.handle_irq = imx35_handle_irq,
	.timer = &pcm043_timer,
	.init_machine = pcm043_init,
MACHINE_END
| gpl-2.0 |
realthunder/a33_linux | drivers/input/touchscreen/88pm860x-ts.c | 4992 | 6012 | /*
* Touchscreen driver for Marvell 88PM860x
*
* Copyright (C) 2009 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/mfd/88pm860x.h>
#include <linux/slab.h>
#define MEAS_LEN (8)
#define ACCURATE_BIT (12)
/* touch register */
#define MEAS_EN3 (0x52)
#define MEAS_TSIX_1 (0x8D)
#define MEAS_TSIX_2 (0x8E)
#define MEAS_TSIY_1 (0x8F)
#define MEAS_TSIY_2 (0x90)
#define MEAS_TSIZ1_1 (0x91)
#define MEAS_TSIZ1_2 (0x92)
#define MEAS_TSIZ2_1 (0x93)
#define MEAS_TSIZ2_2 (0x94)
/* bit definitions of touch */
#define MEAS_PD_EN (1 << 3)
#define MEAS_TSIX_EN (1 << 4)
#define MEAS_TSIY_EN (1 << 5)
#define MEAS_TSIZ1_EN (1 << 6)
#define MEAS_TSIZ2_EN (1 << 7)
/* Per-device driver state. */
struct pm860x_touch {
	struct input_dev *idev;		/* input device events are reported through */
	struct i2c_client *i2c;		/* i2c channel used for measurement reads */
	struct pm860x_chip *chip;	/* parent multi-function device */
	int irq;			/* pen interrupt (chip->irq_base already added) */
	int res_x;			/* resistor of Xplate */
};
/*
 * Threaded pen interrupt handler: bulk-read the 8 measurement registers
 * (X, Y, Z1, Z2 -- each a 12-bit value split over two registers), then
 * report position/pressure or a pen-release event.
 */
static irqreturn_t pm860x_touch_handler(int irq, void *data)
{
	struct pm860x_touch *touch = data;
	struct pm860x_chip *chip = touch->chip;
	unsigned char buf[MEAS_LEN];
	int x, y, pen_down;
	int z1, z2, rt = 0;
	int ret;

	ret = pm860x_bulk_read(touch->i2c, MEAS_TSIX_1, MEAS_LEN, buf);
	if (ret < 0)
		goto out;

	/* bit 6 of the second X register doubles as the pen-down flag */
	pen_down = buf[1] & (1 << 6);
	/* each sample: 8 MSBs in the first byte, 4 LSBs in the second */
	x = ((buf[0] & 0xFF) << 4) | (buf[1] & 0x0F);
	y = ((buf[2] & 0xFF) << 4) | (buf[3] & 0x0F);
	z1 = ((buf[4] & 0xFF) << 4) | (buf[5] & 0x0F);
	z2 = ((buf[6] & 0xFF) << 4) | (buf[7] & 0x0F);

	if (pen_down) {
		/* estimate touch resistance from z1/z2 and the X-plate
		 * resistance; integer division, so coarse by design */
		if ((x != 0) && (z1 != 0) && (touch->res_x != 0)) {
			rt = z2 / z1 - 1;
			rt = (rt * touch->res_x * x) >> ACCURATE_BIT;
			dev_dbg(chip->dev, "z1:%d, z2:%d, rt:%d\n",
				z1, z2, rt);
		}
		input_report_abs(touch->idev, ABS_X, x);
		input_report_abs(touch->idev, ABS_Y, y);
		input_report_abs(touch->idev, ABS_PRESSURE, rt);
		input_report_key(touch->idev, BTN_TOUCH, 1);
		dev_dbg(chip->dev, "pen down at [%d, %d].\n", x, y);
	} else {
		input_report_abs(touch->idev, ABS_PRESSURE, 0);
		input_report_key(touch->idev, BTN_TOUCH, 0);
		dev_dbg(chip->dev, "pen release\n");
	}
	input_sync(touch->idev);
out:
	return IRQ_HANDLED;
}
/* input_dev open: enable pen-detect and all four TSI measurement
 * channels in MEAS_EN3.  Returns 0 or a negative errno from the bus. */
static int pm860x_touch_open(struct input_dev *dev)
{
	struct pm860x_touch *touch = input_get_drvdata(dev);
	int bits = MEAS_PD_EN | MEAS_TSIX_EN | MEAS_TSIY_EN
		| MEAS_TSIZ1_EN | MEAS_TSIZ2_EN;
	int ret;

	ret = pm860x_set_bits(touch->i2c, MEAS_EN3, bits, bits);
	return (ret < 0) ? ret : 0;
}

/* input_dev close: disable pen-detect and all TSI measurement channels. */
static void pm860x_touch_close(struct input_dev *dev)
{
	struct pm860x_touch *touch = input_get_drvdata(dev);
	int bits = MEAS_PD_EN | MEAS_TSIX_EN | MEAS_TSIY_EN
		| MEAS_TSIZ1_EN | MEAS_TSIZ2_EN;

	pm860x_set_bits(touch->i2c, MEAS_EN3, bits, 0);
}
/*
 * Probe: validate platform data, allocate driver state and an input
 * device, request the threaded pen IRQ, declare the reported axes and
 * register with the input core.  Errors unwind via the goto chain.
 */
static int __devinit pm860x_touch_probe(struct platform_device *pdev)
{
	struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
	struct pm860x_platform_data *pm860x_pdata = \
				pdev->dev.parent->platform_data;
	struct pm860x_touch_pdata *pdata = NULL;
	struct pm860x_touch *touch;
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "No IRQ resource!\n");
		return -EINVAL;
	}
	if (!pm860x_pdata) {
		dev_err(&pdev->dev, "platform data is missing\n");
		return -EINVAL;
	}
	pdata = pm860x_pdata->touch;
	if (!pdata) {
		dev_err(&pdev->dev, "touchscreen data is missing\n");
		return -EINVAL;
	}

	touch = kzalloc(sizeof(struct pm860x_touch), GFP_KERNEL);
	if (touch == NULL)
		return -ENOMEM;
	/* NOTE(review): platform_set_drvdata() below stores the same
	 * pointer; this earlier call looks redundant -- confirm */
	dev_set_drvdata(&pdev->dev, touch);

	touch->idev = input_allocate_device();
	if (touch->idev == NULL) {
		dev_err(&pdev->dev, "Failed to allocate input device!\n");
		ret = -ENOMEM;
		goto out;
	}

	touch->idev->name = "88pm860x-touch";
	touch->idev->phys = "88pm860x/input0";
	touch->idev->id.bustype = BUS_I2C;
	touch->idev->dev.parent = &pdev->dev;
	touch->idev->open = pm860x_touch_open;
	touch->idev->close = pm860x_touch_close;
	touch->chip = chip;
	/* measurement registers live on the companion chip when the
	 * main device is not the PM8607 */
	touch->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
	touch->irq = irq + chip->irq_base;
	touch->res_x = pdata->res_x;
	input_set_drvdata(touch->idev, touch);

	ret = request_threaded_irq(touch->irq, NULL, pm860x_touch_handler,
				   IRQF_ONESHOT, "touch", touch);
	if (ret < 0)
		goto out_irq;

	__set_bit(EV_ABS, touch->idev->evbit);
	__set_bit(ABS_X, touch->idev->absbit);
	__set_bit(ABS_Y, touch->idev->absbit);
	__set_bit(ABS_PRESSURE, touch->idev->absbit);
	__set_bit(EV_SYN, touch->idev->evbit);
	__set_bit(EV_KEY, touch->idev->evbit);
	__set_bit(BTN_TOUCH, touch->idev->keybit);
	/* all axes are raw 12-bit ADC values */
	input_set_abs_params(touch->idev, ABS_X, 0, 1 << ACCURATE_BIT, 0, 0);
	input_set_abs_params(touch->idev, ABS_Y, 0, 1 << ACCURATE_BIT, 0, 0);
	input_set_abs_params(touch->idev, ABS_PRESSURE, 0, 1 << ACCURATE_BIT,
				0, 0);

	ret = input_register_device(touch->idev);
	if (ret < 0) {
		dev_err(chip->dev, "Failed to register touch!\n");
		goto out_rg;
	}

	platform_set_drvdata(pdev, touch);
	return 0;
out_rg:
	free_irq(touch->irq, touch);
out_irq:
	input_free_device(touch->idev);
out:
	kfree(touch);
	return ret;
}
/* Remove: unwind everything probe set up, in reverse order. */
static int __devexit pm860x_touch_remove(struct platform_device *pdev)
{
	struct pm860x_touch *touch = platform_get_drvdata(pdev);

	input_unregister_device(touch->idev);
	free_irq(touch->irq, touch);
	platform_set_drvdata(pdev, NULL);
	kfree(touch);
	return 0;
}
static struct platform_driver pm860x_touch_driver = {
	.driver	= {
		.name	= "88pm860x-touch",
		.owner	= THIS_MODULE,
	},
	.probe	= pm860x_touch_probe,
	.remove	= __devexit_p(pm860x_touch_remove),
};
module_platform_driver(pm860x_touch_driver);
MODULE_DESCRIPTION("Touchscreen driver for Marvell Semiconductor 88PM860x");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:88pm860x-touch");
| gpl-2.0 |
syhost/linux-android-endeavoru-3.6 | sound/pci/ctxfi/cttimer.c | 8320 | 11344 | /*
* PCM timer handling on ctxfi
*
* This source file is released under GPL v2 license (no other versions).
* See the COPYING file included in the main directory of this source
* distribution for the license terms and conditions.
*/
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/moduleparam.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "ctatc.h"
#include "cthardware.h"
#include "cttimer.h"
/* Module option: force the system-timer backend even when the chip's
 * native wallclock timer is available. */
static bool use_system_timer;
MODULE_PARM_DESC(use_system_timer, "Force to use system-timer");
module_param(use_system_timer, bool, S_IRUGO);
/* Backend vtable; callers check each hook for NULL before invoking. */
struct ct_timer_ops {
	void (*init)(struct ct_timer_instance *);
	void (*prepare)(struct ct_timer_instance *);
	void (*start)(struct ct_timer_instance *);
	void (*stop)(struct ct_timer_instance *);
	void (*free_instance)(struct ct_timer_instance *);
	void (*interrupt)(struct ct_timer *);
	void (*free_global)(struct ct_timer *);
};
/* timer instance -- assigned to each PCM stream */
struct ct_timer_instance {
	spinlock_t lock;			/* protects running/timer below */
	struct ct_timer *timer_base;		/* owning timer manager */
	struct ct_atc_pcm *apcm;
	struct snd_pcm_substream *substream;
	struct timer_list timer;		/* used by the system-timer backend */
	struct list_head instance_list;		/* on ct_timer.instance_head */
	struct list_head running_list;		/* on ct_timer.running_head */
	unsigned int position;			/* last seen PCM hw pointer */
	unsigned int frag_count;		/* wallclock ticks to next period */
	unsigned int running:1;
	unsigned int need_update:1;		/* period elapsed, not yet reported */
};
/* timer instance manager */
struct ct_timer {
	spinlock_t lock;		/* global timer lock (for xfitimer) */
	spinlock_t list_lock;		/* lock for instance list */
	struct ct_atc *atc;
	struct ct_timer_ops *ops;	/* selected backend (system or xfi) */
	struct list_head instance_head;
	struct list_head running_head;
	unsigned int wc;		/* current wallclock */
	unsigned int irq_handling:1;	/* in IRQ handling */
	unsigned int reprogram:1;	/* need to reprogram the interval */
	unsigned int running:1;		/* global timer running */
};
/*
 * system-timer-based updates
 */
/* Periodic callback: poll the PCM hw pointer, report elapsed periods,
 * then re-arm the kernel timer for the next expected period boundary. */
static void ct_systimer_callback(unsigned long data)
{
	struct ct_timer_instance *ti = (struct ct_timer_instance *)data;
	struct snd_pcm_substream *substream = ti->substream;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct ct_atc_pcm *apcm = ti->apcm;
	unsigned int period_size = runtime->period_size;
	unsigned int buffer_size = runtime->buffer_size;
	unsigned long flags;
	unsigned int position, dist, interval;

	position = substream->ops->pointer(substream);
	/* frames advanced since last callback, modulo the ring buffer */
	dist = (position + buffer_size - ti->position) % buffer_size;
	if (dist >= period_size ||
	    position / period_size != ti->position / period_size) {
		apcm->interrupt(apcm);
		ti->position = position;
	}
	/* Add extra HZ*5/1000 to avoid overrun issue when recording
	 * at 8kHz in 8-bit format or at 88kHz in 24-bit format. */
	interval = ((period_size - (position % period_size))
		   * HZ + (runtime->rate - 1)) / runtime->rate + HZ * 5 / 1000;
	spin_lock_irqsave(&ti->lock, flags);
	if (ti->running)
		mod_timer(&ti->timer, jiffies + interval);
	spin_unlock_irqrestore(&ti->lock, flags);
}
/* Bind the kernel timer to the instance; not armed yet. */
static void ct_systimer_init(struct ct_timer_instance *ti)
{
	setup_timer(&ti->timer, ct_systimer_callback,
		    (unsigned long)ti);
}
/* Arm the timer one period (in jiffies, rounded up) from now. */
static void ct_systimer_start(struct ct_timer_instance *ti)
{
	struct snd_pcm_runtime *runtime = ti->substream->runtime;
	unsigned long flags;

	spin_lock_irqsave(&ti->lock, flags);
	ti->running = 1;
	mod_timer(&ti->timer,
		  jiffies + (runtime->period_size * HZ +
			     (runtime->rate - 1)) / runtime->rate);
	spin_unlock_irqrestore(&ti->lock, flags);
}
/* Clear the running flag and cancel any pending expiry. */
static void ct_systimer_stop(struct ct_timer_instance *ti)
{
	unsigned long flags;

	spin_lock_irqsave(&ti->lock, flags);
	ti->running = 0;
	del_timer(&ti->timer);
	spin_unlock_irqrestore(&ti->lock, flags);
}
/* Stop and opportunistically wait for a callback already in flight. */
static void ct_systimer_prepare(struct ct_timer_instance *ti)
{
	ct_systimer_stop(ti);
	try_to_del_timer_sync(&ti->timer);
}
#define ct_systimer_free	ct_systimer_prepare
static struct ct_timer_ops ct_systimer_ops = {
	.init = ct_systimer_init,
	.free_instance = ct_systimer_free,
	.prepare = ct_systimer_prepare,
	.start = ct_systimer_start,
	.stop = ct_systimer_stop,
};
/*
 * Handling multiple streams using a global emu20k1 timer irq
 */
#define CT_TIMER_FREQ		48000	/* wallclock ticks per second */
#define MIN_TICKS		1
#define MAX_TICKS		((1 << 13) - 1)	/* 13-bit hardware tick counter */
/* Program the hardware tick counter (clamped to MAX_TICKS) and enable
 * the timer interrupt on the first arm.  Caller holds atimer->lock. */
static void ct_xfitimer_irq_rearm(struct ct_timer *atimer, int ticks)
{
	struct hw *hw = atimer->atc->hw;

	if (ticks > MAX_TICKS)
		ticks = MAX_TICKS;
	hw->set_timer_tick(hw, ticks);
	if (!atimer->running)
		hw->set_timer_irq(hw, 1);
	atimer->running = 1;
}
/* Disable the timer interrupt and tick counter if currently running. */
static void ct_xfitimer_irq_stop(struct ct_timer *atimer)
{
	if (atimer->running) {
		struct hw *hw = atimer->atc->hw;

		hw->set_timer_irq(hw, 0);
		hw->set_timer_tick(hw, 0);
		atimer->running = 0;
	}
}
/* Read the chip's current wallclock counter. */
static inline unsigned int ct_xfitimer_get_wc(struct ct_timer *atimer)
{
	struct hw *hw = atimer->atc->hw;

	return hw->get_wc(hw);
}
/*
 * reprogram the timer interval;
 * checks the running instance list and determines the next timer interval.
 * also updates the each stream position, returns the number of streams
 * to call snd_pcm_period_elapsed() appropriately
 *
 * call this inside the lock and irq disabled
 */
static int ct_xfitimer_reprogram(struct ct_timer *atimer, int can_update)
{
	struct ct_timer_instance *ti;
	unsigned int min_intr = (unsigned int)-1;
	int updates = 0;
	unsigned int wc, diff;

	/* nothing running: shut the hardware timer down */
	if (list_empty(&atimer->running_head)) {
		ct_xfitimer_irq_stop(atimer);
		atimer->reprogram = 0; /* clear flag */
		return 0;
	}

	/* wallclock ticks elapsed since the last reprogram */
	wc = ct_xfitimer_get_wc(atimer);
	diff = wc - atimer->wc;
	atimer->wc = wc;
	list_for_each_entry(ti, &atimer->running_head, running_list) {
		if (ti->frag_count > diff)
			ti->frag_count -= diff;
		else {
			/* a period boundary was (possibly) crossed:
			 * re-read the pointer and recompute the tick
			 * count to the next boundary */
			unsigned int pos;
			unsigned int period_size, rate;

			period_size = ti->substream->runtime->period_size;
			rate = ti->substream->runtime->rate;
			pos = ti->substream->ops->pointer(ti->substream);
			if (pos / period_size != ti->position / period_size) {
				ti->need_update = 1;
				ti->position = pos;
				updates++;
			}
			pos %= period_size;
			pos = period_size - pos;
			ti->frag_count = div_u64((u64)pos * CT_TIMER_FREQ +
						 rate - 1, rate);
		}
		if (ti->need_update && !can_update)
			min_intr = 0; /* pending to the next irq */
		if (ti->frag_count < min_intr)
			min_intr = ti->frag_count;
	}
	if (min_intr < MIN_TICKS)
		min_intr = MIN_TICKS;
	ct_xfitimer_irq_rearm(atimer, min_intr);
	atimer->reprogram = 0; /* clear flag */
	return updates;
}
/* look through the instance list and call period_elapsed if needed */
static void ct_xfitimer_check_period(struct ct_timer *atimer)
{
	struct ct_timer_instance *ti;
	unsigned long flags;

	spin_lock_irqsave(&atimer->list_lock, flags);
	list_for_each_entry(ti, &atimer->instance_head, instance_list) {
		if (ti->running && ti->need_update) {
			ti->need_update = 0;
			ti->apcm->interrupt(ti->apcm);
		}
	}
	spin_unlock_irqrestore(&atimer->list_lock, flags);
}
/* Handle timer-interrupt: reprogram the interval, dropping the global
 * lock while notifying streams, and loop while a concurrent start/stop
 * requested another reprogram (see ct_xfitimer_update()). */
static void ct_xfitimer_callback(struct ct_timer *atimer)
{
	int update;
	unsigned long flags;

	spin_lock_irqsave(&atimer->lock, flags);
	atimer->irq_handling = 1;
	do {
		update = ct_xfitimer_reprogram(atimer, 1);
		spin_unlock(&atimer->lock);
		if (update)
			ct_xfitimer_check_period(atimer);
		spin_lock(&atimer->lock);
	} while (atimer->reprogram);
	atimer->irq_handling = 0;
	spin_unlock_irqrestore(&atimer->lock, flags);
}
/* Reset per-stream state: one full period until the first interrupt. */
static void ct_xfitimer_prepare(struct ct_timer_instance *ti)
{
	ti->frag_count = ti->substream->runtime->period_size;
	ti->running = 0;
	ti->need_update = 0;
}
/* start/stop the timer */
/* Re-evaluate the hardware interval after a stream starts or stops;
 * defers to the IRQ handler via the reprogram flag when it is active. */
static void ct_xfitimer_update(struct ct_timer *atimer)
{
	unsigned long flags;

	spin_lock_irqsave(&atimer->lock, flags);
	if (atimer->irq_handling) {
		/* reached from IRQ handler; let it handle later */
		atimer->reprogram = 1;
		spin_unlock_irqrestore(&atimer->lock, flags);
		return;
	}
	ct_xfitimer_irq_stop(atimer);
	ct_xfitimer_reprogram(atimer, 0);
	spin_unlock_irqrestore(&atimer->lock, flags);
}
/* Put the stream on the running list and reprogram the interval; the
 * wallclock baseline is refreshed when this is the first running stream. */
static void ct_xfitimer_start(struct ct_timer_instance *ti)
{
	struct ct_timer *atimer = ti->timer_base;
	unsigned long flags;

	spin_lock_irqsave(&atimer->lock, flags);
	if (list_empty(&ti->running_list))
		atimer->wc = ct_xfitimer_get_wc(atimer);
	ti->running = 1;
	ti->need_update = 0;
	list_add(&ti->running_list, &atimer->running_head);
	spin_unlock_irqrestore(&atimer->lock, flags);
	ct_xfitimer_update(atimer);
}
/* Remove the stream from the running list and reprogram the interval. */
static void ct_xfitimer_stop(struct ct_timer_instance *ti)
{
	struct ct_timer *atimer = ti->timer_base;
	unsigned long flags;

	spin_lock_irqsave(&atimer->lock, flags);
	list_del_init(&ti->running_list);
	ti->running = 0;
	spin_unlock_irqrestore(&atimer->lock, flags);
	ct_xfitimer_update(atimer);
}
/* Make sure the hardware timer is quiesced on manager teardown. */
static void ct_xfitimer_free_global(struct ct_timer *atimer)
{
	ct_xfitimer_irq_stop(atimer);
}
static struct ct_timer_ops ct_xfitimer_ops = {
	.prepare = ct_xfitimer_prepare,
	.start = ct_xfitimer_start,
	.stop = ct_xfitimer_stop,
	.interrupt = ct_xfitimer_callback,
	.free_global = ct_xfitimer_free_global,
};
/*
 * timer instance
 */
/* Allocate a per-stream instance, run the backend's init hook and link
 * it onto the manager's instance list.  Returns NULL on allocation
 * failure. */
struct ct_timer_instance *
ct_timer_instance_new(struct ct_timer *atimer, struct ct_atc_pcm *apcm)
{
	struct ct_timer_instance *ti;

	ti = kzalloc(sizeof(*ti), GFP_KERNEL);
	if (!ti)
		return NULL;
	spin_lock_init(&ti->lock);
	INIT_LIST_HEAD(&ti->instance_list);
	INIT_LIST_HEAD(&ti->running_list);
	ti->timer_base = atimer;
	ti->apcm = apcm;
	ti->substream = apcm->substream;
	if (atimer->ops->init)
		atimer->ops->init(ti);
	spin_lock_irq(&atimer->list_lock);
	list_add(&ti->instance_list, &atimer->instance_head);
	spin_unlock_irq(&atimer->list_lock);
	return ti;
}
void ct_timer_prepare(struct ct_timer_instance *ti)
{
if (ti->timer_base->ops->prepare)
ti->timer_base->ops->prepare(ti);
ti->position = 0;
ti->running = 0;
}
void ct_timer_start(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
atimer->ops->start(ti);
}
void ct_timer_stop(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
atimer->ops->stop(ti);
}
void ct_timer_instance_free(struct ct_timer_instance *ti)
{
struct ct_timer *atimer = ti->timer_base;
atimer->ops->stop(ti); /* to be sure */
if (atimer->ops->free_instance)
atimer->ops->free_instance(ti);
spin_lock_irq(&atimer->list_lock);
list_del(&ti->instance_list);
spin_unlock_irq(&atimer->list_lock);
kfree(ti);
}
/*
* timer manager
*/
static void ct_timer_interrupt(void *data, unsigned int status)
{
struct ct_timer *timer = data;
/* Interval timer interrupt */
if ((status & IT_INT) && timer->ops->interrupt)
timer->ops->interrupt(timer);
}
struct ct_timer *ct_timer_new(struct ct_atc *atc)
{
struct ct_timer *atimer;
struct hw *hw;
atimer = kzalloc(sizeof(*atimer), GFP_KERNEL);
if (!atimer)
return NULL;
spin_lock_init(&atimer->lock);
spin_lock_init(&atimer->list_lock);
INIT_LIST_HEAD(&atimer->instance_head);
INIT_LIST_HEAD(&atimer->running_head);
atimer->atc = atc;
hw = atc->hw;
if (!use_system_timer && hw->set_timer_irq) {
snd_printd(KERN_INFO "ctxfi: Use xfi-native timer\n");
atimer->ops = &ct_xfitimer_ops;
hw->irq_callback_data = atimer;
hw->irq_callback = ct_timer_interrupt;
} else {
snd_printd(KERN_INFO "ctxfi: Use system timer\n");
atimer->ops = &ct_systimer_ops;
}
return atimer;
}
/*
 * Release the timer manager: detach the hardware IRQ callback first so
 * no interrupt can be dispatched into freed memory, let the backend
 * release any global state, then free the manager itself.
 */
void ct_timer_free(struct ct_timer *atimer)
{
	atimer->atc->hw->irq_callback = NULL;
	if (atimer->ops->free_global)
		atimer->ops->free_global(atimer);
	kfree(atimer);
}
| gpl-2.0 |
cbolumar/android_kernel_samsung_a3ulte | drivers/net/wireless/rtlwifi/rtl8192se/table.c | 9600 | 13995 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
* Created on 2010/ 5/18, 1:41
*****************************************************************************/
#include "table.h"
u32 rtl8192sephy_reg_2t2rarray[PHY_REG_2T2RARRAYLENGTH] = {
0x01c, 0x07000000,
0x800, 0x00040000,
0x804, 0x00008003,
0x808, 0x0000fc00,
0x80c, 0x0000000a,
0x810, 0x10005088,
0x814, 0x020c3d10,
0x818, 0x00200185,
0x81c, 0x00000000,
0x820, 0x01000000,
0x824, 0x00390004,
0x828, 0x01000000,
0x82c, 0x00390004,
0x830, 0x00000004,
0x834, 0x00690200,
0x838, 0x00000004,
0x83c, 0x00690200,
0x840, 0x00010000,
0x844, 0x00010000,
0x848, 0x00000000,
0x84c, 0x00000000,
0x850, 0x00000000,
0x854, 0x00000000,
0x858, 0x48484848,
0x85c, 0x65a965a9,
0x860, 0x0f7f0130,
0x864, 0x0f7f0130,
0x868, 0x0f7f0130,
0x86c, 0x0f7f0130,
0x870, 0x03000700,
0x874, 0x03000300,
0x878, 0x00020002,
0x87c, 0x004f0201,
0x880, 0xa8300ac1,
0x884, 0x00000058,
0x888, 0x00000008,
0x88c, 0x00000004,
0x890, 0x00000000,
0x894, 0xfffffffe,
0x898, 0x40302010,
0x89c, 0x00706050,
0x8b0, 0x00000000,
0x8e0, 0x00000000,
0x8e4, 0x00000000,
0xe00, 0x30333333,
0xe04, 0x2a2d2e2f,
0xe08, 0x00003232,
0xe10, 0x30333333,
0xe14, 0x2a2d2e2f,
0xe18, 0x30333333,
0xe1c, 0x2a2d2e2f,
0xe30, 0x01007c00,
0xe34, 0x01004800,
0xe38, 0x1000dc1f,
0xe3c, 0x10008c1f,
0xe40, 0x021400a0,
0xe44, 0x281600a0,
0xe48, 0xf8000001,
0xe4c, 0x00002910,
0xe50, 0x01007c00,
0xe54, 0x01004800,
0xe58, 0x1000dc1f,
0xe5c, 0x10008c1f,
0xe60, 0x021400a0,
0xe64, 0x281600a0,
0xe6c, 0x00002910,
0xe70, 0x31ed92fb,
0xe74, 0x361536fb,
0xe78, 0x361536fb,
0xe7c, 0x361536fb,
0xe80, 0x361536fb,
0xe84, 0x000d92fb,
0xe88, 0x000d92fb,
0xe8c, 0x31ed92fb,
0xed0, 0x31ed92fb,
0xed4, 0x31ed92fb,
0xed8, 0x000d92fb,
0xedc, 0x000d92fb,
0xee0, 0x000d92fb,
0xee4, 0x015e5448,
0xee8, 0x21555448,
0x900, 0x00000000,
0x904, 0x00000023,
0x908, 0x00000000,
0x90c, 0x01121313,
0xa00, 0x00d047c8,
0xa04, 0x80ff0008,
0xa08, 0x8ccd8300,
0xa0c, 0x2e62120f,
0xa10, 0x9500bb78,
0xa14, 0x11144028,
0xa18, 0x00881117,
0xa1c, 0x89140f00,
0xa20, 0x1a1b0000,
0xa24, 0x090e1317,
0xa28, 0x00000204,
0xa2c, 0x10d30000,
0xc00, 0x40071d40,
0xc04, 0x00a05633,
0xc08, 0x000000e4,
0xc0c, 0x6c6c6c6c,
0xc10, 0x08800000,
0xc14, 0x40000100,
0xc18, 0x08000000,
0xc1c, 0x40000100,
0xc20, 0x08000000,
0xc24, 0x40000100,
0xc28, 0x08000000,
0xc2c, 0x40000100,
0xc30, 0x6de9ac44,
0xc34, 0x469652cf,
0xc38, 0x49795994,
0xc3c, 0x0a979764,
0xc40, 0x1f7c403f,
0xc44, 0x000100b7,
0xc48, 0xec020000,
0xc4c, 0x007f037f,
0xc50, 0x69543420,
0xc54, 0x433c0094,
0xc58, 0x69543420,
0xc5c, 0x433c0094,
0xc60, 0x69543420,
0xc64, 0x433c0094,
0xc68, 0x69543420,
0xc6c, 0x433c0094,
0xc70, 0x2c7f000d,
0xc74, 0x0186155b,
0xc78, 0x0000001f,
0xc7c, 0x00b91612,
0xc80, 0x40000100,
0xc84, 0x20f60000,
0xc88, 0x20000080,
0xc8c, 0x20200000,
0xc90, 0x40000100,
0xc94, 0x00000000,
0xc98, 0x40000100,
0xc9c, 0x00000000,
0xca0, 0x00492492,
0xca4, 0x00000000,
0xca8, 0x00000000,
0xcac, 0x00000000,
0xcb0, 0x00000000,
0xcb4, 0x00000000,
0xcb8, 0x00000000,
0xcbc, 0x28000000,
0xcc0, 0x00000000,
0xcc4, 0x00000000,
0xcc8, 0x00000000,
0xccc, 0x00000000,
0xcd0, 0x00000000,
0xcd4, 0x00000000,
0xcd8, 0x64b22427,
0xcdc, 0x00766932,
0xce0, 0x00222222,
0xce4, 0x00000000,
0xce8, 0x37644302,
0xcec, 0x2f97d40c,
0xd00, 0x00000750,
0xd04, 0x00000403,
0xd08, 0x0000907f,
0xd0c, 0x00000001,
0xd10, 0xa0633333,
0xd14, 0x33333c63,
0xd18, 0x6a8f5b6b,
0xd1c, 0x00000000,
0xd20, 0x00000000,
0xd24, 0x00000000,
0xd28, 0x00000000,
0xd2c, 0xcc979975,
0xd30, 0x00000000,
0xd34, 0x00000000,
0xd38, 0x00000000,
0xd3c, 0x00027293,
0xd40, 0x00000000,
0xd44, 0x00000000,
0xd48, 0x00000000,
0xd50, 0x6437140a,
0xd54, 0x024dbd02,
0xd58, 0x00000000,
0xd5c, 0x30032064,
0xd60, 0x4653de68,
0xd64, 0x00518a3c,
0xd68, 0x00002101,
0xf14, 0x00000003,
0xf4c, 0x00000000,
0xf00, 0x00000300,
};
/*
 * Register patch table to switch the PHY into 1T1R mode.
 * Flat list of {register offset, bitmask, value} triplets — presumably
 * applied read-modify-write by the PHY config loader; confirm against
 * the table consumer in phy.c.
 */
u32 rtl8192sephy_changeto_1t1rarray[PHY_CHANGETO_1T1RARRAYLENGTH] = {
0x844, 0xffffffff, 0x00010000,
0x804, 0x0000000f, 0x00000001,
0x824, 0x00f0000f, 0x00300004,
0x82c, 0x00f0000f, 0x00100002,
0x870, 0x04000000, 0x00000001,
0x864, 0x00000400, 0x00000000,
0x878, 0x000f000f, 0x00000002,
0xe74, 0x0f000000, 0x00000002,
0xe78, 0x0f000000, 0x00000002,
0xe7c, 0x0f000000, 0x00000002,
0xe80, 0x0f000000, 0x00000002,
0x90c, 0x000000ff, 0x00000011,
0xc04, 0x000000ff, 0x00000011,
0xd04, 0x0000000f, 0x00000001,
0x1f4, 0xffff0000, 0x00007777,
0x234, 0xf8000000, 0x0000000a,
};
/*
 * Register patch table to switch the PHY into 1T2R mode.
 * Same {register offset, bitmask, value} triplet layout as the 1T1R
 * table above; values differ only where the RX-chain config differs.
 */
u32 rtl8192sephy_changeto_1t2rarray[PHY_CHANGETO_1T2RARRAYLENGTH] = {
0x804, 0x0000000f, 0x00000003,
0x824, 0x00f0000f, 0x00300004,
0x82c, 0x00f0000f, 0x00300002,
0x870, 0x04000000, 0x00000001,
0x864, 0x00000400, 0x00000000,
0x878, 0x000f000f, 0x00000002,
0xe74, 0x0f000000, 0x00000002,
0xe78, 0x0f000000, 0x00000002,
0xe7c, 0x0f000000, 0x00000002,
0xe80, 0x0f000000, 0x00000002,
0x90c, 0x000000ff, 0x00000011,
0xc04, 0x000000ff, 0x00000033,
0xd04, 0x0000000f, 0x00000003,
0x1f4, 0xffff0000, 0x00007777,
0x234, 0xf8000000, 0x0000000a,
};
/*
 * Per-rate TX power-grading table for BB registers 0xe00..0xe1c.
 * {register offset, bitmask, value} triplets; the 0xe00..0xe1c group
 * repeats four times — presumably one group per power-grade level,
 * selected at load time (confirm against the PG table consumer).
 */
u32 rtl8192sephy_reg_array_pg[PHY_REG_ARRAY_PGLENGTH] = {
0xe00, 0xffffffff, 0x06090909,
0xe04, 0xffffffff, 0x00030406,
0xe08, 0x0000ff00, 0x00000000,
0xe10, 0xffffffff, 0x0a0c0d0e,
0xe14, 0xffffffff, 0x04070809,
0xe18, 0xffffffff, 0x0a0c0d0e,
0xe1c, 0xffffffff, 0x04070809,
0xe00, 0xffffffff, 0x04040404,
0xe04, 0xffffffff, 0x00020204,
0xe08, 0x0000ff00, 0x00000000,
0xe10, 0xffffffff, 0x02040404,
0xe14, 0xffffffff, 0x00000002,
0xe18, 0xffffffff, 0x02040404,
0xe1c, 0xffffffff, 0x00000002,
0xe00, 0xffffffff, 0x04040404,
0xe04, 0xffffffff, 0x00020204,
0xe08, 0x0000ff00, 0x00000000,
0xe10, 0xffffffff, 0x02040404,
0xe14, 0xffffffff, 0x00000002,
0xe18, 0xffffffff, 0x02040404,
0xe1c, 0xffffffff, 0x00000002,
0xe00, 0xffffffff, 0x02020202,
0xe04, 0xffffffff, 0x00020202,
0xe08, 0x0000ff00, 0x00000000,
0xe10, 0xffffffff, 0x02020202,
0xe14, 0xffffffff, 0x00000002,
0xe18, 0xffffffff, 0x02020202,
0xe1c, 0xffffffff, 0x00000002,
};
u32 rtl8192seradioa_1t_array[RADIOA_1T_ARRAYLENGTH] = {
0x000, 0x00030159,
0x001, 0x00030250,
0x002, 0x00010000,
0x010, 0x0008000f,
0x011, 0x000231fc,
0x010, 0x000c000f,
0x011, 0x0003f9f8,
0x010, 0x0002000f,
0x011, 0x00020101,
0x014, 0x0001093e,
0x014, 0x0009093e,
0x015, 0x0000f8f4,
0x017, 0x000f6500,
0x01a, 0x00013056,
0x01b, 0x00060000,
0x01c, 0x00000300,
0x01e, 0x00031059,
0x021, 0x00054000,
0x022, 0x0000083c,
0x023, 0x00001558,
0x024, 0x00000060,
0x025, 0x00022583,
0x026, 0x0000f200,
0x027, 0x000eacf1,
0x028, 0x0009bd54,
0x029, 0x00004582,
0x02a, 0x00000001,
0x02b, 0x00021334,
0x02a, 0x00000000,
0x02b, 0x0000000a,
0x02a, 0x00000001,
0x02b, 0x00000808,
0x02b, 0x00053333,
0x02c, 0x0000000c,
0x02a, 0x00000002,
0x02b, 0x00000808,
0x02b, 0x0005b333,
0x02c, 0x0000000d,
0x02a, 0x00000003,
0x02b, 0x00000808,
0x02b, 0x00063333,
0x02c, 0x0000000d,
0x02a, 0x00000004,
0x02b, 0x00000808,
0x02b, 0x0006b333,
0x02c, 0x0000000d,
0x02a, 0x00000005,
0x02b, 0x00000709,
0x02b, 0x00053333,
0x02c, 0x0000000d,
0x02a, 0x00000006,
0x02b, 0x00000709,
0x02b, 0x0005b333,
0x02c, 0x0000000d,
0x02a, 0x00000007,
0x02b, 0x00000709,
0x02b, 0x00063333,
0x02c, 0x0000000d,
0x02a, 0x00000008,
0x02b, 0x00000709,
0x02b, 0x0006b333,
0x02c, 0x0000000d,
0x02a, 0x00000009,
0x02b, 0x0000060a,
0x02b, 0x00053333,
0x02c, 0x0000000d,
0x02a, 0x0000000a,
0x02b, 0x0000060a,
0x02b, 0x0005b333,
0x02c, 0x0000000d,
0x02a, 0x0000000b,
0x02b, 0x0000060a,
0x02b, 0x00063333,
0x02c, 0x0000000d,
0x02a, 0x0000000c,
0x02b, 0x0000060a,
0x02b, 0x0006b333,
0x02c, 0x0000000d,
0x02a, 0x0000000d,
0x02b, 0x0000050b,
0x02b, 0x00053333,
0x02c, 0x0000000d,
0x02a, 0x0000000e,
0x02b, 0x0000050b,
0x02b, 0x00066623,
0x02c, 0x0000001a,
0x02a, 0x000e4000,
0x030, 0x00020000,
0x031, 0x000b9631,
0x032, 0x0000130d,
0x033, 0x00000187,
0x013, 0x00019e6c,
0x013, 0x00015e94,
0x000, 0x00010159,
0x018, 0x0000f401,
0x0fe, 0x00000000,
0x01e, 0x0003105b,
0x0fe, 0x00000000,
0x000, 0x00030159,
0x010, 0x0004000f,
0x011, 0x000203f9,
};
/*
 * RF path-B register init table: {RF register address, data} pairs.
 * Register 0x013 is written repeatedly with decreasing values —
 * presumably stepping a calibration/gain setting; confirm against the
 * RF-write loop that consumes this table.
 */
u32 rtl8192seradiob_array[RADIOB_ARRAYLENGTH] = {
0x000, 0x00030159,
0x001, 0x00001041,
0x002, 0x00011000,
0x005, 0x00080fc0,
0x007, 0x000fc803,
0x013, 0x00017cb0,
0x013, 0x00011cc0,
0x013, 0x0000dc60,
0x013, 0x00008c60,
0x013, 0x00004450,
0x013, 0x00000020,
};
/*
 * Reduced RF path-B init table ("GM" variant): identical to the first
 * five entries of rtl8192seradiob_array, without the 0x013 sequence.
 */
u32 rtl8192seradiob_gm_array[RADIOB_GM_ARRAYLENGTH] = {
0x000, 0x00030159,
0x001, 0x00001041,
0x002, 0x00011000,
0x005, 0x00080fc0,
0x007, 0x000fc803,
};
/*
 * MAC register init table for the 2T configuration:
 * {byte register offset, value} pairs.  The long 0x160..0x17f run of
 * 0x0b values presumably programs a per-queue/per-TID parameter block;
 * confirm against the MAC init routine that walks this table.
 */
u32 rtl8192semac_2t_array[MAC_2T_ARRAYLENGTH] = {
0x020, 0x00000035,
0x048, 0x0000000e,
0x049, 0x000000f0,
0x04a, 0x00000077,
0x04b, 0x00000083,
0x0b5, 0x00000021,
0x0dc, 0x000000ff,
0x0dd, 0x000000ff,
0x0de, 0x000000ff,
0x0df, 0x000000ff,
0x116, 0x00000000,
0x117, 0x00000000,
0x118, 0x00000000,
0x119, 0x00000000,
0x11a, 0x00000000,
0x11b, 0x00000000,
0x11c, 0x00000000,
0x11d, 0x00000000,
0x160, 0x0000000b,
0x161, 0x0000000b,
0x162, 0x0000000b,
0x163, 0x0000000b,
0x164, 0x0000000b,
0x165, 0x0000000b,
0x166, 0x0000000b,
0x167, 0x0000000b,
0x168, 0x0000000b,
0x169, 0x0000000b,
0x16a, 0x0000000b,
0x16b, 0x0000000b,
0x16c, 0x0000000b,
0x16d, 0x0000000b,
0x16e, 0x0000000b,
0x16f, 0x0000000b,
0x170, 0x0000000b,
0x171, 0x0000000b,
0x172, 0x0000000b,
0x173, 0x0000000b,
0x174, 0x0000000b,
0x175, 0x0000000b,
0x176, 0x0000000b,
0x177, 0x0000000b,
0x178, 0x0000000b,
0x179, 0x0000000b,
0x17a, 0x0000000b,
0x17b, 0x0000000b,
0x17c, 0x0000000b,
0x17d, 0x0000000b,
0x17e, 0x0000000b,
0x17f, 0x0000000b,
0x236, 0x0000000c,
0x503, 0x00000022,
0x560, 0x00000000,
};
u32 rtl8192seagctab_array[AGCTAB_ARRAYLENGTH] = {
0xc78, 0x7f000001,
0xc78, 0x7f010001,
0xc78, 0x7e020001,
0xc78, 0x7d030001,
0xc78, 0x7c040001,
0xc78, 0x7b050001,
0xc78, 0x7a060001,
0xc78, 0x79070001,
0xc78, 0x78080001,
0xc78, 0x77090001,
0xc78, 0x760a0001,
0xc78, 0x750b0001,
0xc78, 0x740c0001,
0xc78, 0x730d0001,
0xc78, 0x720e0001,
0xc78, 0x710f0001,
0xc78, 0x70100001,
0xc78, 0x6f110001,
0xc78, 0x6f120001,
0xc78, 0x6e130001,
0xc78, 0x6d140001,
0xc78, 0x6d150001,
0xc78, 0x6c160001,
0xc78, 0x6b170001,
0xc78, 0x6a180001,
0xc78, 0x6a190001,
0xc78, 0x691a0001,
0xc78, 0x681b0001,
0xc78, 0x671c0001,
0xc78, 0x661d0001,
0xc78, 0x651e0001,
0xc78, 0x641f0001,
0xc78, 0x63200001,
0xc78, 0x4c210001,
0xc78, 0x4b220001,
0xc78, 0x4a230001,
0xc78, 0x49240001,
0xc78, 0x48250001,
0xc78, 0x47260001,
0xc78, 0x46270001,
0xc78, 0x45280001,
0xc78, 0x44290001,
0xc78, 0x2c2a0001,
0xc78, 0x2b2b0001,
0xc78, 0x2a2c0001,
0xc78, 0x292d0001,
0xc78, 0x282e0001,
0xc78, 0x272f0001,
0xc78, 0x26300001,
0xc78, 0x25310001,
0xc78, 0x24320001,
0xc78, 0x23330001,
0xc78, 0x22340001,
0xc78, 0x09350001,
0xc78, 0x08360001,
0xc78, 0x07370001,
0xc78, 0x06380001,
0xc78, 0x05390001,
0xc78, 0x043a0001,
0xc78, 0x033b0001,
0xc78, 0x023c0001,
0xc78, 0x013d0001,
0xc78, 0x003e0001,
0xc78, 0x003f0001,
0xc78, 0x7f400001,
0xc78, 0x7f410001,
0xc78, 0x7e420001,
0xc78, 0x7d430001,
0xc78, 0x7c440001,
0xc78, 0x7b450001,
0xc78, 0x7a460001,
0xc78, 0x79470001,
0xc78, 0x78480001,
0xc78, 0x77490001,
0xc78, 0x764a0001,
0xc78, 0x754b0001,
0xc78, 0x744c0001,
0xc78, 0x734d0001,
0xc78, 0x724e0001,
0xc78, 0x714f0001,
0xc78, 0x70500001,
0xc78, 0x6f510001,
0xc78, 0x6f520001,
0xc78, 0x6e530001,
0xc78, 0x6d540001,
0xc78, 0x6d550001,
0xc78, 0x6c560001,
0xc78, 0x6b570001,
0xc78, 0x6a580001,
0xc78, 0x6a590001,
0xc78, 0x695a0001,
0xc78, 0x685b0001,
0xc78, 0x675c0001,
0xc78, 0x665d0001,
0xc78, 0x655e0001,
0xc78, 0x645f0001,
0xc78, 0x63600001,
0xc78, 0x4c610001,
0xc78, 0x4b620001,
0xc78, 0x4a630001,
0xc78, 0x49640001,
0xc78, 0x48650001,
0xc78, 0x47660001,
0xc78, 0x46670001,
0xc78, 0x45680001,
0xc78, 0x44690001,
0xc78, 0x2c6a0001,
0xc78, 0x2b6b0001,
0xc78, 0x2a6c0001,
0xc78, 0x296d0001,
0xc78, 0x286e0001,
0xc78, 0x276f0001,
0xc78, 0x26700001,
0xc78, 0x25710001,
0xc78, 0x24720001,
0xc78, 0x23730001,
0xc78, 0x22740001,
0xc78, 0x09750001,
0xc78, 0x08760001,
0xc78, 0x07770001,
0xc78, 0x06780001,
0xc78, 0x05790001,
0xc78, 0x047a0001,
0xc78, 0x037b0001,
0xc78, 0x027c0001,
0xc78, 0x017d0001,
0xc78, 0x007e0001,
0xc78, 0x007f0001,
0xc78, 0x3000001e,
0xc78, 0x3001001e,
0xc78, 0x3002001e,
0xc78, 0x3003001e,
0xc78, 0x3004001e,
0xc78, 0x3405001e,
0xc78, 0x3806001e,
0xc78, 0x3e07001e,
0xc78, 0x3e08001e,
0xc78, 0x4409001e,
0xc78, 0x460a001e,
0xc78, 0x480b001e,
0xc78, 0x480c001e,
0xc78, 0x4e0d001e,
0xc78, 0x560e001e,
0xc78, 0x5a0f001e,
0xc78, 0x5e10001e,
0xc78, 0x6211001e,
0xc78, 0x6c12001e,
0xc78, 0x7213001e,
0xc78, 0x7214001e,
0xc78, 0x7215001e,
0xc78, 0x7216001e,
0xc78, 0x7217001e,
0xc78, 0x7218001e,
0xc78, 0x7219001e,
0xc78, 0x721a001e,
0xc78, 0x721b001e,
0xc78, 0x721c001e,
0xc78, 0x721d001e,
0xc78, 0x721e001e,
0xc78, 0x721f001e,
};
| gpl-2.0 |
jimbojr/linux | drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c | 9600 | 13995 | /******************************************************************************
*
* Copyright(c) 2009-2012 Realtek Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
* Created on 2010/ 5/18, 1:41
*****************************************************************************/
#include "table.h"
u32 rtl8192sephy_reg_2t2rarray[PHY_REG_2T2RARRAYLENGTH] = {
0x01c, 0x07000000,
0x800, 0x00040000,
0x804, 0x00008003,
0x808, 0x0000fc00,
0x80c, 0x0000000a,
0x810, 0x10005088,
0x814, 0x020c3d10,
0x818, 0x00200185,
0x81c, 0x00000000,
0x820, 0x01000000,
0x824, 0x00390004,
0x828, 0x01000000,
0x82c, 0x00390004,
0x830, 0x00000004,
0x834, 0x00690200,
0x838, 0x00000004,
0x83c, 0x00690200,
0x840, 0x00010000,
0x844, 0x00010000,
0x848, 0x00000000,
0x84c, 0x00000000,
0x850, 0x00000000,
0x854, 0x00000000,
0x858, 0x48484848,
0x85c, 0x65a965a9,
0x860, 0x0f7f0130,
0x864, 0x0f7f0130,
0x868, 0x0f7f0130,
0x86c, 0x0f7f0130,
0x870, 0x03000700,
0x874, 0x03000300,
0x878, 0x00020002,
0x87c, 0x004f0201,
0x880, 0xa8300ac1,
0x884, 0x00000058,
0x888, 0x00000008,
0x88c, 0x00000004,
0x890, 0x00000000,
0x894, 0xfffffffe,
0x898, 0x40302010,
0x89c, 0x00706050,
0x8b0, 0x00000000,
0x8e0, 0x00000000,
0x8e4, 0x00000000,
0xe00, 0x30333333,
0xe04, 0x2a2d2e2f,
0xe08, 0x00003232,
0xe10, 0x30333333,
0xe14, 0x2a2d2e2f,
0xe18, 0x30333333,
0xe1c, 0x2a2d2e2f,
0xe30, 0x01007c00,
0xe34, 0x01004800,
0xe38, 0x1000dc1f,
0xe3c, 0x10008c1f,
0xe40, 0x021400a0,
0xe44, 0x281600a0,
0xe48, 0xf8000001,
0xe4c, 0x00002910,
0xe50, 0x01007c00,
0xe54, 0x01004800,
0xe58, 0x1000dc1f,
0xe5c, 0x10008c1f,
0xe60, 0x021400a0,
0xe64, 0x281600a0,
0xe6c, 0x00002910,
0xe70, 0x31ed92fb,
0xe74, 0x361536fb,
0xe78, 0x361536fb,
0xe7c, 0x361536fb,
0xe80, 0x361536fb,
0xe84, 0x000d92fb,
0xe88, 0x000d92fb,
0xe8c, 0x31ed92fb,
0xed0, 0x31ed92fb,
0xed4, 0x31ed92fb,
0xed8, 0x000d92fb,
0xedc, 0x000d92fb,
0xee0, 0x000d92fb,
0xee4, 0x015e5448,
0xee8, 0x21555448,
0x900, 0x00000000,
0x904, 0x00000023,
0x908, 0x00000000,
0x90c, 0x01121313,
0xa00, 0x00d047c8,
0xa04, 0x80ff0008,
0xa08, 0x8ccd8300,
0xa0c, 0x2e62120f,
0xa10, 0x9500bb78,
0xa14, 0x11144028,
0xa18, 0x00881117,
0xa1c, 0x89140f00,
0xa20, 0x1a1b0000,
0xa24, 0x090e1317,
0xa28, 0x00000204,
0xa2c, 0x10d30000,
0xc00, 0x40071d40,
0xc04, 0x00a05633,
0xc08, 0x000000e4,
0xc0c, 0x6c6c6c6c,
0xc10, 0x08800000,
0xc14, 0x40000100,
0xc18, 0x08000000,
0xc1c, 0x40000100,
0xc20, 0x08000000,
0xc24, 0x40000100,
0xc28, 0x08000000,
0xc2c, 0x40000100,
0xc30, 0x6de9ac44,
0xc34, 0x469652cf,
0xc38, 0x49795994,
0xc3c, 0x0a979764,
0xc40, 0x1f7c403f,
0xc44, 0x000100b7,
0xc48, 0xec020000,
0xc4c, 0x007f037f,
0xc50, 0x69543420,
0xc54, 0x433c0094,
0xc58, 0x69543420,
0xc5c, 0x433c0094,
0xc60, 0x69543420,
0xc64, 0x433c0094,
0xc68, 0x69543420,
0xc6c, 0x433c0094,
0xc70, 0x2c7f000d,
0xc74, 0x0186155b,
0xc78, 0x0000001f,
0xc7c, 0x00b91612,
0xc80, 0x40000100,
0xc84, 0x20f60000,
0xc88, 0x20000080,
0xc8c, 0x20200000,
0xc90, 0x40000100,
0xc94, 0x00000000,
0xc98, 0x40000100,
0xc9c, 0x00000000,
0xca0, 0x00492492,
0xca4, 0x00000000,
0xca8, 0x00000000,
0xcac, 0x00000000,
0xcb0, 0x00000000,
0xcb4, 0x00000000,
0xcb8, 0x00000000,
0xcbc, 0x28000000,
0xcc0, 0x00000000,
0xcc4, 0x00000000,
0xcc8, 0x00000000,
0xccc, 0x00000000,
0xcd0, 0x00000000,
0xcd4, 0x00000000,
0xcd8, 0x64b22427,
0xcdc, 0x00766932,
0xce0, 0x00222222,
0xce4, 0x00000000,
0xce8, 0x37644302,
0xcec, 0x2f97d40c,
0xd00, 0x00000750,
0xd04, 0x00000403,
0xd08, 0x0000907f,
0xd0c, 0x00000001,
0xd10, 0xa0633333,
0xd14, 0x33333c63,
0xd18, 0x6a8f5b6b,
0xd1c, 0x00000000,
0xd20, 0x00000000,
0xd24, 0x00000000,
0xd28, 0x00000000,
0xd2c, 0xcc979975,
0xd30, 0x00000000,
0xd34, 0x00000000,
0xd38, 0x00000000,
0xd3c, 0x00027293,
0xd40, 0x00000000,
0xd44, 0x00000000,
0xd48, 0x00000000,
0xd50, 0x6437140a,
0xd54, 0x024dbd02,
0xd58, 0x00000000,
0xd5c, 0x30032064,
0xd60, 0x4653de68,
0xd64, 0x00518a3c,
0xd68, 0x00002101,
0xf14, 0x00000003,
0xf4c, 0x00000000,
0xf00, 0x00000300,
};
u32 rtl8192sephy_changeto_1t1rarray[PHY_CHANGETO_1T1RARRAYLENGTH] = {
0x844, 0xffffffff, 0x00010000,
0x804, 0x0000000f, 0x00000001,
0x824, 0x00f0000f, 0x00300004,
0x82c, 0x00f0000f, 0x00100002,
0x870, 0x04000000, 0x00000001,
0x864, 0x00000400, 0x00000000,
0x878, 0x000f000f, 0x00000002,
0xe74, 0x0f000000, 0x00000002,
0xe78, 0x0f000000, 0x00000002,
0xe7c, 0x0f000000, 0x00000002,
0xe80, 0x0f000000, 0x00000002,
0x90c, 0x000000ff, 0x00000011,
0xc04, 0x000000ff, 0x00000011,
0xd04, 0x0000000f, 0x00000001,
0x1f4, 0xffff0000, 0x00007777,
0x234, 0xf8000000, 0x0000000a,
};
u32 rtl8192sephy_changeto_1t2rarray[PHY_CHANGETO_1T2RARRAYLENGTH] = {
0x804, 0x0000000f, 0x00000003,
0x824, 0x00f0000f, 0x00300004,
0x82c, 0x00f0000f, 0x00300002,
0x870, 0x04000000, 0x00000001,
0x864, 0x00000400, 0x00000000,
0x878, 0x000f000f, 0x00000002,
0xe74, 0x0f000000, 0x00000002,
0xe78, 0x0f000000, 0x00000002,
0xe7c, 0x0f000000, 0x00000002,
0xe80, 0x0f000000, 0x00000002,
0x90c, 0x000000ff, 0x00000011,
0xc04, 0x000000ff, 0x00000033,
0xd04, 0x0000000f, 0x00000003,
0x1f4, 0xffff0000, 0x00007777,
0x234, 0xf8000000, 0x0000000a,
};
u32 rtl8192sephy_reg_array_pg[PHY_REG_ARRAY_PGLENGTH] = {
0xe00, 0xffffffff, 0x06090909,
0xe04, 0xffffffff, 0x00030406,
0xe08, 0x0000ff00, 0x00000000,
0xe10, 0xffffffff, 0x0a0c0d0e,
0xe14, 0xffffffff, 0x04070809,
0xe18, 0xffffffff, 0x0a0c0d0e,
0xe1c, 0xffffffff, 0x04070809,
0xe00, 0xffffffff, 0x04040404,
0xe04, 0xffffffff, 0x00020204,
0xe08, 0x0000ff00, 0x00000000,
0xe10, 0xffffffff, 0x02040404,
0xe14, 0xffffffff, 0x00000002,
0xe18, 0xffffffff, 0x02040404,
0xe1c, 0xffffffff, 0x00000002,
0xe00, 0xffffffff, 0x04040404,
0xe04, 0xffffffff, 0x00020204,
0xe08, 0x0000ff00, 0x00000000,
0xe10, 0xffffffff, 0x02040404,
0xe14, 0xffffffff, 0x00000002,
0xe18, 0xffffffff, 0x02040404,
0xe1c, 0xffffffff, 0x00000002,
0xe00, 0xffffffff, 0x02020202,
0xe04, 0xffffffff, 0x00020202,
0xe08, 0x0000ff00, 0x00000000,
0xe10, 0xffffffff, 0x02020202,
0xe14, 0xffffffff, 0x00000002,
0xe18, 0xffffffff, 0x02020202,
0xe1c, 0xffffffff, 0x00000002,
};
u32 rtl8192seradioa_1t_array[RADIOA_1T_ARRAYLENGTH] = {
0x000, 0x00030159,
0x001, 0x00030250,
0x002, 0x00010000,
0x010, 0x0008000f,
0x011, 0x000231fc,
0x010, 0x000c000f,
0x011, 0x0003f9f8,
0x010, 0x0002000f,
0x011, 0x00020101,
0x014, 0x0001093e,
0x014, 0x0009093e,
0x015, 0x0000f8f4,
0x017, 0x000f6500,
0x01a, 0x00013056,
0x01b, 0x00060000,
0x01c, 0x00000300,
0x01e, 0x00031059,
0x021, 0x00054000,
0x022, 0x0000083c,
0x023, 0x00001558,
0x024, 0x00000060,
0x025, 0x00022583,
0x026, 0x0000f200,
0x027, 0x000eacf1,
0x028, 0x0009bd54,
0x029, 0x00004582,
0x02a, 0x00000001,
0x02b, 0x00021334,
0x02a, 0x00000000,
0x02b, 0x0000000a,
0x02a, 0x00000001,
0x02b, 0x00000808,
0x02b, 0x00053333,
0x02c, 0x0000000c,
0x02a, 0x00000002,
0x02b, 0x00000808,
0x02b, 0x0005b333,
0x02c, 0x0000000d,
0x02a, 0x00000003,
0x02b, 0x00000808,
0x02b, 0x00063333,
0x02c, 0x0000000d,
0x02a, 0x00000004,
0x02b, 0x00000808,
0x02b, 0x0006b333,
0x02c, 0x0000000d,
0x02a, 0x00000005,
0x02b, 0x00000709,
0x02b, 0x00053333,
0x02c, 0x0000000d,
0x02a, 0x00000006,
0x02b, 0x00000709,
0x02b, 0x0005b333,
0x02c, 0x0000000d,
0x02a, 0x00000007,
0x02b, 0x00000709,
0x02b, 0x00063333,
0x02c, 0x0000000d,
0x02a, 0x00000008,
0x02b, 0x00000709,
0x02b, 0x0006b333,
0x02c, 0x0000000d,
0x02a, 0x00000009,
0x02b, 0x0000060a,
0x02b, 0x00053333,
0x02c, 0x0000000d,
0x02a, 0x0000000a,
0x02b, 0x0000060a,
0x02b, 0x0005b333,
0x02c, 0x0000000d,
0x02a, 0x0000000b,
0x02b, 0x0000060a,
0x02b, 0x00063333,
0x02c, 0x0000000d,
0x02a, 0x0000000c,
0x02b, 0x0000060a,
0x02b, 0x0006b333,
0x02c, 0x0000000d,
0x02a, 0x0000000d,
0x02b, 0x0000050b,
0x02b, 0x00053333,
0x02c, 0x0000000d,
0x02a, 0x0000000e,
0x02b, 0x0000050b,
0x02b, 0x00066623,
0x02c, 0x0000001a,
0x02a, 0x000e4000,
0x030, 0x00020000,
0x031, 0x000b9631,
0x032, 0x0000130d,
0x033, 0x00000187,
0x013, 0x00019e6c,
0x013, 0x00015e94,
0x000, 0x00010159,
0x018, 0x0000f401,
0x0fe, 0x00000000,
0x01e, 0x0003105b,
0x0fe, 0x00000000,
0x000, 0x00030159,
0x010, 0x0004000f,
0x011, 0x000203f9,
};
u32 rtl8192seradiob_array[RADIOB_ARRAYLENGTH] = {
0x000, 0x00030159,
0x001, 0x00001041,
0x002, 0x00011000,
0x005, 0x00080fc0,
0x007, 0x000fc803,
0x013, 0x00017cb0,
0x013, 0x00011cc0,
0x013, 0x0000dc60,
0x013, 0x00008c60,
0x013, 0x00004450,
0x013, 0x00000020,
};
u32 rtl8192seradiob_gm_array[RADIOB_GM_ARRAYLENGTH] = {
0x000, 0x00030159,
0x001, 0x00001041,
0x002, 0x00011000,
0x005, 0x00080fc0,
0x007, 0x000fc803,
};
u32 rtl8192semac_2t_array[MAC_2T_ARRAYLENGTH] = {
0x020, 0x00000035,
0x048, 0x0000000e,
0x049, 0x000000f0,
0x04a, 0x00000077,
0x04b, 0x00000083,
0x0b5, 0x00000021,
0x0dc, 0x000000ff,
0x0dd, 0x000000ff,
0x0de, 0x000000ff,
0x0df, 0x000000ff,
0x116, 0x00000000,
0x117, 0x00000000,
0x118, 0x00000000,
0x119, 0x00000000,
0x11a, 0x00000000,
0x11b, 0x00000000,
0x11c, 0x00000000,
0x11d, 0x00000000,
0x160, 0x0000000b,
0x161, 0x0000000b,
0x162, 0x0000000b,
0x163, 0x0000000b,
0x164, 0x0000000b,
0x165, 0x0000000b,
0x166, 0x0000000b,
0x167, 0x0000000b,
0x168, 0x0000000b,
0x169, 0x0000000b,
0x16a, 0x0000000b,
0x16b, 0x0000000b,
0x16c, 0x0000000b,
0x16d, 0x0000000b,
0x16e, 0x0000000b,
0x16f, 0x0000000b,
0x170, 0x0000000b,
0x171, 0x0000000b,
0x172, 0x0000000b,
0x173, 0x0000000b,
0x174, 0x0000000b,
0x175, 0x0000000b,
0x176, 0x0000000b,
0x177, 0x0000000b,
0x178, 0x0000000b,
0x179, 0x0000000b,
0x17a, 0x0000000b,
0x17b, 0x0000000b,
0x17c, 0x0000000b,
0x17d, 0x0000000b,
0x17e, 0x0000000b,
0x17f, 0x0000000b,
0x236, 0x0000000c,
0x503, 0x00000022,
0x560, 0x00000000,
};
u32 rtl8192seagctab_array[AGCTAB_ARRAYLENGTH] = {
0xc78, 0x7f000001,
0xc78, 0x7f010001,
0xc78, 0x7e020001,
0xc78, 0x7d030001,
0xc78, 0x7c040001,
0xc78, 0x7b050001,
0xc78, 0x7a060001,
0xc78, 0x79070001,
0xc78, 0x78080001,
0xc78, 0x77090001,
0xc78, 0x760a0001,
0xc78, 0x750b0001,
0xc78, 0x740c0001,
0xc78, 0x730d0001,
0xc78, 0x720e0001,
0xc78, 0x710f0001,
0xc78, 0x70100001,
0xc78, 0x6f110001,
0xc78, 0x6f120001,
0xc78, 0x6e130001,
0xc78, 0x6d140001,
0xc78, 0x6d150001,
0xc78, 0x6c160001,
0xc78, 0x6b170001,
0xc78, 0x6a180001,
0xc78, 0x6a190001,
0xc78, 0x691a0001,
0xc78, 0x681b0001,
0xc78, 0x671c0001,
0xc78, 0x661d0001,
0xc78, 0x651e0001,
0xc78, 0x641f0001,
0xc78, 0x63200001,
0xc78, 0x4c210001,
0xc78, 0x4b220001,
0xc78, 0x4a230001,
0xc78, 0x49240001,
0xc78, 0x48250001,
0xc78, 0x47260001,
0xc78, 0x46270001,
0xc78, 0x45280001,
0xc78, 0x44290001,
0xc78, 0x2c2a0001,
0xc78, 0x2b2b0001,
0xc78, 0x2a2c0001,
0xc78, 0x292d0001,
0xc78, 0x282e0001,
0xc78, 0x272f0001,
0xc78, 0x26300001,
0xc78, 0x25310001,
0xc78, 0x24320001,
0xc78, 0x23330001,
0xc78, 0x22340001,
0xc78, 0x09350001,
0xc78, 0x08360001,
0xc78, 0x07370001,
0xc78, 0x06380001,
0xc78, 0x05390001,
0xc78, 0x043a0001,
0xc78, 0x033b0001,
0xc78, 0x023c0001,
0xc78, 0x013d0001,
0xc78, 0x003e0001,
0xc78, 0x003f0001,
0xc78, 0x7f400001,
0xc78, 0x7f410001,
0xc78, 0x7e420001,
0xc78, 0x7d430001,
0xc78, 0x7c440001,
0xc78, 0x7b450001,
0xc78, 0x7a460001,
0xc78, 0x79470001,
0xc78, 0x78480001,
0xc78, 0x77490001,
0xc78, 0x764a0001,
0xc78, 0x754b0001,
0xc78, 0x744c0001,
0xc78, 0x734d0001,
0xc78, 0x724e0001,
0xc78, 0x714f0001,
0xc78, 0x70500001,
0xc78, 0x6f510001,
0xc78, 0x6f520001,
0xc78, 0x6e530001,
0xc78, 0x6d540001,
0xc78, 0x6d550001,
0xc78, 0x6c560001,
0xc78, 0x6b570001,
0xc78, 0x6a580001,
0xc78, 0x6a590001,
0xc78, 0x695a0001,
0xc78, 0x685b0001,
0xc78, 0x675c0001,
0xc78, 0x665d0001,
0xc78, 0x655e0001,
0xc78, 0x645f0001,
0xc78, 0x63600001,
0xc78, 0x4c610001,
0xc78, 0x4b620001,
0xc78, 0x4a630001,
0xc78, 0x49640001,
0xc78, 0x48650001,
0xc78, 0x47660001,
0xc78, 0x46670001,
0xc78, 0x45680001,
0xc78, 0x44690001,
0xc78, 0x2c6a0001,
0xc78, 0x2b6b0001,
0xc78, 0x2a6c0001,
0xc78, 0x296d0001,
0xc78, 0x286e0001,
0xc78, 0x276f0001,
0xc78, 0x26700001,
0xc78, 0x25710001,
0xc78, 0x24720001,
0xc78, 0x23730001,
0xc78, 0x22740001,
0xc78, 0x09750001,
0xc78, 0x08760001,
0xc78, 0x07770001,
0xc78, 0x06780001,
0xc78, 0x05790001,
0xc78, 0x047a0001,
0xc78, 0x037b0001,
0xc78, 0x027c0001,
0xc78, 0x017d0001,
0xc78, 0x007e0001,
0xc78, 0x007f0001,
0xc78, 0x3000001e,
0xc78, 0x3001001e,
0xc78, 0x3002001e,
0xc78, 0x3003001e,
0xc78, 0x3004001e,
0xc78, 0x3405001e,
0xc78, 0x3806001e,
0xc78, 0x3e07001e,
0xc78, 0x3e08001e,
0xc78, 0x4409001e,
0xc78, 0x460a001e,
0xc78, 0x480b001e,
0xc78, 0x480c001e,
0xc78, 0x4e0d001e,
0xc78, 0x560e001e,
0xc78, 0x5a0f001e,
0xc78, 0x5e10001e,
0xc78, 0x6211001e,
0xc78, 0x6c12001e,
0xc78, 0x7213001e,
0xc78, 0x7214001e,
0xc78, 0x7215001e,
0xc78, 0x7216001e,
0xc78, 0x7217001e,
0xc78, 0x7218001e,
0xc78, 0x7219001e,
0xc78, 0x721a001e,
0xc78, 0x721b001e,
0xc78, 0x721c001e,
0xc78, 0x721d001e,
0xc78, 0x721e001e,
0xc78, 0x721f001e,
};
| gpl-2.0 |
ajopanoor/hydra | drivers/atm/uPD98402.c | 10112 | 6616 | /* drivers/atm/uPD98402.c - NEC uPD98402 (PHY) declarations */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include "uPD98402.h"
#if 0
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif
/*
 * Per-PHY private state, attached to the ATM device.  All access goes
 * through the PRIV() macro, i.e. via dev->phy_data.
 */
struct uPD98402_priv {
struct k_sonet_stats sonet_stats;/* link diagnostics */
unsigned char framing; /* SONET/SDH framing */
int loop_mode; /* loopback mode */
spinlock_t lock; /* serializes multi-register PHY sequences */
};
/* PUT/GET wrap the SAR driver's PHY register callbacks; the uPD98402_
 * prefix is pasted onto the register name by the preprocessor. */
#define PRIV(dev) ((struct uPD98402_priv *) dev->phy_data)
#define PUT(val,reg) dev->ops->phy_put(dev,val,uPD98402_##reg)
#define GET(reg) dev->ops->phy_get(dev,uPD98402_##reg)
/*
 * Copy the accumulated SONET statistics to user space.
 *
 * Folds the hardware HEC error counter into the kernel-side counters
 * first.  If 'arg' is NULL only the (optional) zeroing is performed.
 * If 'zero' is set and the copy-out succeeded, the reported values are
 * subtracted from the kernel counters, except for the fields this PHY
 * does not implement (reported as -1), which must stay untouched.
 * Returns 0 or -EFAULT.
 */
static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int zero)
{
struct sonet_stats tmp;
int error = 0;
atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
if (zero && !error) {
/* unused fields are reported as -1, but we must not "adjust"
them */
tmp.corr_hcs = tmp.tx_cells = tmp.rx_cells = 0;
sonet_subtract_stats(&PRIV(dev)->sonet_stats,&tmp);
}
return error ? -EFAULT : 0;
}
/*
 * Program the PHY framing mode (SONET or SDH).
 *
 * Each mode is described by four values: C11T/C12T/C13T register
 * contents plus the SS field of MDR.  The register sequence is done
 * under the private lock so concurrent callers cannot interleave a
 * half-programmed mode.  Returns 0 or -EINVAL for an unknown mode.
 */
static int set_framing(struct atm_dev *dev, unsigned char framing)
{
	static const unsigned char sonet[] = { 1, 2, 3, 0 };
	static const unsigned char sdh[] = { 1, 0, 0, 2 };
	const unsigned char *cfg;
	unsigned long flags;

	if (framing == SONET_FRAME_SONET)
		cfg = sonet;
	else if (framing == SONET_FRAME_SDH)
		cfg = sdh;
	else
		return -EINVAL;

	spin_lock_irqsave(&PRIV(dev)->lock, flags);
	PUT(cfg[0], C11T);
	PUT(cfg[1], C12T);
	PUT(cfg[2], C13T);
	PUT((GET(MDR) & ~uPD98402_MDR_SS_MASK) |
	    (cfg[3] << uPD98402_MDR_SS_SHIFT), MDR);
	spin_unlock_irqrestore(&PRIV(dev)->lock, flags);
	return 0;
}
/*
 * Read the received C1 overhead bytes (C11R/C12R/C13R) and copy six
 * bytes to user space; bytes 3..5 are not provided by this PHY and are
 * reported as 0xff.  The three register reads are taken under the
 * private lock so they form a consistent snapshot.
 * Returns 0 or -EFAULT.
 */
static int get_sense(struct atm_dev *dev,u8 __user *arg)
{
unsigned long flags;
unsigned char s[3];
spin_lock_irqsave(&PRIV(dev)->lock, flags);
s[0] = GET(C11R);
s[1] = GET(C12R);
s[2] = GET(C13R);
spin_unlock_irqrestore(&PRIV(dev)->lock, flags);
return (put_user(s[0], arg) || put_user(s[1], arg+1) ||
put_user(s[2], arg+2) || put_user(0xff, arg+3) ||
put_user(0xff, arg+4) || put_user(0xff, arg+5)) ? -EFAULT : 0;
}
/*
 * Configure the PHY loopback mode.
 *
 * 'mode' encodes two independent selections: XTLOC (where transmitted
 * cells loop back locally: none / PHY / ATM layer) and XTRMT (remote
 * loopback: none / PHY).  Both are validated before MDR is written, so
 * an invalid mode leaves the hardware untouched.  The accepted mode is
 * cached in loop_mode for ATM_GETLOOP.  Returns 0 or -EINVAL.
 */
static int set_loopback(struct atm_dev *dev,int mode)
{
unsigned char mode_reg;
mode_reg = GET(MDR) & ~(uPD98402_MDR_TPLP | uPD98402_MDR_ALP |
uPD98402_MDR_RPLP);
switch (__ATM_LM_XTLOC(mode)) {
case __ATM_LM_NONE:
break;
case __ATM_LM_PHY:
mode_reg |= uPD98402_MDR_TPLP;
break;
case __ATM_LM_ATM:
mode_reg |= uPD98402_MDR_ALP;
break;
default:
return -EINVAL;
}
switch (__ATM_LM_XTRMT(mode)) {
case __ATM_LM_NONE:
break;
case __ATM_LM_PHY:
mode_reg |= uPD98402_MDR_RPLP;
break;
default:
return -EINVAL;
}
PUT(mode_reg,MDR);
PRIV(dev)->loop_mode = mode;
return 0;
}
/*
 * PHY ioctl dispatcher (atmphy_ops.ioctl).
 *
 * SONET_GETSTATZ shares the fetch_stats path with SONET_GETSTAT; the
 * third argument selects whether the counters are zeroed after the
 * copy-out.  Scalar arguments (framing, loop mode) arrive encoded in
 * the pointer value itself, hence the (int)(unsigned long) casts.
 * Unknown commands fall through to -ENOIOCTLCMD so the ATM core can
 * try other handlers.
 */
static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
{
switch (cmd) {
case SONET_GETSTATZ:
case SONET_GETSTAT:
return fetch_stats(dev,arg, cmd == SONET_GETSTATZ);
case SONET_SETFRAMING:
return set_framing(dev, (int)(unsigned long)arg);
case SONET_GETFRAMING:
return put_user(PRIV(dev)->framing,(int __user *)arg) ?
-EFAULT : 0;
case SONET_GETFRSENSE:
return get_sense(dev,arg);
case ATM_SETLOOP:
return set_loopback(dev, (int)(unsigned long)arg);
case ATM_GETLOOP:
return put_user(PRIV(dev)->loop_mode,(int __user *)arg) ?
-EFAULT : 0;
case ATM_QUERYLOOP:
return put_user(ATM_LM_LOC_PHY | ATM_LM_LOC_ATM |
ATM_LM_RMT_PHY,(int __user *)arg) ? -EFAULT : 0;
default:
return -ENOIOCTLCMD;
}
}
/*
 * ADD_LIMITED(stat, reg): fold hardware counter 'reg' into kernel
 * counter 'stat', clamping to INT_MAX on wrap instead of going
 * negative.  NOTE: function-like macro; 'v' expands inside GET() and
 * 's' is evaluated more than once — pass only plain identifiers.
 */
#define ADD_LIMITED(s,v) \
{ atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
/* Harvest the performance counters flagged in PCR after a PFM interrupt. */
static void stat_event(struct atm_dev *dev)
{
unsigned char events;
events = GET(PCR);
if (events & uPD98402_PFM_PFEB) ADD_LIMITED(path_febe,PFECB);
if (events & uPD98402_PFM_LFEB) ADD_LIMITED(line_febe,LECCT);
if (events & uPD98402_PFM_B3E) ADD_LIMITED(path_bip,B3ECT);
if (events & uPD98402_PFM_B2E) ADD_LIMITED(line_bip,B2ECT);
if (events & uPD98402_PFM_B1E) ADD_LIMITED(section_bip,B1ECT);
}
#undef ADD_LIMITED
/*
 * PHY interrupt handler (atmphy_ops.interrupt).
 *
 * Loops reading PICR until no cause remains (reading PICR presumably
 * acknowledges the pending causes — TODO confirm against the uPD98402
 * datasheet).  RX FIFO overflow warnings are rate-limited: 'silence'
 * holds the jiffies value until which further messages are suppressed,
 * and the "|1" keeps it nonzero so the silence==0 "never warned yet"
 * special case cannot recur by accident.
 */
static void uPD98402_int(struct atm_dev *dev)
{
static unsigned long silence = 0;
unsigned char reason;
while ((reason = GET(PICR))) {
if (reason & uPD98402_INT_LOS)
printk(KERN_NOTICE "%s(itf %d): signal lost\n",
dev->type,dev->number);
if (reason & uPD98402_INT_PFM) stat_event(dev);
if (reason & uPD98402_INT_PCO) {
(void) GET(PCOCR); /* clear interrupt cause */
atomic_add(GET(HECCT),
&PRIV(dev)->sonet_stats.uncorr_hcs);
}
if ((reason & uPD98402_INT_RFO) &&
(time_after(jiffies, silence) || silence == 0)) {
printk(KERN_WARNING "%s(itf %d): uPD98402 receive "
"FIFO overflow\n",dev->type,dev->number);
silence = (jiffies+HZ/2)|1;
}
}
}
static int uPD98402_start(struct atm_dev *dev)
{
DPRINTK("phy_start\n");
if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
return -ENOMEM;
spin_lock_init(&PRIV(dev)->lock);
memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats));
(void) GET(PCR); /* clear performance events */
PUT(uPD98402_PFM_FJ,PCMR); /* ignore frequency adj */
(void) GET(PCOCR); /* clear overflows */
PUT(~uPD98402_PCO_HECC,PCOMR);
(void) GET(PICR); /* clear interrupts */
PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
uPD98402_INT_LOS),PIMR); /* enable them */
(void) fetch_stats(dev,NULL,1); /* clear kernel counters */
atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
return 0;
}
/* Tear down the PHY: release the private data allocated in _start(). */
static int uPD98402_stop(struct atm_dev *dev)
{
/* let SAR driver worry about stopping interrupts */
kfree(PRIV(dev));
return 0;
}
/* PHY operations handed to the SAR (ATM adapter) driver. */
static const struct atmphy_ops uPD98402_ops = {
.start = uPD98402_start,
.ioctl = uPD98402_ioctl,
.interrupt = uPD98402_int,
.stop = uPD98402_stop,
};
/* Attach this PHY driver to an ATM device; called by the SAR driver. */
int uPD98402_init(struct atm_dev *dev)
{
DPRINTK("phy_init\n");
dev->phy = &uPD98402_ops;
return 0;
}
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(uPD98402_init);
/* Nothing to do at load time; the SAR driver drives everything. */
static __init int uPD98402_module_init(void)
{
return 0;
}
module_init(uPD98402_module_init);
/* module_exit not defined so not unloadable */
| gpl-2.0 |
Diaob/z_bac_150827_android_kernel_oneplus_msm8994 | drivers/atm/uPD98402.c | 10112 | 6616 | /* drivers/atm/uPD98402.c - NEC uPD98402 (PHY) declarations */
/* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include "uPD98402.h"
#if 0
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif
/* Per-device private state; PRIV() hangs it off dev->phy_data. */
struct uPD98402_priv {
struct k_sonet_stats sonet_stats;/* link diagnostics */
unsigned char framing; /* SONET/SDH framing */
int loop_mode; /* loopback mode */
spinlock_t lock;
};
/* NOTE(review): PRIV() reads dev->phy_data; the allocation done in
uPD98402_start() must target the same field -- verify. */
#define PRIV(dev) ((struct uPD98402_priv *) dev->phy_data)
/* Register access goes through the SAR driver's PHY hooks; the register
name gets the uPD98402_ prefix pasted on. */
#define PUT(val,reg) dev->ops->phy_put(dev,val,uPD98402_##reg)
#define GET(reg) dev->ops->phy_get(dev,uPD98402_##reg)
/*
 * Snapshot the SONET statistics to user space (arg may be NULL to skip
 * the copy).  When 'zero' is set, subtract the snapshot from the running
 * counters -- except the fields this PHY reports as -1 (unsupported),
 * which must stay untouched.
 */
static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int zero)
{
struct sonet_stats tmp;
int error = 0;
/* fold in uncorrectable-HCS events counted since the last read */
atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
if (zero && !error) {
/* unused fields are reported as -1, but we must not "adjust"
them */
tmp.corr_hcs = tmp.tx_cells = tmp.rx_cells = 0;
sonet_subtract_stats(&PRIV(dev)->sonet_stats,&tmp);
}
return error ? -EFAULT : 0;
}
/*
 * Program SONET or SDH framing: write the C11T/C12T/C13T transmit
 * overhead bytes and the SS field of MDR (table order: C11T, C12T,
 * C13T, SS) under the device lock.
 *
 * Fix: 'set' was declared "const char *" but assigned from
 * "const unsigned char[]" tables -- an incompatible-pointer-signedness
 * mismatch; use const unsigned char * to match the tables.
 *
 * NOTE(review): PRIV(dev)->framing is never updated here, so
 * SONET_GETFRAMING does not reflect this call -- confirm intended.
 */
static int set_framing(struct atm_dev *dev, unsigned char framing)
{
	static const unsigned char sonet[] = { 1, 2, 3, 0 };
	static const unsigned char sdh[] = { 1, 0, 0, 2 };
	const unsigned char *set;
	unsigned long flags;

	switch (framing) {
	case SONET_FRAME_SONET:
		set = sonet;
		break;
	case SONET_FRAME_SDH:
		set = sdh;
		break;
	default:
		return -EINVAL;
	}
	spin_lock_irqsave(&PRIV(dev)->lock, flags);
	PUT(set[0], C11T);
	PUT(set[1], C12T);
	PUT(set[2], C13T);
	PUT((GET(MDR) & ~uPD98402_MDR_SS_MASK) | (set[3] <<
	    uPD98402_MDR_SS_SHIFT), MDR);
	spin_unlock_irqrestore(&PRIV(dev)->lock, flags);
	return 0;
}
/*
 * Report the received C1x overhead bytes (frame sense) to user space.
 * Bytes 3..5 of the 6-byte result are unused and filled with 0xff.
 */
static int get_sense(struct atm_dev *dev,u8 __user *arg)
{
unsigned long flags;
unsigned char s[3];
/* hold the lock so the three register reads form one snapshot */
spin_lock_irqsave(&PRIV(dev)->lock, flags);
s[0] = GET(C11R);
s[1] = GET(C12R);
s[2] = GET(C13R);
spin_unlock_irqrestore(&PRIV(dev)->lock, flags);
return (put_user(s[0], arg) || put_user(s[1], arg+1) ||
put_user(s[2], arg+2) || put_user(0xff, arg+3) ||
put_user(0xff, arg+4) || put_user(0xff, arg+5)) ? -EFAULT : 0;
}
/*
 * Program the requested loopback mode into MDR.  Both directions are
 * validated before any register write, so an invalid mode leaves the
 * hardware untouched.
 */
static int set_loopback(struct atm_dev *dev, int mode)
{
	unsigned char mdr;

	/* start from MDR with all loopback bits cleared */
	mdr = GET(MDR) & ~(uPD98402_MDR_TPLP | uPD98402_MDR_ALP |
			   uPD98402_MDR_RPLP);

	/* loopback towards the local side */
	switch (__ATM_LM_XTLOC(mode)) {
	case __ATM_LM_NONE:
		break;
	case __ATM_LM_PHY:
		mdr |= uPD98402_MDR_TPLP;
		break;
	case __ATM_LM_ATM:
		mdr |= uPD98402_MDR_ALP;
		break;
	default:
		return -EINVAL;
	}

	/* loopback towards the remote side */
	switch (__ATM_LM_XTRMT(mode)) {
	case __ATM_LM_NONE:
		break;
	case __ATM_LM_PHY:
		mdr |= uPD98402_MDR_RPLP;
		break;
	default:
		return -EINVAL;
	}

	PUT(mdr, MDR);
	PRIV(dev)->loop_mode = mode;
	return 0;
}
/*
 * PHY-level ioctl dispatcher; unknown commands return -ENOIOCTLCMD so
 * the SAR driver can handle them.
 */
static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
{
switch (cmd) {
case SONET_GETSTATZ:
case SONET_GETSTAT:
/* GETSTATZ additionally zeroes the counters after reading */
return fetch_stats(dev,arg, cmd == SONET_GETSTATZ);
case SONET_SETFRAMING:
return set_framing(dev, (int)(unsigned long)arg);
case SONET_GETFRAMING:
return put_user(PRIV(dev)->framing,(int __user *)arg) ?
-EFAULT : 0;
case SONET_GETFRSENSE:
return get_sense(dev,arg);
case ATM_SETLOOP:
return set_loopback(dev, (int)(unsigned long)arg);
case ATM_GETLOOP:
return put_user(PRIV(dev)->loop_mode,(int __user *)arg) ?
-EFAULT : 0;
case ATM_QUERYLOOP:
/* the set of loopback modes this PHY supports */
return put_user(ATM_LM_LOC_PHY | ATM_LM_LOC_ATM |
ATM_LM_RMT_PHY,(int __user *)arg) ? -EFAULT : 0;
default:
return -ENOIOCTLCMD;
}
}
/*
 * Accumulate hardware counter GET(v) into kernel statistic s, clamping
 * at INT_MAX if the signed value wraps negative.
 */
#define ADD_LIMITED(s,v) \
{ atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
/* Service a performance-monitoring interrupt: PCR flags which counters
have new data to be folded into the SONET statistics. */
static void stat_event(struct atm_dev *dev)
{
unsigned char events;
events = GET(PCR);
if (events & uPD98402_PFM_PFEB) ADD_LIMITED(path_febe,PFECB);
if (events & uPD98402_PFM_LFEB) ADD_LIMITED(line_febe,LECCT);
if (events & uPD98402_PFM_B3E) ADD_LIMITED(path_bip,B3ECT);
if (events & uPD98402_PFM_B2E) ADD_LIMITED(line_bip,B2ECT);
if (events & uPD98402_PFM_B1E) ADD_LIMITED(section_bip,B1ECT);
}
#undef ADD_LIMITED
/*
 * PHY interrupt handler.  Keep reading the interrupt cause register
 * until it reads back zero, handling each latched condition in turn.
 */
static void uPD98402_int(struct atm_dev *dev)
{
	/* rate limit for the overflow message; "|1" keeps it non-zero */
	static unsigned long silence = 0;
	unsigned char cause;

	while ((cause = GET(PICR)) != 0) {
		if (cause & uPD98402_INT_LOS)
			printk(KERN_NOTICE "%s(itf %d): signal lost\n",
			       dev->type, dev->number);
		if (cause & uPD98402_INT_PFM)
			stat_event(dev);
		if (cause & uPD98402_INT_PCO) {
			(void) GET(PCOCR);	/* clear interrupt cause */
			atomic_add(GET(HECCT),
				   &PRIV(dev)->sonet_stats.uncorr_hcs);
		}
		if (cause & uPD98402_INT_RFO) {
			/* warn at most every HZ/2 */
			if (silence == 0 || time_after(jiffies, silence)) {
				printk(KERN_WARNING "%s(itf %d): uPD98402 "
				       "receive FIFO overflow\n",
				       dev->type, dev->number);
				silence = (jiffies + HZ / 2) | 1;
			}
		}
	}
}
static int uPD98402_start(struct atm_dev *dev)
{
DPRINTK("phy_start\n");
if (!(dev->dev_data = kmalloc(sizeof(struct uPD98402_priv),GFP_KERNEL)))
return -ENOMEM;
spin_lock_init(&PRIV(dev)->lock);
memset(&PRIV(dev)->sonet_stats,0,sizeof(struct k_sonet_stats));
(void) GET(PCR); /* clear performance events */
PUT(uPD98402_PFM_FJ,PCMR); /* ignore frequency adj */
(void) GET(PCOCR); /* clear overflows */
PUT(~uPD98402_PCO_HECC,PCOMR);
(void) GET(PICR); /* clear interrupts */
PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
uPD98402_INT_LOS),PIMR); /* enable them */
(void) fetch_stats(dev,NULL,1); /* clear kernel counters */
atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
return 0;
}
/* Tear down the PHY: release the private data allocated in _start(). */
static int uPD98402_stop(struct atm_dev *dev)
{
/* let SAR driver worry about stopping interrupts */
kfree(PRIV(dev));
return 0;
}
/* PHY operations handed to the SAR (ATM adapter) driver. */
static const struct atmphy_ops uPD98402_ops = {
.start = uPD98402_start,
.ioctl = uPD98402_ioctl,
.interrupt = uPD98402_int,
.stop = uPD98402_stop,
};
/* Attach this PHY driver to an ATM device; called by the SAR driver. */
int uPD98402_init(struct atm_dev *dev)
{
DPRINTK("phy_init\n");
dev->phy = &uPD98402_ops;
return 0;
}
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(uPD98402_init);
/* Nothing to do at load time; the SAR driver calls uPD98402_init(). */
static __init int uPD98402_module_init(void)
{
return 0;
}
module_init(uPD98402_module_init);
/* module_exit not defined so not unloadable */
| gpl-2.0 |
cipriancraciun/linux | arch/sh/kernel/ftrace.c | 11136 | 10865 | /*
* Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
* Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
*
* Code for replacing ftrace calls with jumps.
*
* Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
*
* Thanks goes to Ingo Molnar, for suggesting the idea.
* Mathieu Desnoyers, for suggesting postponing the modifications.
* Arjan van de Ven, for keeping me straight, and explaining to me
* the dangers of modifying code on the run.
*/
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
#include <trace/syscall.h>
#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];
static unsigned char ftrace_nop[4];
/*
* If we're trying to nop out a call to a function, we instead
* place a call to the address after the memory table.
*
* 8c011060 <a>:
* 8c011060: 02 d1 mov.l 8c01106c <a+0xc>,r1
* 8c011062: 22 4f sts.l pr,@-r15
* 8c011064: 02 c7 mova 8c011070 <a+0x10>,r0
* 8c011066: 2b 41 jmp @r1
* 8c011068: 2a 40 lds r0,pr
* 8c01106a: 09 00 nop
* 8c01106c: 68 24 .word 0x2468 <--- ip
* 8c01106e: 1d 8c .word 0x8c1d
* 8c011070: 26 4f lds.l @r15+,pr <--- ip + MCOUNT_INSN_SIZE
*
* We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
* past the _mcount call and continue executing code like normal.
*/
/* Build the 4-byte "nop" replacement for the literal at ip: the address
of the instruction after the mcount call sequence (see the diagram
above), so the call is branched past. */
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
__raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
return ftrace_nop;
}
/* Build the 4-byte "call" replacement: addr becomes the literal loaded
by the mova/jmp sequence at the call site. */
static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
/* Place the address in the memory table. */
__raw_writel(addr, ftrace_replaced_code);
/*
* No locking needed, this must be called via kstop_machine
* which in essence is like running on a uniprocessor machine.
*/
return ftrace_replaced_code;
}
/*
* Modifying code must take extra care. On an SMP machine, if
* the code being modified is also being executed on another CPU
* that CPU will have undefined results and possibly take a GPF.
* We use kstop_machine to stop other CPUS from exectuing code.
* But this does not stop NMIs from happening. We still need
* to protect against that. We separate out the modification of
* the code to take care of this.
*
* Two buffers are added: An IP buffer and a "code" buffer.
*
* 1) Put the instruction pointer into the IP buffer
* and the new code into the "code" buffer.
* 2) Wait for any running NMIs to finish and set a flag that says
* we are modifying code, it is done in an atomic operation.
* 3) Write the code
* 4) clear the flag.
* 5) Wait for any running NMIs to finish.
*
* If an NMI is executed, the first thing it does is to call
* "ftrace_nmi_enter". This will check if the flag is set to write
* and if it is, it will write what is in the IP and "code" buffers.
*
* The trick is, it does not matter if everyone is writing the same
* content to the code location. Also, if a CPU is executing code
* it is OK to write to that code location if the contents being written
* are the same as what exists.
*/
#define MOD_CODE_WRITE_FLAG (1 << 31) /* set when NMI should do the write */
static atomic_t nmi_running = ATOMIC_INIT(0);
static int mod_code_status; /* holds return value of text write */
static void *mod_code_ip; /* holds the IP to write to */
static void *mod_code_newcode; /* holds the text to write to the IP */
static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);
/*
 * Report the NMI wait / NMI update counters for the dynamic-ftrace
 * debug interface as "<waits> <updates>".
 */
int ftrace_arch_read_dyn_info(char *buf, int size)
{
	return snprintf(buf, size, "%u %u",
			nmi_wait_count,
			atomic_read(&nmi_update_count));
}
/* Atomically clear MOD_CODE_WRITE_FLAG in nmi_running without touching
the NMI nesting count kept in the remaining bits. */
static void clear_mod_flag(void)
{
int old = atomic_read(&nmi_running);
for (;;) {
int new = old & ~MOD_CODE_WRITE_FLAG;
/* done once the flag reads back clear */
if (old == new)
break;
/* cmpxchg returns the previous value; retry with it on a race */
old = atomic_cmpxchg(&nmi_running, old, new);
}
}
/* Perform the pending text write described by mod_code_ip/newcode.
Runs from task context (do_ftrace_mod_code) and from NMIs
(ftrace_nmi_enter); writing the same bytes twice is harmless. */
static void ftrace_mod_code(void)
{
/*
* Yes, more than one CPU process can be writing to mod_code_status.
* (and the code itself)
* But if one were to fail, then they all should, and if one were
* to succeed, then they all should.
*/
mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
MCOUNT_INSN_SIZE);
/* if we fail, then kill any new writers */
if (mod_code_status)
clear_mod_flag();
}
/* NMI entry hook: if a code modification is pending (write flag set in
nmi_running), perform the write ourselves so this NMI never executes
half-patched text. */
void ftrace_nmi_enter(void)
{
if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
/* pairs with the smp_mb() in do_ftrace_mod_code() that
publishes mod_code_ip/newcode before setting the flag */
smp_rmb();
ftrace_mod_code();
atomic_inc(&nmi_update_count);
}
/* Must have previous changes seen before executions */
smp_mb();
}
/* NMI exit hook: drop our count so writers know we are gone. */
void ftrace_nmi_exit(void)
{
/* Finish all executions before clearing nmi_running */
smp_mb();
atomic_dec(&nmi_running);
}
/* Wait until no NMIs are in flight, then set the write flag; the
cmpxchg only succeeds when the nesting count is zero, so both happen
atomically. */
static void wait_for_nmi_and_set_mod_flag(void)
{
if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
return;
do {
cpu_relax();
} while (atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG));
/* statistic: we had to wait at least once */
nmi_wait_count++;
}
/* Spin until every in-flight NMI has left its handler; bump the wait
 * statistic only if we actually had to wait. */
static void wait_for_nmi(void)
{
	if (!atomic_read(&nmi_running))
		return;

	while (atomic_read(&nmi_running))
		cpu_relax();

	nmi_wait_count++;
}
/* Publish the (ip, new_code) pair, block out NMIs, write the text, and
return the write status.  The barrier placement here is load-bearing;
see the big comment above MOD_CODE_WRITE_FLAG. */
static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
mod_code_ip = (void *)ip;
mod_code_newcode = new_code;
/* The buffers need to be visible before we let NMIs write them */
smp_mb();
wait_for_nmi_and_set_mod_flag();
/* Make sure all running NMIs have finished before we write the code */
smp_mb();
ftrace_mod_code();
/* Make sure the write happens before clearing the bit */
smp_mb();
clear_mod_flag();
wait_for_nmi();
return mod_code_status;
}
/* Replace the MCOUNT_INSN_SIZE bytes at ip, but only after verifying
they currently equal old_code.  Returns -EFAULT if the text is
unreadable, -EINVAL if it does not match, -EPERM if the write fails. */
static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code)
{
unsigned char replaced[MCOUNT_INSN_SIZE];
/*
* Note: Due to modules and __init, code can
* disappear and change, we need to protect against faulting
* as well as code changing. We do this by using the
* probe_kernel_* functions.
*
* No real locking needed, this code is run through
* kstop_machine, or before SMP starts.
*/
/* read the text we want to modify */
if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
/* Make sure it is what we expect it to be */
if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
return -EINVAL;
/* replace the text with the new text */
if (do_ftrace_mod_code(ip, new_code))
return -EPERM;
/* make the new bytes visible to the instruction fetch path */
flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
return 0;
}
/* Retarget the ftrace_call site so the tracer trampoline calls func. */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
unsigned char old[MCOUNT_INSN_SIZE], *new;
/* current literal bytes serve as the expected "old" contents */
memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(ip, (unsigned long)func);
return ftrace_modify_code(ip, old, new);
}
/* Turn the mcount call at rec->ip into a nop (expecting it to currently
 * call addr).  The two replace helpers use separate static buffers, so
 * both patterns stay valid simultaneously. */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned char *expected = ftrace_call_replace(ip, addr);
	unsigned char *nop = ftrace_nop_replace(ip);

	return ftrace_modify_code(ip, expected, nop);
}

/* Turn the nop at rec->ip back into a call to addr. */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned char *expected = ftrace_nop_replace(ip);
	unsigned char *call = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(ip, expected, call);
}
/* Arch hook for dynamic-ftrace setup; nothing to do on SH. */
int __init ftrace_dyn_arch_init(void *data)
{
/* The return code is returned via data */
__raw_writel(0, (unsigned long)data);
return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
/* Patch the 32-bit literal at ip from old_addr to new_addr, verifying
the current contents first.  Used to flip the graph-caller target. */
static int ftrace_mod(unsigned long ip, unsigned long old_addr,
unsigned long new_addr)
{
unsigned char code[MCOUNT_INSN_SIZE];
if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
if (old_addr != __raw_readl((unsigned long *)code))
return -EINVAL;
__raw_writel(new_addr, ip);
return 0;
}
/* Point the ftrace_graph_call literal at ftrace_graph_caller (it is
 * expected to currently point at skip_trace). */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip =
		(unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;

	return ftrace_mod(ip, (unsigned long)&skip_trace,
			  (unsigned long)&ftrace_graph_caller);
}

/* Point the ftrace_graph_call literal back at skip_trace. */
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip =
		(unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;

	return ftrace_mod(ip, (unsigned long)&ftrace_graph_caller,
			  (unsigned long)&skip_trace);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 *
 * This is the main routine for the function graph tracer. The function
 * graph tracer essentially works like this:
 *
 * parent is the stack address containing self_addr's return address.
 * We pull the real return address out of parent and store it in
 * current's ret_stack. Then, we replace the return address on the stack
 * with the address of return_to_handler. self_addr is the function that
 * called mcount.
 *
 * When self_addr returns, it will jump to return_to_handler which calls
 * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
 * return address off of current's ret_stack and jump to it.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted, err;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	/*
	 * Fix: the original text had "¤t" here -- an HTML-entity
	 * corruption of "&current" -- which does not compile.
	 */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too much intrusive to
	 * ignore such a protection.
	 *
	 * %0 = old (original return address read from *parent)
	 * %1 = faulted flag, %2 = parent, %3 = return_hooker;
	 * the fixup section lands at label 3 with faulted = 1.
	 */
	__asm__ __volatile__(
		"1: \n\t"
		"mov.l @%2, %0 \n\t"
		"2: \n\t"
		"mov.l %3, @%2 \n\t"
		"mov #0, %1 \n\t"
		"3: \n\t"
		".section .fixup, \"ax\" \n\t"
		"4: \n\t"
		"mov.l 5f, %0 \n\t"
		"jmp @%0 \n\t"
		" mov #1, %1 \n\t"
		".balign 4 \n\t"
		"5: .long 3b \n\t"
		".previous \n\t"
		".section __ex_table,\"a\" \n\t"
		".long 1b, 4b \n\t"
		".long 2b, 4b \n\t"
		".previous \n\t"
		: "=&r" (old), "=r" (faulted)
		: "r" (parent), "r" (return_hooker)
	);

	if (unlikely(faulted)) {
		/* accessing the parent slot faulted: stop graph tracing */
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
	if (err == -EBUSY) {
		/* ret_stack full: restore the original return address */
		__raw_writel(old, parent);
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		__raw_writel(old, parent);
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
| gpl-2.0 |
jeboo/kernel_KK_i337_ATT_NB1 | drivers/zorro/zorro-sysfs.c | 11648 | 3155 | /*
* File Attributes for Zorro Devices
*
* Copyright (C) 2003 Geert Uytterhoeven
*
* Loosely based on drivers/pci/pci-sysfs.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/zorro.h>
#include <linux/stat.h>
#include <linux/string.h>
#include "zorro.h"
/* show configuration fields */
/* Generate a sysfs show function plus a read-only DEVICE_ATTR for one
zorro_dev field, printed with the given format string. */
#define zorro_config_attr(name, field, format_string) \
static ssize_t \
show_##name(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct zorro_dev *z; \
\
z = to_zorro_dev(dev); \
return sprintf(buf, format_string, z->field); \
} \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
zorro_config_attr(id, id, "0x%08x\n");
zorro_config_attr(type, rom.er_Type, "0x%02x\n");
zorro_config_attr(serial, rom.er_SerialNumber, "0x%08x\n");
zorro_config_attr(slotaddr, slotaddr, "0x%04x\n");
zorro_config_attr(slotsize, slotsize, "0x%04x\n");
/* sysfs "resource" attribute: "<start> <end> <flags>" in hex. */
static ssize_t zorro_show_resource(struct device *dev, struct device_attribute *attr, char *buf)
{
struct zorro_dev *z = to_zorro_dev(dev);
return sprintf(buf, "0x%08lx 0x%08lx 0x%08lx\n",
(unsigned long)zorro_resource_start(z),
(unsigned long)zorro_resource_end(z),
zorro_resource_flags(z));
}
static DEVICE_ATTR(resource, S_IRUGO, zorro_show_resource, NULL);
/* sysfs "config" binary attribute: synthesize an AmigaOS ConfigDev
structure from the device's fields and serve the requested window. */
static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
struct zorro_dev *z = to_zorro_dev(container_of(kobj, struct device,
kobj));
struct ConfigDev cd;
/* Construct a ConfigDev */
memset(&cd, 0, sizeof(cd));
cd.cd_Rom = z->rom;
cd.cd_SlotAddr = z->slotaddr;
cd.cd_SlotSize = z->slotsize;
cd.cd_BoardAddr = (void *)zorro_resource_start(z);
cd.cd_BoardSize = zorro_resource_len(z);
/* handles partial reads / out-of-range offsets for us */
return memory_read_from_buffer(buf, count, &off, &cd, sizeof(cd));
}
static struct bin_attribute zorro_config_attr = {
.attr = {
.name = "config",
.mode = S_IRUGO,
},
.size = sizeof(struct ConfigDev),
.read = zorro_read_config,
};
/* sysfs "modalias" attribute: the module alias udev matches against. */
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct zorro_dev *z = to_zorro_dev(dev);
return sprintf(buf, ZORRO_DEVICE_MODALIAS_FMT "\n", z->id);
}
static DEVICE_ATTR(modalias, S_IRUGO, modalias_show, NULL);
int zorro_create_sysfs_dev_files(struct zorro_dev *z)
{
struct device *dev = &z->dev;
int error;
/* current configuration's attributes */
if ((error = device_create_file(dev, &dev_attr_id)) ||
(error = device_create_file(dev, &dev_attr_type)) ||
(error = device_create_file(dev, &dev_attr_serial)) ||
(error = device_create_file(dev, &dev_attr_slotaddr)) ||
(error = device_create_file(dev, &dev_attr_slotsize)) ||
(error = device_create_file(dev, &dev_attr_resource)) ||
(error = device_create_file(dev, &dev_attr_modalias)) ||
(error = sysfs_create_bin_file(&dev->kobj, &zorro_config_attr)))
return error;
return 0;
}
| gpl-2.0 |
SlimSaber/kernel_samsung_smdk4412 | fs/proc/interrupts.c | 12160 | 1092 | #include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irqnr.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
/*
* /proc/interrupts
*/
/* seq_file iterator start: positions 0..nr_irqs inclusive are valid. */
static void *int_seq_start(struct seq_file *f, loff_t *pos)
{
	if (*pos > nr_irqs)
		return NULL;
	return pos;
}

/* seq_file iterator step: advance and stop past nr_irqs. */
static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	++*pos;
	return (*pos > nr_irqs) ? NULL : pos;
}
/* seq_file iterator stop: no state to release. */
static void int_seq_stop(struct seq_file *f, void *v)
{
/* Nothing to do */
}
/* show_interrupts() is provided by the architecture code. */
static const struct seq_operations int_seq_ops = {
.start = int_seq_start,
.next = int_seq_next,
.stop = int_seq_stop,
.show = show_interrupts
};
static int interrupts_open(struct inode *inode, struct file *filp)
{
return seq_open(filp, &int_seq_ops);
}
static const struct file_operations proc_interrupts_operations = {
.open = interrupts_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/* Register /proc/interrupts (read-only, world-readable). */
static int __init proc_interrupts_init(void)
{
proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
return 0;
}
module_init(proc_interrupts_init);
| gpl-2.0 |
TeamLGOG/lge-kernel-gee | drivers/macintosh/ams/ams-pmu.c | 14208 | 4492 | /*
* Apple Motion Sensor driver (PMU variant)
*
* Copyright (C) 2006 Michael Hanselmann (linux-kernel@hansmi.ch)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include "ams.h"
/* Attitude */
#define AMS_X 0x00
#define AMS_Y 0x01
#define AMS_Z 0x02
/* Not exactly known, maybe chip vendor */
#define AMS_VENDOR 0x03
/* Freefall registers */
#define AMS_FF_CLEAR 0x04
#define AMS_FF_ENABLE 0x05
#define AMS_FF_LOW_LIMIT 0x06
#define AMS_FF_DEBOUNCE 0x07
/* Shock registers */
#define AMS_SHOCK_CLEAR 0x08
#define AMS_SHOCK_ENABLE 0x09
#define AMS_SHOCK_HIGH_LIMIT 0x0a
#define AMS_SHOCK_DEBOUNCE 0x0b
/* Global interrupt and power control register */
#define AMS_CONTROL 0x0c
static u8 ams_pmu_cmd;
/* PMU completion callback; req->arg carries the waiter's completion. */
static void ams_pmu_req_complete(struct adb_request *req)
{
complete((struct completion *)req->arg);
}
/* Only call this function from task context */
/* Write one AMS register through a synchronous PMU request.
NOTE(review): 'req' is static, so concurrent callers would share it --
presumably callers are serialized; confirm.  A pmu_request() failure
is silently ignored. */
static void ams_pmu_set_register(u8 reg, u8 value)
{
static struct adb_request req;
DECLARE_COMPLETION(req_complete);
req.arg = &req_complete;
if (pmu_request(&req, ams_pmu_req_complete, 4, ams_pmu_cmd, 0x00, reg, value))
return;
wait_for_completion(&req_complete);
}
/* Only call this function from task context */
/* Only call this function from task context */
/* Read one AMS register through a synchronous PMU request; returns 0 on
any failure (indistinguishable from a register reading 0).
NOTE(review): 'req' is static -- same serialization assumption as
ams_pmu_set_register(). */
static u8 ams_pmu_get_register(u8 reg)
{
static struct adb_request req;
DECLARE_COMPLETION(req_complete);
req.arg = &req_complete;
if (pmu_request(&req, ams_pmu_req_complete, 3, ams_pmu_cmd, 0x01, reg))
return 0;
wait_for_completion(&req_complete);
if (req.reply_len > 0)
return req.reply[0];
else
return 0;
}
/*
 * Read-modify-write helper: bit 7 of each enable/control register gates
 * that interrupt source.  Factored out of the three identical copies
 * that used to live inline in ams_pmu_set_irq().
 */
static void ams_pmu_update_irq_reg(u8 reg, char enable)
{
	u8 val = ams_pmu_get_register(reg);

	if (enable)
		val |= 0x80;
	else
		val &= ~0x80;
	ams_pmu_set_register(reg, val);
}

/* Enables or disables the specified interrupts */
static void ams_pmu_set_irq(enum ams_irq reg, char enable)
{
	if (reg & AMS_IRQ_FREEFALL)
		ams_pmu_update_irq_reg(AMS_FF_ENABLE, enable);
	if (reg & AMS_IRQ_SHOCK)
		ams_pmu_update_irq_reg(AMS_SHOCK_ENABLE, enable);
	if (reg & AMS_IRQ_GLOBAL)
		ams_pmu_update_irq_reg(AMS_CONTROL, enable);
}
/* Acknowledge latched freefall/shock interrupts by writing their clear
registers (AMS_IRQ_GLOBAL has no clear register of its own). */
static void ams_pmu_clear_irq(enum ams_irq reg)
{
if (reg & AMS_IRQ_FREEFALL)
ams_pmu_set_register(AMS_FF_CLEAR, 0x00);
if (reg & AMS_IRQ_SHOCK)
ams_pmu_set_register(AMS_SHOCK_CLEAR, 0x00);
}
/* Read the vendor byte (see AMS_VENDOR: exact meaning not certain). */
static u8 ams_pmu_get_vendor(void)
{
return ams_pmu_get_register(AMS_VENDOR);
}
/* Read the three signed 8-bit acceleration axes. */
static void ams_pmu_get_xyz(s8 *x, s8 *y, s8 *z)
{
*x = ams_pmu_get_register(AMS_X);
*y = ams_pmu_get_register(AMS_Y);
*z = ams_pmu_get_register(AMS_Z);
}
/* Detach the sensor and quiesce the hardware on driver unload. */
static void ams_pmu_exit(void)
{
ams_sensor_detach();
/* Disable interrupts */
ams_pmu_set_irq(AMS_IRQ_ALL, 0);
/* Clear interrupts */
ams_pmu_clear_irq(AMS_IRQ_ALL);
ams_info.has_device = 0;
printk(KERN_INFO "ams: Unloading\n");
}
/* Probe and configure the PMU-variant motion sensor described by np.
Returns 0 on success or a negative errno. */
int __init ams_pmu_init(struct device_node *np)
{
const u32 *prop;
int result;
/* Set implementation stuff */
ams_info.of_node = np;
ams_info.exit = ams_pmu_exit;
ams_info.get_vendor = ams_pmu_get_vendor;
ams_info.get_xyz = ams_pmu_get_xyz;
ams_info.clear_irq = ams_pmu_clear_irq;
ams_info.bustype = BUS_HOST;
/* Get PMU command, should be 0x4e, but we can never know */
prop = of_get_property(ams_info.of_node, "reg", NULL);
if (!prop)
return -ENODEV;
/* the command byte lives in bits 15:8 of the "reg" property */
ams_pmu_cmd = ((*prop) >> 8) & 0xff;
/* Disable interrupts */
ams_pmu_set_irq(AMS_IRQ_ALL, 0);
/* Clear interrupts */
ams_pmu_clear_irq(AMS_IRQ_ALL);
result = ams_sensor_attach();
if (result < 0)
return result;
/* Set default values */
/* freefall/shock thresholds and debounce times -- magic values
carried over from the original driver; meanings per AMS_* regs */
ams_pmu_set_register(AMS_FF_LOW_LIMIT, 0x15);
ams_pmu_set_register(AMS_FF_ENABLE, 0x08);
ams_pmu_set_register(AMS_FF_DEBOUNCE, 0x14);
ams_pmu_set_register(AMS_SHOCK_HIGH_LIMIT, 0x60);
ams_pmu_set_register(AMS_SHOCK_ENABLE, 0x0f);
ams_pmu_set_register(AMS_SHOCK_DEBOUNCE, 0x14);
ams_pmu_set_register(AMS_CONTROL, 0x4f);
/* Clear interrupts */
ams_pmu_clear_irq(AMS_IRQ_ALL);
ams_info.has_device = 1;
/* Enable interrupts */
ams_pmu_set_irq(AMS_IRQ_ALL, 1);
printk(KERN_INFO "ams: Found PMU based motion sensor\n");
return 0;
}
| gpl-2.0 |
atniptw/PonyBuntu | drivers/media/common/tuners/mxl5007t.c | 129 | 20812 | /*
* mxl5007t.c - driver for the MaxLinear MxL5007T silicon tuner
*
* Copyright (C) 2008, 2009 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/i2c.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include "tuner-i2c.h"
#include "mxl5007t.h"
static DEFINE_MUTEX(mxl5007t_list_mutex);
static LIST_HEAD(hybrid_tuner_instance_list);
static int mxl5007t_debug;
module_param_named(debug, mxl5007t_debug, int, 0644);
MODULE_PARM_DESC(debug, "set debug level");
/* ------------------------------------------------------------------------- */
#define mxl_printk(kern, fmt, arg...) \
printk(kern "%s: " fmt "\n", __func__, ##arg)
#define mxl_err(fmt, arg...) \
mxl_printk(KERN_ERR, "%d: " fmt, __LINE__, ##arg)
#define mxl_warn(fmt, arg...) \
mxl_printk(KERN_WARNING, fmt, ##arg)
#define mxl_info(fmt, arg...) \
mxl_printk(KERN_INFO, fmt, ##arg)
#define mxl_debug(fmt, arg...) \
({ \
if (mxl5007t_debug) \
mxl_printk(KERN_DEBUG, fmt, ##arg); \
})
#define mxl_fail(ret) \
({ \
int __ret; \
__ret = (ret < 0); \
if (__ret) \
mxl_printk(KERN_ERR, "error %d on line %d", \
ret, __LINE__); \
__ret; \
})
/* ------------------------------------------------------------------------- */
#define MHz 1000000
enum mxl5007t_mode {
MxL_MODE_ISDBT = 0,
MxL_MODE_DVBT = 1,
MxL_MODE_ATSC = 2,
MxL_MODE_CABLE = 0x10,
};
enum mxl5007t_chip_version {
MxL_UNKNOWN_ID = 0x00,
MxL_5007_V1_F1 = 0x11,
MxL_5007_V1_F2 = 0x12,
MxL_5007_V4 = 0x14,
MxL_5007_V2_100_F1 = 0x21,
MxL_5007_V2_100_F2 = 0x22,
MxL_5007_V2_200_F1 = 0x23,
MxL_5007_V2_200_F2 = 0x24,
};
struct reg_pair_t {
u8 reg;
u8 val;
};
/* ------------------------------------------------------------------------- */
static struct reg_pair_t init_tab[] = {
{ 0x02, 0x06 },
{ 0x03, 0x48 },
{ 0x05, 0x04 },
{ 0x06, 0x10 },
{ 0x2e, 0x15 }, /* OVERRIDE */
{ 0x30, 0x10 }, /* OVERRIDE */
{ 0x45, 0x58 }, /* OVERRIDE */
{ 0x48, 0x19 }, /* OVERRIDE */
{ 0x52, 0x03 }, /* OVERRIDE */
{ 0x53, 0x44 }, /* OVERRIDE */
{ 0x6a, 0x4b }, /* OVERRIDE */
{ 0x76, 0x00 }, /* OVERRIDE */
{ 0x78, 0x18 }, /* OVERRIDE */
{ 0x7a, 0x17 }, /* OVERRIDE */
{ 0x85, 0x06 }, /* OVERRIDE */
{ 0x01, 0x01 }, /* TOP_MASTER_ENABLE */
{ 0, 0 }
};
static struct reg_pair_t init_tab_cable[] = {
{ 0x02, 0x06 },
{ 0x03, 0x48 },
{ 0x05, 0x04 },
{ 0x06, 0x10 },
{ 0x09, 0x3f },
{ 0x0a, 0x3f },
{ 0x0b, 0x3f },
{ 0x2e, 0x15 }, /* OVERRIDE */
{ 0x30, 0x10 }, /* OVERRIDE */
{ 0x45, 0x58 }, /* OVERRIDE */
{ 0x48, 0x19 }, /* OVERRIDE */
{ 0x52, 0x03 }, /* OVERRIDE */
{ 0x53, 0x44 }, /* OVERRIDE */
{ 0x6a, 0x4b }, /* OVERRIDE */
{ 0x76, 0x00 }, /* OVERRIDE */
{ 0x78, 0x18 }, /* OVERRIDE */
{ 0x7a, 0x17 }, /* OVERRIDE */
{ 0x85, 0x06 }, /* OVERRIDE */
{ 0x01, 0x01 }, /* TOP_MASTER_ENABLE */
{ 0, 0 }
};
/* ------------------------------------------------------------------------- */
static struct reg_pair_t reg_pair_rftune[] = {
{ 0x0f, 0x00 }, /* abort tune */
{ 0x0c, 0x15 },
{ 0x0d, 0x40 },
{ 0x0e, 0x0e },
{ 0x1f, 0x87 }, /* OVERRIDE */
{ 0x20, 0x1f }, /* OVERRIDE */
{ 0x21, 0x87 }, /* OVERRIDE */
{ 0x22, 0x1f }, /* OVERRIDE */
{ 0x80, 0x01 }, /* freq dependent */
{ 0x0f, 0x01 }, /* start tune */
{ 0, 0 }
};
/* ------------------------------------------------------------------------- */
/* Per-tuner driver state.  The tab_* arrays are writable copies of the
template register tables above, patched via set_reg_bits() before use. */
struct mxl5007t_state {
struct list_head hybrid_tuner_instance_list;
struct tuner_i2c_props i2c_props;
struct mutex lock;
struct mxl5007t_config *config;
enum mxl5007t_chip_version chip_id;
struct reg_pair_t tab_init[ARRAY_SIZE(init_tab)];
struct reg_pair_t tab_init_cable[ARRAY_SIZE(init_tab_cable)];
struct reg_pair_t tab_rftune[ARRAY_SIZE(reg_pair_rftune)];
u32 frequency;
u32 bandwidth;
};
/* ------------------------------------------------------------------------- */
/* called by _init and _rftun to manipulate the register arrays */
/*
 * Update the masked bits of register 'reg' in a register table.  The
 * tables are terminated by an all-zero { 0, 0 } entry.
 */
static void set_reg_bits(struct reg_pair_t *reg_pair, u8 reg, u8 mask, u8 val)
{
	unsigned int i;

	for (i = 0; reg_pair[i].reg || reg_pair[i].val; i++) {
		if (reg_pair[i].reg == reg)
			reg_pair[i].val = (reg_pair[i].val & ~mask) | val;
	}
}
/*
 * Copy each value from reg_pair1 into the matching register entry of
 * reg_pair2.  Both tables are terminated by an all-zero entry.
 *
 * Bug fix: the inner scan index must restart at 0 for every entry of
 * reg_pair1.  The original initialized j once and never reset it, so
 * after one match (or miss) any later source register appearing
 * *earlier* in reg_pair2 was silently skipped.  This matches the
 * upstream kernel fix for this function.
 */
static void copy_reg_bits(struct reg_pair_t *reg_pair1,
			  struct reg_pair_t *reg_pair2)
{
	unsigned int i, j;

	for (i = 0; reg_pair1[i].reg || reg_pair1[i].val; i++) {
		for (j = 0; reg_pair2[j].reg || reg_pair2[j].val; j++) {
			if (reg_pair1[i].reg == reg_pair2[j].reg) {
				reg_pair2[j].val = reg_pair1[i].val;
				break;
			}
		}
	}
}
/* ------------------------------------------------------------------------- */
/* Patch the init register table for the selected demod mode; cable mode
additionally programs the IF differential output level (0x0a). */
static void mxl5007t_set_mode_bits(struct mxl5007t_state *state,
enum mxl5007t_mode mode,
s32 if_diff_out_level)
{
switch (mode) {
case MxL_MODE_ATSC:
set_reg_bits(state->tab_init, 0x06, 0x1f, 0x12);
break;
case MxL_MODE_DVBT:
set_reg_bits(state->tab_init, 0x06, 0x1f, 0x11);
break;
case MxL_MODE_ISDBT:
set_reg_bits(state->tab_init, 0x06, 0x1f, 0x10);
break;
case MxL_MODE_CABLE:
set_reg_bits(state->tab_init_cable, 0x09, 0xff, 0xc1);
/* level is encoded as (8 - if_diff_out_level) */
set_reg_bits(state->tab_init_cable, 0x0a, 0xff,
8 - if_diff_out_level);
set_reg_bits(state->tab_init_cable, 0x0b, 0xff, 0x17);
break;
default:
mxl_fail(-EINVAL);
}
return;
}
/* program the IF output frequency selector (register 0x02, bits 3:0) and
 * the IF spectrum inversion flag (bit 4) into the init table */
static void mxl5007t_set_if_freq_bits(struct mxl5007t_state *state,
				      enum mxl5007t_if_freq if_freq,
				      int invert_if)
{
	u8 val;

	switch (if_freq) {
	case MxL_IF_4_MHZ:
		val = 0x00;
		break;
	case MxL_IF_4_5_MHZ:
		val = 0x02;
		break;
	case MxL_IF_4_57_MHZ:
		val = 0x03;
		break;
	case MxL_IF_5_MHZ:
		val = 0x04;
		break;
	case MxL_IF_5_38_MHZ:
		val = 0x05;
		break;
	case MxL_IF_6_MHZ:
		val = 0x06;
		break;
	case MxL_IF_6_28_MHZ:
		val = 0x07;
		break;
	case MxL_IF_9_1915_MHZ:
		val = 0x08;
		break;
	case MxL_IF_35_25_MHZ:
		val = 0x09;
		break;
	case MxL_IF_36_15_MHZ:
		val = 0x0a;
		break;
	case MxL_IF_44_MHZ:
		val = 0x0b;
		break;
	default:
		/* unknown IF frequency: leave the table untouched */
		mxl_fail(-EINVAL);
		return;
	}
	set_reg_bits(state->tab_init, 0x02, 0x0f, val);

	/* set inverted IF or normal IF */
	set_reg_bits(state->tab_init, 0x02, 0x10, invert_if ? 0x10 : 0x00);

	return;
}
static void mxl5007t_set_xtal_freq_bits(struct mxl5007t_state *state,
enum mxl5007t_xtal_freq xtal_freq)
{
switch (xtal_freq) {
case MxL_XTAL_16_MHZ:
/* select xtal freq & ref freq */
set_reg_bits(state->tab_init, 0x03, 0xf0, 0x00);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x00);
break;
case MxL_XTAL_20_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0x10);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x01);
break;
case MxL_XTAL_20_25_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0x20);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x02);
break;
case MxL_XTAL_20_48_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0x30);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x03);
break;
case MxL_XTAL_24_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0x40);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x04);
break;
case MxL_XTAL_25_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0x50);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x05);
break;
case MxL_XTAL_25_14_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0x60);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x06);
break;
case MxL_XTAL_27_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0x70);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x07);
break;
case MxL_XTAL_28_8_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0x80);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x08);
break;
case MxL_XTAL_32_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0x90);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x09);
break;
case MxL_XTAL_40_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0xa0);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x0a);
break;
case MxL_XTAL_44_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0xb0);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x0b);
break;
case MxL_XTAL_48_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0xc0);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x0c);
break;
case MxL_XTAL_49_3811_MHZ:
set_reg_bits(state->tab_init, 0x03, 0xf0, 0xd0);
set_reg_bits(state->tab_init, 0x05, 0x0f, 0x0d);
break;
default:
mxl_fail(-EINVAL);
return;
}
return;
}
/* build the initialization register list for @mode from the board config.
 * Returns a pointer into @state (tab_init or tab_init_cable); nothing is
 * allocated. */
static struct reg_pair_t *mxl5007t_calc_init_regs(struct mxl5007t_state *state,
						  enum mxl5007t_mode mode)
{
	struct mxl5007t_config *cfg = state->config;

	/* start from pristine copies of the template tables */
	memcpy(&state->tab_init, &init_tab, sizeof(init_tab));
	memcpy(&state->tab_init_cable, &init_tab_cable, sizeof(init_tab_cable));

	mxl5007t_set_mode_bits(state, mode, cfg->if_diff_out_level);
	mxl5007t_set_if_freq_bits(state, cfg->if_freq_hz, cfg->invert_if);
	mxl5007t_set_xtal_freq_bits(state, cfg->xtal_freq_hz);

	set_reg_bits(state->tab_init, 0x04, 0x01, cfg->loop_thru_enable);
	set_reg_bits(state->tab_init, 0x03, 0x08, cfg->clk_out_enable << 3);
	set_reg_bits(state->tab_init, 0x03, 0x07, cfg->clk_out_amp);

	/* NOTE(review): the comparison assumes cable modes sort last in
	 * enum mxl5007t_mode -- confirm against the enum declaration */
	if (mode >= MxL_MODE_CABLE) {
		/* merge the digital-mode tweaks into the cable table */
		copy_reg_bits(state->tab_init, state->tab_init_cable);
		return state->tab_init_cable;
	} else
		return state->tab_init;
}
/* ------------------------------------------------------------------------- */
/* channel bandwidths supported by the RF tuning path, in MHz */
enum mxl5007t_bw_mhz {
	MxL_BW_6MHz = 6,
	MxL_BW_7MHz = 7,
	MxL_BW_8MHz = 8,
};
/* program the channel bandwidth selector (register 0x0c, bits 5:0)
 * into the RF tune table */
static void mxl5007t_set_bw_bits(struct mxl5007t_state *state,
				 enum mxl5007t_bw_mhz bw)
{
	u8 val;

	switch (bw) {
	case MxL_BW_6MHz:
		val = 0x15; /* set DIG_MODEINDEX, DIG_MODEINDEX_A,
			     * and DIG_MODEINDEX_CSF */
		break;
	case MxL_BW_7MHz:
		val = 0x2a;
		break;
	case MxL_BW_8MHz:
		val = 0x3f;
		break;
	default:
		/* unsupported bandwidth: leave the table untouched */
		mxl_fail(-EINVAL);
		return;
	}
	set_reg_bits(state->tab_rftune, 0x0c, 0x3f, val);

	return;
}
/*
 * Build the channel-change register list for @rf_freq / @bw.
 * Returns a pointer to state->tab_rftune; nothing is allocated.
 */
static struct
reg_pair_t *mxl5007t_calc_rf_tune_regs(struct mxl5007t_state *state,
				       u32 rf_freq, enum mxl5007t_bw_mhz bw)
{
	u32 dig_rf_freq = 0;
	u32 temp;
	u32 frac_divider = 1000000;
	unsigned int i;

	/* start from a pristine copy of the template table
	 * (fixed: "&reg_pair_rftune" had been mangled to an HTML entity,
	 * which does not compile) */
	memcpy(&state->tab_rftune, &reg_pair_rftune, sizeof(reg_pair_rftune));

	mxl5007t_set_bw_bits(state, bw);

	/* Convert RF frequency into 16 bits =>
	 * 10 bit integer (MHz) + 6 bit fraction */
	dig_rf_freq = rf_freq / MHz;
	temp = rf_freq % MHz;
	for (i = 0; i < 6; i++) {
		dig_rf_freq <<= 1;
		frac_divider /= 2;
		if (temp > frac_divider) {
			temp -= frac_divider;
			dig_rf_freq++;
		}
	}

	/* add to have shift center point by 7.8124 kHz
	 * (half of the 15.625 kHz fractional step) */
	if (temp > 7812)
		dig_rf_freq++;

	set_reg_bits(state->tab_rftune, 0x0d, 0xff, (u8) dig_rf_freq);
	set_reg_bits(state->tab_rftune, 0x0e, 0xff, (u8) (dig_rf_freq >> 8));

	/* extra divider setting needed above 333 MHz */
	if (rf_freq >= 333000000)
		set_reg_bits(state->tab_rftune, 0x80, 0x40, 0x40);

	return state->tab_rftune;
}
/* ------------------------------------------------------------------------- */
/* write a single { register, value } pair over i2c.
 * Returns 0 on success, -EREMOTEIO on i2c failure. */
static int mxl5007t_write_reg(struct mxl5007t_state *state, u8 reg, u8 val)
{
	u8 buf[2];
	struct i2c_msg msg = {
		.addr = state->i2c_props.addr,
		.flags = 0,
		.buf = buf,
		.len = 2,
	};

	buf[0] = reg;
	buf[1] = val;

	if (i2c_transfer(state->i2c_props.adap, &msg, 1) == 1)
		return 0;

	mxl_err("failed!");
	return -EREMOTEIO;
}
/* send a zero-terminated register table to the chip, stopping at the
 * first write failure.  Returns 0 on success or the first error code. */
static int mxl5007t_write_regs(struct mxl5007t_state *state,
			       struct reg_pair_t *reg_pair)
{
	unsigned int i;
	int ret = 0;

	for (i = 0; !ret && (reg_pair[i].reg || reg_pair[i].val); i++)
		ret = mxl5007t_write_reg(state,
					 reg_pair[i].reg, reg_pair[i].val);

	return ret;
}
/* read one register: write { 0xfb, reg }, then read back a single byte.
 * NOTE(review): 0xfb looks like the chip's indirect register-read opcode;
 * confirm against the MxL5007T programming documentation.
 * Returns 0 on success, -EREMOTEIO on i2c failure. */
static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val)
{
	u8 buf[2] = { 0xfb, reg };
	struct i2c_msg msg[] = {
		{ .addr = state->i2c_props.addr, .flags = 0,
		  .buf = buf, .len = 2 },
		{ .addr = state->i2c_props.addr, .flags = I2C_M_RD,
		  .buf = val, .len = 1 },
	};
	int ret;

	ret = i2c_transfer(state->i2c_props.adap, msg, 2);
	if (ret != 2) {
		mxl_err("failed!");
		return -EREMOTEIO;
	}
	return 0;
}
/* soft-reset the chip by sending the single byte 0xff.
 * Returns 0 on success, -EREMOTEIO on i2c failure. */
static int mxl5007t_soft_reset(struct mxl5007t_state *state)
{
	u8 d = 0xff;
	struct i2c_msg msg = {
		.addr = state->i2c_props.addr,
		.flags = 0,
		.buf = &d,
		.len = 1,
	};

	if (i2c_transfer(state->i2c_props.adap, &msg, 1) == 1)
		return 0;

	mxl_err("failed!");
	return -EREMOTEIO;
}
/* reset the chip and send the mode-specific initialization table.
 * Returns 0 on success or a negative i2c error code. */
static int mxl5007t_tuner_init(struct mxl5007t_state *state,
			       enum mxl5007t_mode mode)
{
	struct reg_pair_t *init_regs;
	int ret;

	ret = mxl5007t_soft_reset(state);
	if (mxl_fail(ret))
		goto fail;

	/* calculate initialization reg array */
	init_regs = mxl5007t_calc_init_regs(state, mode);

	ret = mxl5007t_write_regs(state, init_regs);
	if (mxl_fail(ret))
		goto fail;
	/* let the chip settle after initialization */
	mdelay(1);
fail:
	return ret;
}
/* compute and send the channel-change register table for @rf_freq_hz/@bw.
 * Returns 0 on success or a negative i2c error code. */
static int mxl5007t_tuner_rf_tune(struct mxl5007t_state *state, u32 rf_freq_hz,
				  enum mxl5007t_bw_mhz bw)
{
	struct reg_pair_t *rf_tune_regs;
	int ret;

	/* calculate channel change reg array */
	rf_tune_regs = mxl5007t_calc_rf_tune_regs(state, rf_freq_hz, bw);

	ret = mxl5007t_write_regs(state, rf_tune_regs);
	if (mxl_fail(ret))
		goto fail;
	/* give the synthesizer time to settle after the retune */
	msleep(3);
fail:
	return ret;
}
/* ------------------------------------------------------------------------- */
/* read the synthesizer lock status from register 0xd8:
 * bits 3:2 == 11 -> RF synth locked, bits 1:0 == 11 -> ref synth locked.
 * Both outputs are cleared first so they are valid even on read failure. */
static int mxl5007t_synth_lock_status(struct mxl5007t_state *state,
				      int *rf_locked, int *ref_locked)
{
	u8 d;
	int ret;

	*rf_locked = 0;
	*ref_locked = 0;

	ret = mxl5007t_read_reg(state, 0xd8, &d);
	if (mxl_fail(ret))
		goto fail;

	if ((d & 0x0c) == 0x0c)
		*rf_locked = 1;

	if ((d & 0x03) == 0x03)
		*ref_locked = 1;
fail:
	return ret;
}
/* ------------------------------------------------------------------------- */
/* dvb_frontend get_status callback: report TUNER_STATUS_LOCKED if either
 * the RF or the reference synthesizer is locked.  The i2c gate is opened
 * around the register read and always closed again, even on failure. */
static int mxl5007t_get_status(struct dvb_frontend *fe, u32 *status)
{
	struct mxl5007t_state *state = fe->tuner_priv;
	int rf_locked, ref_locked, ret;

	*status = 0;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);

	ret = mxl5007t_synth_lock_status(state, &rf_locked, &ref_locked);
	if (mxl_fail(ret))
		goto fail;
	mxl_debug("%s%s", rf_locked ? "rf locked " : "",
		  ref_locked ? "ref locked" : "");

	if ((rf_locked) || (ref_locked))
		*status |= TUNER_STATUS_LOCKED;
fail:
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0);

	return ret;
}
/* ------------------------------------------------------------------------- */
/* dvb_frontend set_params callback: derive tuner mode and bandwidth from
 * the frontend parameters, then re-initialize and retune the chip under
 * state->lock with the i2c gate held open. */
static int mxl5007t_set_params(struct dvb_frontend *fe,
			       struct dvb_frontend_parameters *params)
{
	struct mxl5007t_state *state = fe->tuner_priv;
	enum mxl5007t_bw_mhz bw;
	enum mxl5007t_mode mode;
	int ret;
	u32 freq = params->frequency;

	if (fe->ops.info.type == FE_ATSC) {
		switch (params->u.vsb.modulation) {
		case VSB_8:
		case VSB_16:
			mode = MxL_MODE_ATSC;
			break;
		case QAM_64:
		case QAM_256:
			mode = MxL_MODE_CABLE;
			break;
		default:
			mxl_err("modulation not set!");
			return -EINVAL;
		}
		/* the ATSC path always uses a 6 MHz channel */
		bw = MxL_BW_6MHz;
	} else if (fe->ops.info.type == FE_OFDM) {
		switch (params->u.ofdm.bandwidth) {
		case BANDWIDTH_6_MHZ:
			bw = MxL_BW_6MHz;
			break;
		case BANDWIDTH_7_MHZ:
			bw = MxL_BW_7MHz;
			break;
		case BANDWIDTH_8_MHZ:
			bw = MxL_BW_8MHz;
			break;
		default:
			mxl_err("bandwidth not set!");
			return -EINVAL;
		}
		mode = MxL_MODE_DVBT;
	} else {
		mxl_err("modulation type not supported!");
		return -EINVAL;
	}

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);

	mutex_lock(&state->lock);

	ret = mxl5007t_tuner_init(state, mode);
	if (mxl_fail(ret))
		goto fail;

	ret = mxl5007t_tuner_rf_tune(state, freq, bw);
	if (mxl_fail(ret))
		goto fail;

	/* cache the result for get_frequency / get_bandwidth */
	state->frequency = freq;
	state->bandwidth = (fe->ops.info.type == FE_OFDM) ?
		params->u.ofdm.bandwidth : 0;
fail:
	mutex_unlock(&state->lock);

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0);

	return ret;
}
/* ------------------------------------------------------------------------- */
/* dvb_frontend init callback: wake the chip from standby by setting
 * register 0x01 (TOP_MASTER_ENABLE) to 1 */
static int mxl5007t_init(struct dvb_frontend *fe)
{
	struct mxl5007t_state *state = fe->tuner_priv;
	int ret;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);

	/* wake from standby */
	ret = mxl5007t_write_reg(state, 0x01, 0x01);
	mxl_fail(ret);

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0);

	return ret;
}
/* dvb_frontend sleep callback: put the chip into standby by clearing
 * register 0x01 (TOP_MASTER_ENABLE) and register 0x0f (tune control).
 * Returns the first error encountered, 0 if both writes succeed. */
static int mxl5007t_sleep(struct dvb_frontend *fe)
{
	struct mxl5007t_state *state = fe->tuner_priv;
	int ret, ret2;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);

	/* enter standby mode */
	ret = mxl5007t_write_reg(state, 0x01, 0x00);
	mxl_fail(ret);
	ret2 = mxl5007t_write_reg(state, 0x0f, 0x00);
	mxl_fail(ret2);

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0);

	/* previously only the second write's status was returned,
	 * silently dropping an error from the first write */
	return ret ? ret : ret2;
}
/* ------------------------------------------------------------------------- */
/* report the frequency cached by the last successful set_params */
static int mxl5007t_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
	*frequency = ((struct mxl5007t_state *) fe->tuner_priv)->frequency;
	return 0;
}
/* report the bandwidth value cached by the last successful set_params */
static int mxl5007t_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth)
{
	*bandwidth = ((struct mxl5007t_state *) fe->tuner_priv)->bandwidth;
	return 0;
}
/* dvb_frontend release callback: drop this frontend's reference on the
 * (possibly shared) tuner state under the global list mutex; the state is
 * freed when the last user releases it */
static int mxl5007t_release(struct dvb_frontend *fe)
{
	struct mxl5007t_state *state = fe->tuner_priv;

	mutex_lock(&mxl5007t_list_mutex);

	if (state)
		hybrid_tuner_release_state(state);

	mutex_unlock(&mxl5007t_list_mutex);

	fe->tuner_priv = NULL;

	return 0;
}
/* ------------------------------------------------------------------------- */
/* dvb_frontend tuner operations; copied into fe->ops.tuner_ops by
 * mxl5007t_attach() */
static struct dvb_tuner_ops mxl5007t_tuner_ops = {
	.info = {
		.name = "MaxLinear MxL5007T",
	},
	.init = mxl5007t_init,
	.sleep = mxl5007t_sleep,
	.set_params = mxl5007t_set_params,
	.get_status = mxl5007t_get_status,
	.get_frequency = mxl5007t_get_frequency,
	.get_bandwidth = mxl5007t_get_bandwidth,
	.release = mxl5007t_release,
};
/* identify the chip revision from register 0xd9 and store it in
 * state->chip_id.  An unrecognized id is logged and recorded as
 * MxL_UNKNOWN_ID; only an i2c read failure returns an error. */
static int mxl5007t_get_chip_id(struct mxl5007t_state *state)
{
	char *name;
	int ret;
	u8 id;

	ret = mxl5007t_read_reg(state, 0xd9, &id);
	if (mxl_fail(ret))
		goto fail;

	switch (id) {
	case MxL_5007_V1_F1:
		name = "MxL5007.v1.f1";
		break;
	case MxL_5007_V1_F2:
		name = "MxL5007.v1.f2";
		break;
	case MxL_5007_V2_100_F1:
		name = "MxL5007.v2.100.f1";
		break;
	case MxL_5007_V2_100_F2:
		name = "MxL5007.v2.100.f2";
		break;
	case MxL_5007_V2_200_F1:
		name = "MxL5007.v2.200.f1";
		break;
	case MxL_5007_V2_200_F2:
		name = "MxL5007.v2.200.f2";
		break;
	case MxL_5007_V4:
		name = "MxL5007T.v4";
		break;
	default:
		/* unknown revision: warn but do not fail the attach */
		name = "MxL5007T";
		printk(KERN_WARNING "%s: unknown rev (%02x)\n", __func__, id);
		id = MxL_UNKNOWN_ID;
	}
	state->chip_id = id;

	mxl_info("%s detected @ %d-%04x", name,
		 i2c_adapter_id(state->i2c_props.adap),
		 state->i2c_props.addr);

	return 0;
fail:
	mxl_warn("unable to identify device @ %d-%04x",
		 i2c_adapter_id(state->i2c_props.adap),
		 state->i2c_props.addr);

	state->chip_id = MxL_UNKNOWN_ID;
	return ret;
}
/* attach the tuner to a dvb_frontend.  Uses the hybrid tuner framework so
 * that analog and digital frontends on the same i2c address share one
 * state instance.  Returns @fe on success, NULL on failure. */
struct dvb_frontend *mxl5007t_attach(struct dvb_frontend *fe,
				     struct i2c_adapter *i2c, u8 addr,
				     struct mxl5007t_config *cfg)
{
	struct mxl5007t_state *state = NULL;
	int instance, ret;

	mutex_lock(&mxl5007t_list_mutex);
	instance = hybrid_tuner_request_state(struct mxl5007t_state, state,
					      hybrid_tuner_instance_list,
					      i2c, addr, "mxl5007t");
	switch (instance) {
	case 0:
		/* allocation failure */
		goto fail;
	case 1:
		/* new tuner instance */
		state->config = cfg;

		mutex_init(&state->lock);

		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 1);

		ret = mxl5007t_get_chip_id(state);

		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0);

		/* check return value of mxl5007t_get_chip_id */
		if (mxl_fail(ret))
			goto fail;
		break;
	default:
		/* existing tuner instance */
		break;
	}
	fe->tuner_priv = state;
	mutex_unlock(&mxl5007t_list_mutex);

	memcpy(&fe->ops.tuner_ops, &mxl5007t_tuner_ops,
	       sizeof(struct dvb_tuner_ops));

	return fe;
fail:
	mutex_unlock(&mxl5007t_list_mutex);

	/* NOTE(review): on this path fe->tuner_priv has not been set to the
	 * newly requested state, so mxl5007t_release() operates on the old
	 * fe->tuner_priv value -- verify the new instance is still freed */
	mxl5007t_release(fe);
	return NULL;
}
EXPORT_SYMBOL_GPL(mxl5007t_attach);
MODULE_DESCRIPTION("MaxLinear MxL5007T Silicon IC tuner driver");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.2");
/*
* Overrides for Emacs so that we follow Linus's tabbing style.
* ---------------------------------------------------------------------------
* Local variables:
* c-basic-offset: 8
* End:
*/
| gpl-2.0 |
sunny256/linux | drivers/edac/i3200_edac.c | 129 | 13091 | /*
* Intel 3200/3210 Memory Controller kernel module
* Copyright (C) 2008-2009 Akamai Technologies, Inc.
* Portions by Hitoshi Mitake <h.mitake@gmail.com>.
*
* This file may be distributed under the terms of the
* GNU General Public License.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include <linux/io.h>
#include "edac_module.h"
#include <linux/io-64-nonatomic-lo-hi.h>
#define EDAC_MOD_STR "i3200_edac"
#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
#define I3200_DIMMS 4
#define I3200_RANKS 8
#define I3200_RANKS_PER_CHANNEL 4
#define I3200_CHANNELS 2
/* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */
#define I3200_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */
#define I3200_MCHBAR_HIGH 0x4c
#define I3200_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */
#define I3200_MMR_WINDOW_SIZE 16384
#define I3200_TOM 0xa0 /* Top of Memory (16b)
*
* 15:10 reserved
* 9:0 total populated physical memory
*/
#define I3200_TOM_MASK 0x3ff /* bits 9:0 */
#define I3200_TOM_SHIFT 26 /* 64MiB grain */
#define I3200_ERRSTS 0xc8 /* Error Status Register (16b)
*
* 15 reserved
* 14 Isochronous TBWRR Run Behind FIFO Full
* (ITCV)
* 13 Isochronous TBWRR Run Behind FIFO Put
* (ITSTV)
* 12 reserved
* 11 MCH Thermal Sensor Event
* for SMI/SCI/SERR (GTSE)
* 10 reserved
* 9 LOCK to non-DRAM Memory Flag (LCKF)
* 8 reserved
* 7 DRAM Throttle Flag (DTF)
* 6:2 reserved
* 1 Multi-bit DRAM ECC Error Flag (DMERR)
* 0 Single-bit DRAM ECC Error Flag (DSERR)
*/
#define I3200_ERRSTS_UE 0x0002
#define I3200_ERRSTS_CE 0x0001
#define I3200_ERRSTS_BITS (I3200_ERRSTS_UE | I3200_ERRSTS_CE)
/* Intel MMIO register space - device 0 function 0 - MMR space */
#define I3200_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4)
*
* 15:10 reserved
* 9:0 Channel 0 DRAM Rank Boundary Address
*/
#define I3200_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */
#define I3200_DRB_MASK 0x3ff /* bits 9:0 */
#define I3200_DRB_SHIFT 26 /* 64MiB grain */
#define I3200_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b)
*
* 63:48 Error Column Address (ERRCOL)
* 47:32 Error Row Address (ERRROW)
* 31:29 Error Bank Address (ERRBANK)
* 28:27 Error Rank Address (ERRRANK)
* 26:24 reserved
* 23:16 Error Syndrome (ERRSYND)
* 15: 2 reserved
* 1 Multiple Bit Error Status (MERRSTS)
* 0 Correctable Error Status (CERRSTS)
*/
#define I3200_C1ECCERRLOG 0x680 /* Chan 1 ECC Error Log (64b) */
#define I3200_ECCERRLOG_CE 0x1
#define I3200_ECCERRLOG_UE 0x2
#define I3200_ECCERRLOG_RANK_BITS 0x18000000
#define I3200_ECCERRLOG_RANK_SHIFT 27
#define I3200_ECCERRLOG_SYNDROME_BITS 0xff0000
#define I3200_ECCERRLOG_SYNDROME_SHIFT 16
#define I3200_CAPID0 0xe0 /* P.95 of spec for details */
/* per-controller private data stored in mem_ctl_info->pvt_info */
struct i3200_priv {
	void __iomem *window;	/* mapped MCHBAR MMIO window */
};

/* channel count detected at probe time from CAPID0 (1 or 2) */
static int nr_channels;
/* determine the number of populated channels (1 or 2) from byte 8 of
 * the CAPID0 capability register */
static int how_many_channels(struct pci_dev *pdev)
{
	unsigned char capid0_8b;	/* 8th byte of CAPID0 */
	int n_channels = 2;		/* assume dual channel */

	pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b);

	if (capid0_8b & 0x20) {		/* DCD: Dual Channel Disable */
		edac_dbg(0, "In single channel mode\n");
		n_channels = 1;
	} else {
		edac_dbg(0, "In dual channel mode\n");
	}

	if (capid0_8b & 0x10)		/* check if both channels are filled */
		edac_dbg(0, "2 DIMMS per channel disabled\n");
	else
		edac_dbg(0, "2 DIMMS per channel enabled\n");

	return n_channels;
}
/* extract the 8-bit ECC syndrome field (ERRSYND, bits 23:16) from an
 * ECCERRLOG register value */
static unsigned long eccerrlog_syndrome(u64 log)
{
	u64 synd = log & I3200_ECCERRLOG_SYNDROME_BITS;

	return synd >> I3200_ECCERRLOG_SYNDROME_SHIFT;
}
/* map the ERRRANK field of an ECCERRLOG value to a csrow index; rows are
 * numbered consecutively across channels (rank < I3200_RANKS_PER_CHANNEL,
 * so addition equals the original bitwise OR) */
static int eccerrlog_row(int channel, u64 log)
{
	int rank = (int)((log & I3200_ECCERRLOG_RANK_BITS) >>
			 I3200_ECCERRLOG_RANK_SHIFT);

	return channel * I3200_RANKS_PER_CHANNEL + rank;
}
/* supported controller variants (index into i3200_devs) */
enum i3200_chips {
	I3200 = 0,
};

struct i3200_dev_info {
	const char *ctl_name;	/* controller name reported to EDAC core */
};

/* snapshot of the hardware error registers, taken by
 * i3200_get_and_clear_error_info() */
struct i3200_error_info {
	u16 errsts;	/* ERRSTS read before the ECC logs */
	u16 errsts2;	/* ERRSTS read after the ECC logs (race detection) */
	u64 eccerrlog[I3200_CHANNELS];
};

static const struct i3200_dev_info i3200_devs[] = {
	[I3200] = {
		.ctl_name = "i3200"
	},
};

/* the single host-bridge device; also used by the fallback probe path */
static struct pci_dev *mci_pdev;
/* zero when the fallback path in i3200_init() did the probe itself */
static int i3200_registered = 1;
/* acknowledge any pending single/multi-bit ECC error flags in ERRSTS */
static void i3200_clear_error_info(struct mem_ctl_info *mci)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * Clear any error bits.
	 * (Yes, we really clear bits by writing 1 to them.)
	 */
	pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS,
			 I3200_ERRSTS_BITS);
}
/* snapshot ERRSTS and the per-channel ECC logs into @info, re-reading the
 * logs if ERRSTS changed underneath us, then acknowledge the error bits */
static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci,
					   struct i3200_error_info *info)
{
	struct pci_dev *pdev;
	struct i3200_priv *priv = mci->pvt_info;
	void __iomem *window = priv->window;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * This is a mess because there is no atomic way to read all the
	 * registers at once and the registers can transition from CE being
	 * overwritten by UE.
	 */
	pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts);
	if (!(info->errsts & I3200_ERRSTS_BITS))
		return;

	info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
	if (nr_channels == 2)
		info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);

	pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2);

	/*
	 * If the error is the same for both reads then the first set
	 * of reads is valid.  If there is a change then there is a CE
	 * with no info and the second set of reads is valid and
	 * should be UE info.
	 */
	if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
		info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG);
		if (nr_channels == 2)
			info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG);
	}

	i3200_clear_error_info(mci);
}
/* translate a captured error snapshot into edac_mc_handle_error() reports.
 * NOTE(review): the "i3000 UE"/"i3000 CE" strings look like a copy/paste
 * from the i3000 driver; kept as-is since they are runtime-visible. */
static void i3200_process_error_info(struct mem_ctl_info *mci,
				     struct i3200_error_info *info)
{
	int channel;
	u64 log;

	if (!(info->errsts & I3200_ERRSTS_BITS))
		return;

	/* ERRSTS changed between reads: a CE was overwritten by a UE and
	 * only the second status word matches the captured logs */
	if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1, "UE overwrote CE", "");
		info->errsts = info->errsts2;
	}

	for (channel = 0; channel < nr_channels; channel++) {
		log = info->eccerrlog[channel];
		if (log & I3200_ECCERRLOG_UE) {
			edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
					     0, 0, 0,
					     eccerrlog_row(channel, log),
					     -1, -1,
					     "i3000 UE", "");
		} else if (log & I3200_ECCERRLOG_CE) {
			edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
					     0, 0, eccerrlog_syndrome(log),
					     eccerrlog_row(channel, log),
					     -1, -1,
					     "i3000 CE", "");
		}
	}
}
/* periodic EDAC poll callback: capture, report and clear pending errors */
static void i3200_check(struct mem_ctl_info *mci)
{
	struct i3200_error_info info;

	edac_dbg(1, "MC%d\n", mci->mc_idx);
	i3200_get_and_clear_error_info(mci, &info);
	i3200_process_error_info(mci, &info);
}
/* map the MCH memory-mapped register window described by the two 32-bit
 * MCHBAR config registers.  Returns the mapped window or NULL on failure.
 * NOTE(review): the union assembles the 64-bit BAR from low/high halves
 * and relies on little-endian struct layout -- fine for this x86-only
 * device, but worth confirming if ever reused. */
static void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
{
	union {
		u64 mchbar;
		struct {
			u32 mchbar_low;
			u32 mchbar_high;
		};
	} u;
	void __iomem *window;

	pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low);
	pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high);
	u.mchbar &= I3200_MCHBAR_MASK;

	/* refuse a BAR that does not fit in resource_size_t */
	if (u.mchbar != (resource_size_t)u.mchbar) {
		printk(KERN_ERR
		       "i3200: mmio space beyond accessible range (0x%llx)\n",
		       (unsigned long long)u.mchbar);
		return NULL;
	}

	window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE);
	if (!window)
		printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n",
		       (unsigned long long)u.mchbar);

	return window;
}
/* read the DRAM Rank Boundary registers for both channels into @drbs;
 * each 16-bit DRB is masked down to its 10 valid bits */
static void i3200_get_drbs(void __iomem *window,
			   u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
{
	int i;

	for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) {
		u16 c0 = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK;
		u16 c1 = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK;

		drbs[0][i] = c0;
		drbs[1][i] = c1;

		edac_dbg(0, "drb[0][%d] = %d, drb[1][%d] = %d\n", i, c0, i, c1);
	}
}
/* detect "stacked" (non-interleaved) channel mode: the last DRB of the
 * last channel equals Top Of Memory only when channel 1 is mapped above
 * channel 0 rather than interleaved with it */
static bool i3200_is_stacked(struct pci_dev *pdev,
			     u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL])
{
	u16 tom;

	pci_read_config_word(pdev, I3200_TOM, &tom);
	tom &= I3200_TOM_MASK;

	return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom;
}
/* compute the page count of one rank from the cumulative DRB values.
 * DRBs are running totals in 64 MiB units, so a rank's size is the delta
 * to the previous rank; in stacked mode channel 1 boundaries additionally
 * include all of channel 0, which must be subtracted from its last rank. */
static unsigned long drb_to_nr_pages(
	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked,
	int channel, int rank)
{
	int n;

	n = drbs[channel][rank];
	if (!n)
		return 0;

	if (rank > 0)
		n -= drbs[channel][rank - 1];
	if (stacked && (channel == 1) &&
	    drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1])
		n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1];

	/* convert from 64 MiB DRB units to pages */
	n <<= (I3200_DRB_SHIFT - PAGE_SHIFT);
	return n;
}
/* probe one i3200 host bridge: map MCHBAR, size the ranks from the DRB
 * registers, allocate and populate a mem_ctl_info and register it with
 * the EDAC core.  Returns 0 on success or a negative errno. */
static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc;
	int i, j;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
	bool stacked;
	void __iomem *window;
	struct i3200_priv *priv;

	edac_dbg(0, "MC:\n");

	window = i3200_map_mchbar(pdev);
	if (!window)
		return -ENODEV;

	i3200_get_drbs(window, drbs);
	nr_channels = how_many_channels(pdev);

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = I3200_DIMMS;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = nr_channels;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(struct i3200_priv));
	if (!mci)
		return -ENOMEM;

	edac_dbg(3, "MC: init mci\n");

	mci->pdev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR2;

	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;

	mci->mod_name = EDAC_MOD_STR;
	mci->ctl_name = i3200_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = i3200_check;	/* polled error checking */
	mci->ctl_page_to_phys = NULL;
	priv = mci->pvt_info;
	priv->window = window;

	stacked = i3200_is_stacked(pdev, drbs);

	/*
	 * The dram rank boundary (DRB) reg values are boundary addresses
	 * for each DRAM rank with a granularity of 64MB.  DRB regs are
	 * cumulative; the last one will contain the total memory
	 * contained in all ranks.
	 */
	for (i = 0; i < I3200_DIMMS; i++) {
		unsigned long nr_pages;

		for (j = 0; j < nr_channels; j++) {
			struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
						 mci->n_layers, i, j, 0);

			nr_pages = drb_to_nr_pages(drbs, stacked, j, i);
			if (nr_pages == 0)
				continue;

			edac_dbg(0, "csrow %d, channel %d%s, size = %ld Mb\n", i, j,
				 stacked ? " (stacked)" : "", PAGES_TO_MiB(nr_pages));

			dimm->nr_pages = nr_pages;
			dimm->grain = nr_pages << PAGE_SHIFT;
			dimm->mtype = MEM_DDR2;
			dimm->dtype = DEV_UNKNOWN;
			dimm->edac_mode = EDAC_UNKNOWN;
		}
	}

	/* start from a clean error state */
	i3200_clear_error_info(mci);

	rc = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
		goto fail;
	}

	/* get this far and it's successful */
	edac_dbg(3, "MC: success\n");
	return 0;

fail:
	iounmap(window);
	if (mci)
		edac_mc_free(mci);

	return rc;
}
/* PCI probe callback: enable the device and hand off to i3200_probe1();
 * also caches the device pointer for the module-exit fallback path */
static int i3200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;

	edac_dbg(0, "MC:\n");

	if (pci_enable_device(pdev) < 0)
		return -EIO;

	rc = i3200_probe1(pdev, ent->driver_data);
	if (!mci_pdev)
		mci_pdev = pci_dev_get(pdev);

	return rc;
}
/* PCI remove callback: unregister from EDAC, unmap MCHBAR and free mci */
static void i3200_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct i3200_priv *priv;

	edac_dbg(0, "\n");

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	priv = mci->pvt_info;
	iounmap(priv->window);

	edac_mc_free(mci);

	pci_disable_device(pdev);
}
/* PCI id table: match the Intel 3200/3210 host bridge */
static const struct pci_device_id i3200_pci_tbl[] = {
	{
		PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		I3200},
	{
		0,
	}	/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, i3200_pci_tbl);

static struct pci_driver i3200_driver = {
	.name = EDAC_MOD_STR,
	.probe = i3200_init_one,
	.remove = i3200_remove_one,
	.id_table = i3200_pci_tbl,
};
/* module init: register the PCI driver; if the device was not probed via
 * the normal PCI path (e.g. it is owned by another driver), look it up
 * directly and probe it by hand (i3200_registered = 0 marks that case) */
static int __init i3200_init(void)
{
	int pci_rc;

	edac_dbg(3, "MC:\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i3200_driver);
	if (pci_rc < 0)
		goto fail0;

	if (!mci_pdev) {
		i3200_registered = 0;
		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					  PCI_DEVICE_ID_INTEL_3200_HB, NULL);
		if (!mci_pdev) {
			edac_dbg(0, "i3200 pci_get_device fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}

		pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl);
		if (pci_rc < 0) {
			edac_dbg(0, "i3200 init fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}
	}

	return 0;

fail1:
	pci_unregister_driver(&i3200_driver);

fail0:
	/* pci_dev_put(NULL) is a no-op, so this is safe on every path */
	pci_dev_put(mci_pdev);
	return pci_rc;
}
/* module exit: if the fallback path probed the device manually, tear it
 * down manually too; otherwise the PCI core calls i3200_remove_one() */
static void __exit i3200_exit(void)
{
	edac_dbg(3, "MC:\n");

	pci_unregister_driver(&i3200_driver);
	if (!i3200_registered) {
		i3200_remove_one(mci_pdev);
		pci_dev_put(mci_pdev);
	}
}
module_init(i3200_init);
module_exit(i3200_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akamai Technologies, Inc.");
MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
| gpl-2.0 |
KangBangKreations/KangBangKore-Kernel | arch/sparc/kernel/signal32.c | 129 | 26242 | /* arch/sparc64/kernel/signal32.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/bitops.h>
#include <linux/tracehook.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/psrcompat.h>
#include <asm/fpumacro.h>
#include <asm/visasm.h>
#include <asm/compat_signal.h>
#include <asm/switch_to.h>
#include "sigutil.h"
/* This magic should be in g_upper[0] for all upper parts
* to be valid.
*/
#define SIGINFO_EXTRA_V8PLUS_MAGIC 0x130e269
/* upper 32 bits of the 64-bit globals/outs plus the %asi register, saved
 * alongside the 32-bit frame for v8plus tasks (valid only when g_upper[0]
 * holds SIGINFO_EXTRA_V8PLUS_MAGIC) */
typedef struct {
	unsigned int g_upper[8];
	unsigned int o_upper[8];
	unsigned int asi;
} siginfo_extra_v8plus_t;
/* 32-bit (non-RT) signal frame layout pushed on the user stack; field
 * order is userspace ABI and must not change */
struct signal_frame32 {
	struct sparc_stackf32 ss;
	__siginfo32_t info;
	/* __siginfo_fpu_t * */ u32 fpu_save;	/* user pointer, may be 0 */
	unsigned int insns[2];			/* sigreturn trampoline */
	unsigned int extramask[_COMPAT_NSIG_WORDS - 1];
	unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
	/* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
	siginfo_extra_v8plus_t v8plus;
	/* __siginfo_rwin_t * */u32 rwin_save;	/* user pointer, may be 0 */
} __attribute__((aligned(8)));
/* 32-bit layout of siginfo_t as seen by compat userspace; mirrors the
 * native siginfo_t with pointers and longs shrunk to 32 bits */
typedef struct compat_siginfo{
	int si_signo;
	int si_errno;
	int si_code;

	union {
		int _pad[SI_PAD_SIZE32];

		/* kill() */
		struct {
			compat_pid_t _pid;	/* sender's pid */
			unsigned int _uid;	/* sender's uid */
		} _kill;

		/* POSIX.1b timers */
		struct {
			compat_timer_t _tid;		/* timer id */
			int _overrun;			/* overrun count */
			compat_sigval_t _sigval;	/* same as below */
			int _sys_private;	/* not to be passed to user */
		} _timer;

		/* POSIX.1b signals */
		struct {
			compat_pid_t _pid;	/* sender's pid */
			unsigned int _uid;	/* sender's uid */
			compat_sigval_t _sigval;
		} _rt;

		/* SIGCHLD */
		struct {
			compat_pid_t _pid;	/* which child */
			unsigned int _uid;	/* sender's uid */
			int _status;		/* exit code */
			compat_clock_t _utime;
			compat_clock_t _stime;
		} _sigchld;

		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
		struct {
			u32 _addr;	/* faulting insn/memory ref. */
			int _trapno;
		} _sigfault;

		/* SIGPOLL */
		struct {
			int _band;	/* POLL_IN, POLL_OUT, POLL_MSG */
			int _fd;
		} _sigpoll;
	} _sifields;
}compat_siginfo_t;
/* 32-bit RT signal frame layout pushed on the user stack; field order is
 * userspace ABI and must not change */
struct rt_signal_frame32 {
	struct sparc_stackf32 ss;
	compat_siginfo_t info;
	struct pt_regs32 regs;
	compat_sigset_t mask;
	/* __siginfo_fpu_t * */ u32 fpu_save;	/* user pointer, may be 0 */
	unsigned int insns[2];			/* sigreturn trampoline */
	stack_t32 stack;			/* sigaltstack state */
	unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
	/* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
	siginfo_extra_v8plus_t v8plus;
	/* __siginfo_rwin_t * */u32 rwin_save;	/* user pointer, may be 0 */
} __attribute__((aligned(8)));
/* copy a native 64-bit siginfo_t out to a 32-bit compat userspace buffer,
 * converting only the union member selected by si_code.
 * Returns 0 on success, -EFAULT on a faulting user access. */
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	   this code is fixed accordingly.
	   It should never copy any pad contained in the structure
	   to avoid security leaks, but must copy the generic
	   3 ints plus the relevant union member.
	   This routine must convert siginfo from 64bit to 32bit as well
	   at the same time.  */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	if (from->si_code < 0)
		/* negative si_code: raw union, copy it wholesale */
		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
	else {
		switch (from->si_code >> 16) {
		case __SI_TIMER >> 16:
			err |= __put_user(from->si_tid, &to->si_tid);
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
			/* deliberate fall through: _sigchld also carries
			 * pid/uid, which the default case copies */
		default:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			break;
		case __SI_FAULT >> 16:
			err |= __put_user(from->si_trapno, &to->si_trapno);
			/* pointer truncated to 32 bits for compat tasks */
			err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
			break;
		case __SI_POLL >> 16:
			err |= __put_user(from->si_band, &to->si_band);
			err |= __put_user(from->si_fd, &to->si_fd);
			break;
		case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
		case __SI_MESGQ >> 16:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		}
	}
	return err;
}
/* CAUTION: This is just a very minimalist implementation for the
* sake of compat_sys_rt_sigqueueinfo()
*/
/* copy the three header ints plus the raw union from a 32-bit compat
 * siginfo in userspace; no per-member conversion is performed.
 * NOTE(review): access_ok() is called with VERIFY_WRITE although @from is
 * only read -- harmless (WRITE implies READ) but worth confirming. */
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
	if (!access_ok(VERIFY_WRITE, from, sizeof(compat_siginfo_t)))
		return -EFAULT;

	if (copy_from_user(to, from, 3*sizeof(int)) ||
	    copy_from_user(to->_sifields._pad, from->_sifields._pad,
			   SI_PAD_SIZE))
		return -EFAULT;

	return 0;
}
/*
 * Return from a 32-bit (compat) signal handler that used the non-RT
 * frame built by setup_frame32().  Restores PC/nPC, %y, the integer
 * registers, condition codes, optional FPU and register-window state,
 * and the saved signal mask.  A malformed frame kills the task with
 * SIGSEGV.
 */
void do_sigreturn32(struct pt_regs *regs)
{
	struct signal_frame32 __user *sf;
	compat_uptr_t fpu_save;
	compat_uptr_t rwin_save;
	unsigned int psr;
	unsigned pc, npc;
	sigset_t set;
	unsigned seta[_COMPAT_NSIG_WORDS];
	int err, i;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack();

	/* The compat frame pointer is only 32 bits wide. */
	regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];

	/* 1. Make sure we are not getting garbage from the user */
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
	    (((unsigned long) sf) & 3))
		goto segv;

	if (get_user(pc, &sf->info.si_regs.pc) ||
	    __get_user(npc, &sf->info.si_regs.npc))
		goto segv;

	/* Instruction addresses must be word aligned. */
	if ((pc | npc) & 3)
		goto segv;

	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;

	/* 2. Restore the state */
	err = __get_user(regs->y, &sf->info.si_regs.y);
	err |= __get_user(psr, &sf->info.si_regs.psr);

	for (i = UREG_G1; i <= UREG_I7; i++)
		err |= __get_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
	if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
		/* v8plus frame: recover the upper 32 bits of the globals
		 * and the saved alternate ASI, but only when the magic
		 * value confirms the extra area was actually written.
		 */
		err |= __get_user(i, &sf->v8plus.g_upper[0]);
		if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
			unsigned long asi;

			for (i = UREG_G1; i <= UREG_I7; i++)
				err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
			err |= __get_user(asi, &sf->v8plus.asi);
			regs->tstate &= ~TSTATE_ASI;
			regs->tstate |= ((asi & 0xffUL) << 24UL);
		}
	}

	/* User can only change condition codes in %tstate. */
	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
	regs->tstate |= psr_to_tstate_icc(psr);

	/* Prevent syscall restart. */
	pt_regs_clear_syscall(regs);

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (!err && fpu_save)
		err |= restore_fpu_state(regs, compat_ptr(fpu_save));
	err |= __get_user(rwin_save, &sf->rwin_save);
	if (!err && rwin_save) {
		if (restore_rwin_state(compat_ptr(rwin_save)))
			goto segv;
	}
	err |= __get_user(seta[0], &sf->info.si_mask);
	err |= copy_from_user(seta+1, &sf->extramask,
			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
	if (err)
		goto segv;

	/* Reassemble each 64-bit sigset word from two 32-bit halves. */
	switch (_NSIG_WORDS) {
	case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
	case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
	case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
	case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
	}
	set_current_blocked(&set);
	return;

segv:
	force_sig(SIGSEGV, current);
}
/*
 * Return from a 32-bit (compat) RT signal handler using the frame built
 * by setup_rt_frame32().  Restores PC/nPC, %y, the integer registers,
 * condition codes, optional FPU and register-window state, the
 * sigaltstack settings and the saved signal mask.  A malformed frame
 * kills the task with SIGSEGV.
 */
asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
{
	struct rt_signal_frame32 __user *sf;
	unsigned int psr, pc, npc, u_ss_sp;
	compat_uptr_t fpu_save;
	compat_uptr_t rwin_save;
	mm_segment_t old_fs;
	sigset_t set;
	compat_sigset_t seta;
	stack_t st;
	int err, i;

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	synchronize_user_stack();

	/* The compat frame pointer is only 32 bits wide. */
	regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];

	/* 1. Make sure we are not getting garbage from the user */
	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
	    (((unsigned long) sf) & 3))
		goto segv;

	if (get_user(pc, &sf->regs.pc) ||
	    __get_user(npc, &sf->regs.npc))
		goto segv;

	/* Instruction addresses must be word aligned. */
	if ((pc | npc) & 3)
		goto segv;

	if (test_thread_flag(TIF_32BIT)) {
		pc &= 0xffffffff;
		npc &= 0xffffffff;
	}
	regs->tpc = pc;
	regs->tnpc = npc;

	/* 2. Restore the state */
	err = __get_user(regs->y, &sf->regs.y);
	err |= __get_user(psr, &sf->regs.psr);

	for (i = UREG_G1; i <= UREG_I7; i++)
		err |= __get_user(regs->u_regs[i], &sf->regs.u_regs[i]);
	if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
		/* v8plus frame: recover the upper 32 bits of the globals
		 * and the saved alternate ASI when the magic matches.
		 */
		err |= __get_user(i, &sf->v8plus.g_upper[0]);
		if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
			unsigned long asi;

			for (i = UREG_G1; i <= UREG_I7; i++)
				err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
			err |= __get_user(asi, &sf->v8plus.asi);
			regs->tstate &= ~TSTATE_ASI;
			regs->tstate |= ((asi & 0xffUL) << 24UL);
		}
	}

	/* User can only change condition codes in %tstate. */
	regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
	regs->tstate |= psr_to_tstate_icc(psr);

	/* Prevent syscall restart. */
	pt_regs_clear_syscall(regs);

	err |= __get_user(fpu_save, &sf->fpu_save);
	if (!err && fpu_save)
		err |= restore_fpu_state(regs, compat_ptr(fpu_save));
	err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t));
	err |= __get_user(u_ss_sp, &sf->stack.ss_sp);
	st.ss_sp = compat_ptr(u_ss_sp);
	err |= __get_user(st.ss_flags, &sf->stack.ss_flags);
	err |= __get_user(st.ss_size, &sf->stack.ss_size);
	if (err)
		goto segv;

	/* It is more difficult to avoid calling this function than to
	   call it and ignore errors.  The KERNEL_DS switch is needed
	   because 'st' lives on the kernel stack. */
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf);
	set_fs(old_fs);

	err |= __get_user(rwin_save, &sf->rwin_save);
	if (!err && rwin_save) {
		if (restore_rwin_state(compat_ptr(rwin_save)))
			goto segv;
	}

	/* Reassemble each 64-bit sigset word from two 32-bit halves. */
	switch (_NSIG_WORDS) {
	case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32);
	case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32);
	case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
	case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
	}
	set_current_blocked(&set);
	return;

segv:
	force_sig(SIGSEGV, current);
}
/* Checks if the fp is valid.  Returns non-zero when the frame pointer
 * is misaligned or the frame would not fit below the 4GB compat
 * address-space boundary.
 */
static int invalid_frame_pointer(void __user *fp, int fplen)
{
	unsigned long addr = (unsigned long) fp;

	if (addr & 7)
		return 1;
	if (addr > 0x100000000ULL - fplen)
		return 1;

	return 0;
}
/* Compute the user stack address at which a signal frame of the given
 * size will be built, honouring SA_ONSTACK (alternate signal stack)
 * and enforcing 16-byte alignment.
 */
static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
{
	unsigned long sp;

	/* The compat frame pointer is only 32 bits wide. */
	regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	sp = regs->u_regs[UREG_FP];

	/*
	 * If we are on the alternate signal stack and would overflow it, don't.
	 * Return an always-bogus address instead so we will die with SIGSEGV.
	 */
	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
		return (void __user *) -1L;

	/* This is the X/Open sanctioned signal stack switching. */
	if (sa->sa_flags & SA_ONSTACK) {
		if (sas_ss_flags(sp) == 0)
			sp = current->sas_ss_sp + current->sas_ss_size;
	}

	sp -= framesize;

	/* Always align the stack frame. This handles two cases. First,
	 * sigaltstack need not be mindful of platform specific stack
	 * alignment. Second, if we took this signal because the stack
	 * is not aligned properly, we'd like to take the signal cleanly
	 * and report that.
	 */
	sp &= ~15UL;

	return (void __user *) sp;
}
/* The I-cache flush instruction only works in the primary ASI, which
 * right now is the nucleus, aka. kernel space.
 *
 * Therefore we have to kick the instructions out using the kernel
 * side linear mapping of the physical address backing the user
 * instructions.
 */
static void flush_signal_insns(unsigned long address)
{
	unsigned long pstate, paddr;
	pte_t *ptep, pte;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;

	/* Commit all stores of the instructions we are about to flush. */
	wmb();

	/* Disable cross-call reception. In this way even a very wide
	 * munmap() on another cpu can't tear down the page table
	 * hierarchy from underneath us, since that can't complete
	 * until the IPI tlb flush returns.
	 */
	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	__asm__ __volatile__("wrpr %0, %1, %%pstate"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Walk the page tables by hand; bail silently if any level of
	 * the hierarchy is absent (nothing mapped => nothing to flush).
	 */
	pgdp = pgd_offset(current->mm, address);
	if (pgd_none(*pgdp))
		goto out_irqs_on;
	pudp = pud_offset(pgdp, address);
	if (pud_none(*pudp))
		goto out_irqs_on;
	pmdp = pmd_offset(pudp, address);
	if (pmd_none(*pmdp))
		goto out_irqs_on;

	ptep = pte_offset_map(pmdp, address);
	pte = *ptep;
	if (!pte_present(pte))
		goto out_unmap;

	/* Flush through the kernel linear mapping of the backing page. */
	paddr = (unsigned long) page_address(pte_page(pte));

	__asm__ __volatile__("flush %0 + %1"
			     : /* no outputs */
			     : "r" (paddr),
			       "r" (address & (PAGE_SIZE - 1))
			     : "memory");

out_unmap:
	pte_unmap(ptep);

out_irqs_on:
	/* Restore the previous interrupt-enable state. */
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
}
/*
 * Build a 32-bit (compat) non-RT signal frame on the user stack and
 * redirect the task to its handler.  Returns 0 on success; on an
 * invalid frame pointer the task exits with SIGILL (-EINVAL), and on
 * a fault while writing the frame SIGSEGV is forced (-EFAULT).
 *
 * Fix: the register-window save path took the thread_info pointer via
 * a corrupted token ("¤t_thread_info") -- restored to the intended
 * "&current_thread_info()" address-of expression.
 */
static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
			 int signo, sigset_t *oldset)
{
	struct signal_frame32 __user *sf;
	int i, err, wsaved;
	void __user *tail;
	int sigframe_size;
	u32 psr;
	unsigned int seta[_COMPAT_NSIG_WORDS];

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	wsaved = get_thread_wsaved();

	/* The base frame is followed by optional FPU and register-window
	 * save areas; include them when sizing the frame.
	 */
	sigframe_size = sizeof(*sf);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		sigframe_size += sizeof(__siginfo_fpu_t);
	if (wsaved)
		sigframe_size += sizeof(__siginfo_rwin_t);

	sf = (struct signal_frame32 __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);

	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill;

	tail = (sf + 1);

	/* 2. Save the current process state */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	err = put_user(regs->tpc, &sf->info.si_regs.pc);
	err |= __put_user(regs->tnpc, &sf->info.si_regs.npc);
	err |= __put_user(regs->y, &sf->info.si_regs.y);
	psr = tstate_to_psr(regs->tstate);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &sf->info.si_regs.psr);
	for (i = 0; i < 16; i++)
		err |= __put_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
	err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);

	/* v8plus extra area: upper 32 bits of the globals plus the
	 * current alternate ASI, tagged with a magic so sigreturn can
	 * tell whether it is valid.
	 */
	err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
	for (i = 1; i < 16; i++)
		err |= __put_user(((u32 *)regs->u_regs)[2*i],
				  &sf->v8plus.g_upper[i]);
	err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
			  &sf->v8plus.asi);

	if (psr & PSR_EF) {
		__siginfo_fpu_t __user *fp = tail;
		tail += sizeof(*fp);
		err |= save_fpu_state(regs, fp);
		err |= __put_user((u64)fp, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}
	if (wsaved) {
		__siginfo_rwin_t __user *rwp = tail;
		tail += sizeof(*rwp);
		err |= save_rwin_state(wsaved, rwp);
		err |= __put_user((u64)rwp, &sf->rwin_save);
		set_thread_wsaved(0);
	} else {
		err |= __put_user(0, &sf->rwin_save);
	}

	/* Split each 64-bit sigset word into two 32-bit halves. */
	switch (_NSIG_WORDS) {
	case 4: seta[7] = (oldset->sig[3] >> 32);
		seta[6] = oldset->sig[3];
	case 3: seta[5] = (oldset->sig[2] >> 32);
		seta[4] = oldset->sig[2];
	case 2: seta[3] = (oldset->sig[1] >> 32);
		seta[2] = oldset->sig[1];
	case 1: seta[1] = (oldset->sig[0] >> 32);
		seta[0] = oldset->sig[0];
	}
	err |= __put_user(seta[0], &sf->info.si_mask);
	err |= __copy_to_user(sf->extramask, seta + 1,
			      (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));

	if (!wsaved) {
		err |= copy_in_user((u32 __user *)sf,
				    (u32 __user *)(regs->u_regs[UREG_FP]),
				    sizeof(struct reg_window32));
	} else {
		struct reg_window *rp;

		/* Window still buffered in thread_info: copy it into the
		 * user frame by hand (mojibake fixed here).
		 */
		rp = &current_thread_info()->reg_window[wsaved - 1];
		for (i = 0; i < 8; i++)
			err |= __put_user(rp->locals[i], &sf->ss.locals[i]);
		for (i = 0; i < 6; i++)
			err |= __put_user(rp->ins[i], &sf->ss.ins[i]);
		err |= __put_user(rp->ins[6], &sf->ss.fp);
		err |= __put_user(rp->ins[7], &sf->ss.callers_pc);
	}
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signo;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->info;

	/* 4. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* 5. return to kernel instructions */
	if (ka->ka_restorer) {
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	} else {
		/* No restorer: emit a two-instruction sigreturn trampoline
		 * into the frame and flush it out of the I-cache.
		 */
		unsigned long address = ((unsigned long)&(sf->insns[0]));

		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);

		err = __put_user(0x821020d8, &sf->insns[0]); /*mov __NR_sigreturn, %g1*/
		err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/
		if (err)
			goto sigsegv;

		flush_signal_insns(address);
	}
	return 0;

sigill:
	do_exit(SIGILL);
	return -EINVAL;

sigsegv:
	force_sigsegv(signo, current);
	return -EFAULT;
}
/*
 * Build a 32-bit (compat) RT signal frame (with siginfo and
 * sigaltstack state) on the user stack and redirect the task to its
 * handler.  Returns 0 on success; SIGILL/-EINVAL on an invalid frame
 * pointer, SIGSEGV/-EFAULT on a fault while writing the frame.
 *
 * Fix: the register-window save path took the thread_info pointer via
 * a corrupted token ("¤t_thread_info") -- restored to the intended
 * "&current_thread_info()" address-of expression.
 */
static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
			    unsigned long signr, sigset_t *oldset,
			    siginfo_t *info)
{
	struct rt_signal_frame32 __user *sf;
	int i, err, wsaved;
	void __user *tail;
	int sigframe_size;
	u32 psr;
	compat_sigset_t seta;

	/* 1. Make sure everything is clean */
	synchronize_user_stack();
	save_and_clear_fpu();

	wsaved = get_thread_wsaved();

	/* The base frame is followed by optional FPU and register-window
	 * save areas; include them when sizing the frame.
	 */
	sigframe_size = sizeof(*sf);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		sigframe_size += sizeof(__siginfo_fpu_t);
	if (wsaved)
		sigframe_size += sizeof(__siginfo_rwin_t);

	sf = (struct rt_signal_frame32 __user *)
		get_sigframe(&ka->sa, regs, sigframe_size);

	if (invalid_frame_pointer(sf, sigframe_size))
		goto sigill;

	tail = (sf + 1);

	/* 2. Save the current process state */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	err = put_user(regs->tpc, &sf->regs.pc);
	err |= __put_user(regs->tnpc, &sf->regs.npc);
	err |= __put_user(regs->y, &sf->regs.y);
	psr = tstate_to_psr(regs->tstate);
	if (current_thread_info()->fpsaved[0] & FPRS_FEF)
		psr |= PSR_EF;
	err |= __put_user(psr, &sf->regs.psr);
	for (i = 0; i < 16; i++)
		err |= __put_user(regs->u_regs[i], &sf->regs.u_regs[i]);
	err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);

	/* v8plus extra area: upper 32 bits of the globals plus the
	 * current alternate ASI, tagged with a magic so sigreturn can
	 * tell whether it is valid.
	 */
	err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
	for (i = 1; i < 16; i++)
		err |= __put_user(((u32 *)regs->u_regs)[2*i],
				  &sf->v8plus.g_upper[i]);
	err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
			  &sf->v8plus.asi);

	if (psr & PSR_EF) {
		__siginfo_fpu_t __user *fp = tail;
		tail += sizeof(*fp);
		err |= save_fpu_state(regs, fp);
		err |= __put_user((u64)fp, &sf->fpu_save);
	} else {
		err |= __put_user(0, &sf->fpu_save);
	}
	if (wsaved) {
		__siginfo_rwin_t __user *rwp = tail;
		tail += sizeof(*rwp);
		err |= save_rwin_state(wsaved, rwp);
		err |= __put_user((u64)rwp, &sf->rwin_save);
		set_thread_wsaved(0);
	} else {
		err |= __put_user(0, &sf->rwin_save);
	}

	/* Update the siginfo structure. */
	err |= copy_siginfo_to_user32(&sf->info, info);

	/* Setup sigaltstack */
	err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);

	/* Split each 64-bit sigset word into two 32-bit halves. */
	switch (_NSIG_WORDS) {
	case 4: seta.sig[7] = (oldset->sig[3] >> 32);
		seta.sig[6] = oldset->sig[3];
	case 3: seta.sig[5] = (oldset->sig[2] >> 32);
		seta.sig[4] = oldset->sig[2];
	case 2: seta.sig[3] = (oldset->sig[1] >> 32);
		seta.sig[2] = oldset->sig[1];
	case 1: seta.sig[1] = (oldset->sig[0] >> 32);
		seta.sig[0] = oldset->sig[0];
	}
	err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));

	if (!wsaved) {
		err |= copy_in_user((u32 __user *)sf,
				    (u32 __user *)(regs->u_regs[UREG_FP]),
				    sizeof(struct reg_window32));
	} else {
		struct reg_window *rp;

		/* Window still buffered in thread_info: copy it into the
		 * user frame by hand (mojibake fixed here).
		 */
		rp = &current_thread_info()->reg_window[wsaved - 1];
		for (i = 0; i < 8; i++)
			err |= __put_user(rp->locals[i], &sf->ss.locals[i]);
		for (i = 0; i < 6; i++)
			err |= __put_user(rp->ins[i], &sf->ss.ins[i]);
		err |= __put_user(rp->ins[6], &sf->ss.fp);
		err |= __put_user(rp->ins[7], &sf->ss.callers_pc);
	}
	if (err)
		goto sigsegv;

	/* 3. signal handler back-trampoline and parameters */
	regs->u_regs[UREG_FP] = (unsigned long) sf;
	regs->u_regs[UREG_I0] = signr;
	regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
	regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;

	/* 4. signal handler */
	regs->tpc = (unsigned long) ka->sa.sa_handler;
	regs->tnpc = (regs->tpc + 4);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}

	/* 5. return to kernel instructions */
	if (ka->ka_restorer)
		regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
	else {
		/* No restorer: emit a two-instruction rt_sigreturn
		 * trampoline into the frame and flush the I-cache.
		 */
		unsigned long address = ((unsigned long)&(sf->insns[0]));

		regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);

		/* mov __NR_rt_sigreturn, %g1 */
		err |= __put_user(0x82102065, &sf->insns[0]);

		/* t 0x10 */
		err |= __put_user(0x91d02010, &sf->insns[1]);
		if (err)
			goto sigsegv;

		flush_signal_insns(address);
	}
	return 0;

sigill:
	do_exit(SIGILL);
	return -EINVAL;

sigsegv:
	force_sigsegv(signr, current);
	return -EFAULT;
}
/* Deliver one signal to a compat task: build the appropriate frame
 * (RT when the handler requested SA_SIGINFO, legacy otherwise) and,
 * if that succeeded, complete delivery bookkeeping.
 */
static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
				   siginfo_t *info,
				   sigset_t *oldset, struct pt_regs *regs)
{
	int err;

	err = (ka->sa.sa_flags & SA_SIGINFO)
		? setup_rt_frame32(ka, regs, signr, oldset, info)
		: setup_frame32(ka, regs, signr, oldset);
	if (err)
		return;

	signal_delivered(signr, info, ka, regs, 0);
}
/* Fix up an interrupted system call before delivering a signal:
 * depending on the error code in %i0 and the handler's SA_RESTART
 * flag, either report EINTR or rewind the PC pair so the trap
 * instruction is re-executed with the original %i0.
 */
static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
				     struct sigaction *sa)
{
	switch (regs->u_regs[UREG_I0]) {
	case ERESTART_RESTARTBLOCK:
	case ERESTARTNOHAND:
	no_system_call_restart:
		/* Never restart these around a handler: fail with EINTR
		 * (the carry bit marks the syscall as having failed).
		 */
		regs->u_regs[UREG_I0] = EINTR;
		regs->tstate |= TSTATE_ICARRY;
		break;
	case ERESTARTSYS:
		if (!(sa->sa_flags & SA_RESTART))
			goto no_system_call_restart;
		/* fallthrough */
	case ERESTARTNOINTR:
		/* Replay the syscall: restore %i0, back up tpc/tnpc. */
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
	}
}
/* Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Main compat signal dispatch: pick the next deliverable signal, fix
 * up any interrupted syscall, and either deliver the signal or arrange
 * for the syscall to be restarted.
 */
void do_signal32(sigset_t *oldset, struct pt_regs * regs)
{
	struct k_sigaction ka;
	unsigned long orig_i0;
	int restart_syscall;
	siginfo_t info;
	int signr;

	signr = get_signal_to_deliver(&info, &ka, regs, NULL);

	restart_syscall = 0;
	orig_i0 = 0;
	if (pt_regs_is_syscall(regs) &&
	    (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
		restart_syscall = 1;
		/* NOTE(review): the original syscall %i0 appears to be
		 * stashed in %g6 by the entry path - confirm against the
		 * syscall trap code.
		 */
		orig_i0 = regs->u_regs[UREG_G6];
	}

	if (signr > 0) {
		if (restart_syscall)
			syscall_restart32(orig_i0, regs, &ka.sa);
		handle_signal32(signr, &ka, &info, oldset, regs);
		return;
	}
	if (restart_syscall &&
	    (regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
	     regs->u_regs[UREG_I0] == ERESTARTSYS ||
	     regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
		/* replay the system call when we are done */
		regs->u_regs[UREG_I0] = orig_i0;
		regs->tpc -= 4;
		regs->tnpc -= 4;
		pt_regs_clear_syscall(regs);
	}
	if (restart_syscall &&
	    regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
		/* Restart via the restart_syscall syscall number in %g1. */
		regs->u_regs[UREG_G1] = __NR_restart_syscall;
		regs->tpc -= 4;
		regs->tnpc -= 4;
		pt_regs_clear_syscall(regs);
	}

	/* If there's no signal to deliver, we just put the saved sigmask
	 * back
	 */
	restore_saved_sigmask();
}
/* Layout of the 32-bit sigstack structure exchanged with user space
 * by do_sys32_sigstack() below.
 */
struct sigstack32 {
	u32 the_stack;	/* 32-bit user pointer to the top of the stack */
	int cur_status;	/* non-zero if currently running on the signal stack */
};
/* Legacy 32-bit sigstack(2) emulation: optionally report the current
 * signal-stack state and/or install a new one.  Returns 0 on success,
 * -EFAULT on user-access failure, -EPERM when attempting to change the
 * stack while running on it.
 */
asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp)
{
	struct sigstack32 __user *ssptr =
		(struct sigstack32 __user *)((unsigned long)(u_ssptr));
	struct sigstack32 __user *ossptr =
		(struct sigstack32 __user *)((unsigned long)(u_ossptr));
	int ret = -EFAULT;

	/* First see if old state is wanted. */
	if (ossptr) {
		if (put_user(current->sas_ss_sp + current->sas_ss_size,
			     &ossptr->the_stack) ||
		    __put_user(on_sig_stack(sp), &ossptr->cur_status))
			goto out;
	}

	/* Now see if we want to update the new state. */
	if (ssptr) {
		u32 ss_sp;

		if (get_user(ss_sp, &ssptr->the_stack))
			goto out;

		/* If the current stack was set with sigaltstack, don't
		 * swap stacks while we are on it.
		 */
		ret = -EPERM;
		if (current->sas_ss_sp && on_sig_stack(sp))
			goto out;

		/* Since we don't know the extent of the stack, and we don't
		 * track onstack-ness, but rather calculate it, we must
		 * presume a size. Ho hum this interface is lossy.
		 */
		current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
		current->sas_ss_size = SIGSTKSZ;
	}

	ret = 0;
out:
	return ret;
}
/* Compat sigaltstack(2): translate 32-bit stack_t structures to the
 * native layout, call do_sigaltstack() under KERNEL_DS (the translated
 * copies live on the kernel stack), and translate the result back.
 */
asmlinkage long do_sys32_sigaltstack(u32 ussa, u32 uossa, unsigned long sp)
{
	stack_t uss, uoss;
	u32 u_ss_sp = 0;
	int ret;
	mm_segment_t old_fs;
	stack_t32 __user *uss32 = compat_ptr(ussa);
	stack_t32 __user *uoss32 = compat_ptr(uossa);

	/* Pull in the new settings, if any were supplied. */
	if (ussa && (get_user(u_ss_sp, &uss32->ss_sp) ||
		     __get_user(uss.ss_flags, &uss32->ss_flags) ||
		     __get_user(uss.ss_size, &uss32->ss_size)))
		return -EFAULT;
	uss.ss_sp = compat_ptr(u_ss_sp);

	old_fs = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack(ussa ? (stack_t __user *) &uss : NULL,
			     uossa ? (stack_t __user *) &uoss : NULL, sp);
	set_fs(old_fs);

	/* Copy the previous settings back out in 32-bit form. */
	if (!ret && uossa && (put_user(ptr_to_compat(uoss.ss_sp), &uoss32->ss_sp) ||
			      __put_user(uoss.ss_flags, &uoss32->ss_flags) ||
			      __put_user(uoss.ss_size, &uoss32->ss_size)))
		return -EFAULT;
	return ret;
}
| gpl-2.0 |
linux-scraping/linux-grsecurity | drivers/s390/char/hmcdrv_ftp.c | 385 | 8150 | /*
* HMC Drive FTP Services
*
* Copyright IBM Corp. 2013
* Author(s): Ralf Hoppe (rhoppe@de.ibm.com)
*/
#define KMSG_COMPONENT "hmcdrv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/crc16.h>
#include "hmcdrv_ftp.h"
#include "hmcdrv_cache.h"
#include "sclp_ftp.h"
#include "diag_ftp.h"
/**
* struct hmcdrv_ftp_ops - HMC drive FTP operations
* @startup: startup function
* @shutdown: shutdown function
* @cmd: FTP transfer function
*/
struct hmcdrv_ftp_ops {
int (*startup)(void);
void (*shutdown)(void);
ssize_t (*transfer)(const struct hmcdrv_ftp_cmdspec *ftp,
size_t *fsize);
};
static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len);
static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp);
static const struct hmcdrv_ftp_ops *hmcdrv_ftp_funcs; /* current operations */
static DEFINE_MUTEX(hmcdrv_ftp_mutex); /* mutex for hmcdrv_ftp_funcs */
static unsigned hmcdrv_ftp_refcnt; /* start/shutdown reference counter */
/**
* hmcdrv_ftp_cmd_getid() - determine FTP command ID from a command string
* @cmd: FTP command string (NOT zero-terminated)
* @len: length of FTP command string in @cmd
*/
static enum hmcdrv_ftp_cmdid hmcdrv_ftp_cmd_getid(const char *cmd, int len)
{
	/* HMC FTP command descriptor */
	struct hmcdrv_ftp_cmd_desc {
		const char *str; /* command string */
		enum hmcdrv_ftp_cmdid cmd; /* associated command as enum */
	};

	/* Description of all HMC drive FTP commands
	 *
	 * Notes:
	 * 1. Array size should be a prime number.
	 * 2. Do not change the order of commands in table (because the
	 *    index is determined by CRC % ARRAY_SIZE).
	 * 3. Original command 'nlist' was renamed, else the CRC would
	 *    collide with 'append' (see point 2).
	 */
	static const struct hmcdrv_ftp_cmd_desc ftpcmds[7] = {
		{.str = "get", /* [0] get (CRC = 0x68eb) */
		 .cmd = HMCDRV_FTP_GET},
		{.str = "dir", /* [1] dir (CRC = 0x6a9e) */
		 .cmd = HMCDRV_FTP_DIR},
		{.str = "delete", /* [2] delete (CRC = 0x53ae) */
		 .cmd = HMCDRV_FTP_DELETE},
		{.str = "nls", /* [3] nls (CRC = 0xf87c) */
		 .cmd = HMCDRV_FTP_NLIST},
		{.str = "put", /* [4] put (CRC = 0xac56) */
		 .cmd = HMCDRV_FTP_PUT},
		{.str = "append", /* [5] append (CRC = 0xf56e) */
		 .cmd = HMCDRV_FTP_APPEND},
		{.str = NULL} /* [6] unused */
	};

	const struct hmcdrv_ftp_cmd_desc *pdesc;

	u16 crc = 0xffffU;

	if (len == 0)
		return HMCDRV_FTP_NOOP; /* error indicator */

	/* Perfect-hash style lookup: the CRC-16 of the command string
	 * selects the table slot directly; a string compare then
	 * confirms the match.
	 */
	crc = crc16(crc, cmd, len);
	pdesc = ftpcmds + (crc % ARRAY_SIZE(ftpcmds));
	pr_debug("FTP command '%s' has CRC 0x%04x, at table pos. %lu\n",
		 cmd, crc, (crc % ARRAY_SIZE(ftpcmds)));

	if (!pdesc->str || strncmp(pdesc->str, cmd, len))
		return HMCDRV_FTP_NOOP;

	pr_debug("FTP command '%s' found, with ID %d\n",
		 pdesc->str, pdesc->cmd);

	return pdesc->cmd;
}
/**
* hmcdrv_ftp_parse() - HMC drive FTP command parser
* @cmd: FTP command string "<cmd> <filename>"
* @ftp: Pointer to FTP command specification buffer (output)
*
* Return: 0 on success, else a (negative) error code
*/
static int hmcdrv_ftp_parse(char *cmd, struct hmcdrv_ftp_cmdspec *ftp)
{
	int argc = 0;

	ftp->id = HMCDRV_FTP_NOOP;
	ftp->fname = NULL;

	for (;;) {
		char *tok;

		/* skip leading whitespace before the next token */
		while (isspace(*cmd))
			++cmd;

		if (*cmd == '\0')
			break;

		tok = cmd;
		if (argc == 0) {
			/* first token: the FTP command word */
			while ((*cmd != '\0') && !isspace(*cmd))
				++cmd;
			ftp->id = hmcdrv_ftp_cmd_getid(tok, cmd - tok);
		} else if (argc == 1) {
			/* second token: the file name, up to end of line */
			while ((*cmd != '\0') && !iscntrl(*cmd))
				++cmd;
			ftp->fname = tok;
			*cmd = '\0';
		} else {
			*cmd = '\0';
		}

		++argc;
	}

	if (!ftp->fname || (ftp->id == HMCDRV_FTP_NOOP))
		return -EINVAL;

	return 0;
}
/**
* hmcdrv_ftp_do() - perform a HMC drive FTP, with data from kernel-space
* @ftp: pointer to FTP command specification
*
* Return: number of bytes read/written or a negative error code
*/
ssize_t hmcdrv_ftp_do(const struct hmcdrv_ftp_cmdspec *ftp)
{
	ssize_t ret = -ENXIO;

	/* Serialize against concurrent startup/shutdown; refuse the
	 * transfer unless the backend has been started.
	 */
	mutex_lock(&hmcdrv_ftp_mutex);
	if (hmcdrv_ftp_funcs && hmcdrv_ftp_refcnt) {
		pr_debug("starting transfer, cmd %d for '%s' at %lld with %zd bytes\n",
			 ftp->id, ftp->fname, (long long) ftp->ofs, ftp->len);
		ret = hmcdrv_cache_cmd(ftp, hmcdrv_ftp_funcs->transfer);
	}
	mutex_unlock(&hmcdrv_ftp_mutex);

	return ret;
}
EXPORT_SYMBOL(hmcdrv_ftp_do);
/**
* hmcdrv_ftp_probe() - probe for the HMC drive FTP service
*
* Return: 0 if service is available, else an (negative) error code
*/
int hmcdrv_ftp_probe(void)
{
	int rc;

	/* A harmless NOOP request with an empty file name is enough to
	 * see whether the service answers at all.
	 */
	struct hmcdrv_ftp_cmdspec ftp = {
		.id = HMCDRV_FTP_NOOP,
		.ofs = 0,
		.fname = "",
		.len = PAGE_SIZE
	};

	/* DMA-capable bounce page for the probe transfer. */
	ftp.buf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);

	if (!ftp.buf)
		return -ENOMEM;

	rc = hmcdrv_ftp_startup();

	if (rc)
		goto out;

	rc = hmcdrv_ftp_do(&ftp);
	hmcdrv_ftp_shutdown();

	switch (rc) {
	case -ENOENT: /* no such file/media or currently busy, */
	case -EBUSY: /* but service seems to be available */
		rc = 0;
		break;
	default: /* leave 'rc' as it is for [0, -EPERM, -E...] */
		if (rc > 0)
			rc = 0; /* clear length (success) */
		break;
	} /* switch */
out:
	free_page((unsigned long) ftp.buf);
	return rc;
}
EXPORT_SYMBOL(hmcdrv_ftp_probe);
/**
* hmcdrv_ftp_cmd() - Perform a HMC drive FTP, with data from user-space
*
* @cmd: FTP command string "<cmd> <filename>"
* @offset: file position to read/write
* @buf: user-space buffer for read/written directory/file
* @len: size of @buf (read/dir) or number of bytes to write
*
* This function must not be called before hmcdrv_ftp_startup() was called.
*
* Return: number of bytes read/written or a negative error code
*/
ssize_t hmcdrv_ftp_cmd(char __kernel *cmd, loff_t offset,
		       char __user *buf, size_t len)
{
	int order;

	struct hmcdrv_ftp_cmdspec ftp = {.len = len, .ofs = offset};
	ssize_t retlen = hmcdrv_ftp_parse(cmd, &ftp);

	if (retlen)
		return retlen;

	/* DMA-capable bounce buffer sized to the request; user memory
	 * is never handed to the transfer backend directly.
	 */
	order = get_order(ftp.len);
	ftp.buf = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, order);
	if (!ftp.buf)
		return -ENOMEM;

	switch (ftp.id) {
	case HMCDRV_FTP_DIR: /* read-type commands: transfer, then copy out */
	case HMCDRV_FTP_NLIST:
	case HMCDRV_FTP_GET:
		retlen = hmcdrv_ftp_do(&ftp);

		if ((retlen >= 0) &&
		    copy_to_user(buf, ftp.buf, retlen))
			retlen = -EFAULT;
		break;

	case HMCDRV_FTP_PUT: /* write-type commands: copy in, then transfer */
	case HMCDRV_FTP_APPEND:
		if (!copy_from_user(ftp.buf, buf, ftp.len))
			retlen = hmcdrv_ftp_do(&ftp);
		else
			retlen = -EFAULT;
		break;

	case HMCDRV_FTP_DELETE: /* no data payload */
		retlen = hmcdrv_ftp_do(&ftp);
		break;

	default:
		retlen = -EOPNOTSUPP;
		break;
	}

	free_pages((unsigned long) ftp.buf, order);
	return retlen;
}
/**
* hmcdrv_ftp_startup() - startup of HMC drive FTP functionality for a
* dedicated (owner) instance
*
* Return: 0 on success, else an (negative) error code
*/
int hmcdrv_ftp_startup(void)
{
	/* z/VM guests use the DIAG-based transport ... */
	static const struct hmcdrv_ftp_ops hmcdrv_ftp_zvm = {
		.startup = diag_ftp_startup,
		.shutdown = diag_ftp_shutdown,
		.transfer = diag_ftp_cmd
	};

	/* ... while LPAR/KVM use the SCLP-based transport. */
	static const struct hmcdrv_ftp_ops hmcdrv_ftp_lpar = {
		.startup = sclp_ftp_startup,
		.shutdown = sclp_ftp_shutdown,
		.transfer = sclp_ftp_cmd
	};

	int rc = 0;

	mutex_lock(&hmcdrv_ftp_mutex); /* block transfers while start-up */

	/* Only the first user selects and starts the backend; later
	 * callers just bump the reference count.
	 */
	if (hmcdrv_ftp_refcnt == 0) {
		if (MACHINE_IS_VM)
			hmcdrv_ftp_funcs = &hmcdrv_ftp_zvm;
		else if (MACHINE_IS_LPAR || MACHINE_IS_KVM)
			hmcdrv_ftp_funcs = &hmcdrv_ftp_lpar;
		else
			rc = -EOPNOTSUPP;

		if (hmcdrv_ftp_funcs)
			rc = hmcdrv_ftp_funcs->startup();
	}

	if (!rc)
		++hmcdrv_ftp_refcnt;

	mutex_unlock(&hmcdrv_ftp_mutex);
	return rc;
}
EXPORT_SYMBOL(hmcdrv_ftp_startup);
/**
* hmcdrv_ftp_shutdown() - shutdown of HMC drive FTP functionality for a
* dedicated (owner) instance
*/
void hmcdrv_ftp_shutdown(void)
{
mutex_lock(&hmcdrv_ftp_mutex);
--hmcdrv_ftp_refcnt;
if ((hmcdrv_ftp_refcnt == 0) && hmcdrv_ftp_funcs)
hmcdrv_ftp_funcs->shutdown();
mutex_unlock(&hmcdrv_ftp_mutex);
}
EXPORT_SYMBOL(hmcdrv_ftp_shutdown);
| gpl-2.0 |
niker/elitekernel_oxp_42 | drivers/net/wireless/ath/ath5k/caps.c | 385 | 4058 | /*
* Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/**************\
* Capabilities *
\**************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "base.h"
/*
* Fill the capabilities struct
* TODO: Merge this with EEPROM code when we are done with it
*/
int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
{
	struct ath5k_capabilities *caps = &ah->ah_capabilities;
	u16 ee_header;

	/* Capabilities stored in the EEPROM */
	ee_header = caps->cap_eeprom.ee_header;

	if (ah->ah_version == AR5K_AR5210) {
		/*
		 * Set radio capabilities
		 * (The AR5110 only supports the middle 5GHz band)
		 */
		caps->cap_range.range_5ghz_min = 5120;
		caps->cap_range.range_5ghz_max = 5430;
		caps->cap_range.range_2ghz_min = 0;
		caps->cap_range.range_2ghz_max = 0;

		/* Set supported modes */
		__set_bit(AR5K_MODE_11A, caps->cap_mode);
	} else {
		/*
		 * XXX The transceiver supports frequencies from 4920 to 6100MHz
		 * XXX and from 2312 to 2732MHz. There are problems with the
		 * XXX current ieee80211 implementation because the IEEE
		 * XXX channel mapping does not support negative channel
		 * XXX numbers (2312MHz is channel -19). Of course, this
		 * XXX doesn't matter because these channels are out of the
		 * XXX legal range.
		 */

		/*
		 * Set radio capabilities
		 */
		if (AR5K_EEPROM_HDR_11A(ee_header)) {
			/* 4.9GHz band is only opened up where the
			 * regulatory domain allows it.
			 */
			if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain))
				caps->cap_range.range_5ghz_min = 4920;
			else
				caps->cap_range.range_5ghz_min = 5005;
			caps->cap_range.range_5ghz_max = 6100;

			/* Set supported modes */
			__set_bit(AR5K_MODE_11A, caps->cap_mode);
		}

		/* Enable 802.11b if a 2GHz capable radio (2111/5112) is
		 * connected */
		if (AR5K_EEPROM_HDR_11B(ee_header) ||
		    (AR5K_EEPROM_HDR_11G(ee_header) &&
		     ah->ah_version != AR5K_AR5211)) {
			/* 2312 */
			caps->cap_range.range_2ghz_min = 2412;
			caps->cap_range.range_2ghz_max = 2732;

			if (AR5K_EEPROM_HDR_11B(ee_header))
				__set_bit(AR5K_MODE_11B, caps->cap_mode);

			if (AR5K_EEPROM_HDR_11G(ee_header) &&
			    ah->ah_version != AR5K_AR5211)
				__set_bit(AR5K_MODE_11G, caps->cap_mode);
		}
	}

	/* RAD5112 2GHz-only parts cannot do 11a even if the EEPROM
	 * header claims otherwise.
	 */
	if ((ah->ah_radio_5ghz_revision & 0xf0) == AR5K_SREV_RAD_2112)
		__clear_bit(AR5K_MODE_11A, caps->cap_mode);

	/* Set number of supported TX queues */
	if (ah->ah_version == AR5K_AR5210)
		caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
	else
		caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;

	/* newer hardware has PHY error counters */
	if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
		caps->cap_has_phyerr_counters = true;
	else
		caps->cap_has_phyerr_counters = false;

	return 0;
}
/*
* TODO: Following functions should be part of a new function
* set_capability
*/
/* Enable PS-Poll handling by clearing the no-PS-Poll bit.  Only
 * implemented for the AR5210; all other MAC versions return -EIO.
 */
int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
			   u16 assoc_id)
{
	if (ah->ah_version != AR5K_AR5210)
		return -EIO;

	AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
		AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
	return 0;
}
/* Disable PS-Poll handling by setting the no-PS-Poll bit.  Only
 * implemented for the AR5210; all other MAC versions return -EIO.
 */
int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
{
	if (ah->ah_version != AR5K_AR5210)
		return -EIO;

	AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
		AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
	return 0;
}
| gpl-2.0 |
leyarx/android_kernel_wexler_qc750 | arch/arm/mach-pxa/magician.c | 385 | 17661 | /*
* Support for HTC Magician PDA phones:
* i-mate JAM, O2 Xda mini, Orange SPV M500, Qtek s100, Qtek s110
* and T-Mobile MDA Compact.
*
* Copyright (c) 2006-2007 Philipp Zabel
*
* Based on hx4700.c, spitz.c and others.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/mfd/htc-egpio.h>
#include <linux/mfd/htc-pasic3.h>
#include <linux/mtd/physmap.h>
#include <linux/pda_power.h>
#include <linux/pwm_backlight.h>
#include <linux/regulator/bq24022.h>
#include <linux/regulator/machine.h>
#include <linux/usb/gpio_vbus.h>
#include <linux/i2c/pxa-i2c.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/pxa27x.h>
#include <mach/magician.h>
#include <mach/pxafb.h>
#include <mach/mmc.h>
#include <mach/irda.h>
#include <mach/ohci.h>
#include "devices.h"
#include "generic.h"
/*
 * MFP (pin mux) configuration for the whole board, applied once from
 * magician_init().
 *
 * Fix: the I2C pins (GPIO117/GPIO118) were listed twice in the
 * original table; the redundant second pair has been removed.
 * Re-configuring the same MFP twice is harmless but pointless.
 */
static unsigned long magician_pin_config[] __initdata = {

	/* SDRAM and Static Memory I/O Signals */
	GPIO20_nSDCS_2,
	GPIO21_nSDCS_3,
	GPIO15_nCS_1,
	GPIO78_nCS_2,	/* PASIC3 */
	GPIO79_nCS_3,	/* EGPIO CPLD */
	GPIO80_nCS_4,
	GPIO33_nCS_5,

	/* I2C */
	GPIO117_I2C_SCL,
	GPIO118_I2C_SDA,

	/* PWM 0 */
	GPIO16_PWM0_OUT,

	/* I2S */
	GPIO28_I2S_BITCLK_OUT,
	GPIO29_I2S_SDATA_IN,
	GPIO31_I2S_SYNC,
	GPIO113_I2S_SYSCLK,

	/* SSP 1 */
	GPIO23_SSP1_SCLK,
	GPIO24_SSP1_SFRM,
	GPIO25_SSP1_TXD,

	/* SSP 2 */
	GPIO19_SSP2_SCLK,
	GPIO14_SSP2_SFRM,
	GPIO89_SSP2_TXD,
	GPIO88_SSP2_RXD,

	/* MMC */
	GPIO32_MMC_CLK,
	GPIO92_MMC_DAT_0,
	GPIO109_MMC_DAT_1,
	GPIO110_MMC_DAT_2,
	GPIO111_MMC_DAT_3,
	GPIO112_MMC_CMD,

	/* LCD */
	GPIOxx_LCD_TFT_16BPP,

	/* QCI */
	GPIO12_CIF_DD_7,
	GPIO17_CIF_DD_6,
	GPIO50_CIF_DD_3,
	GPIO51_CIF_DD_2,
	GPIO52_CIF_DD_4,
	GPIO53_CIF_MCLK,
	GPIO54_CIF_PCLK,
	GPIO55_CIF_DD_1,
	GPIO81_CIF_DD_0,
	GPIO82_CIF_DD_5,
	GPIO84_CIF_FV,
	GPIO85_CIF_LV,

	/* Magician specific input GPIOs */
	GPIO9_GPIO,	/* unknown */
	GPIO10_GPIO,	/* GSM_IRQ */
	GPIO13_GPIO,	/* CPLD_IRQ */
	GPIO107_GPIO,	/* DS1WM_IRQ */
	GPIO108_GPIO,	/* GSM_READY */
	GPIO115_GPIO,	/* nPEN_IRQ */
};
/*
* IRDA
*/
/* FICP/IrDA: transceiver power is gated by the active-low nIR_EN GPIO;
 * only SIR mode is advertised. */
static struct pxaficp_platform_data magician_ficp_info = {
	.gpio_pwdown		= GPIO83_MAGICIAN_nIR_EN,
	.transceiver_cap	= IR_SIRMODE | IR_OFF,
};

/*
 * GPIO Keys
 */

/* All keys are wakeup sources (.wakeup = 1). */
#define INIT_KEY(_code, _gpio, _desc)	\
{					\
	.code	= KEY_##_code,		\
	.gpio	= _gpio,		\
	.desc	= _desc,		\
	.type	= EV_KEY,		\
	.wakeup	= 1,			\
}

static struct gpio_keys_button magician_button_table[] = {
	INIT_KEY(POWER,      GPIO0_MAGICIAN_KEY_POWER,      "Power button"),
	INIT_KEY(ESC,        GPIO37_MAGICIAN_KEY_HANGUP,    "Hangup button"),
	INIT_KEY(F10,        GPIO38_MAGICIAN_KEY_CONTACTS,  "Contacts button"),
	INIT_KEY(CALENDAR,   GPIO90_MAGICIAN_KEY_CALENDAR,  "Calendar button"),
	INIT_KEY(CAMERA,     GPIO91_MAGICIAN_KEY_CAMERA,    "Camera button"),
	INIT_KEY(UP,         GPIO93_MAGICIAN_KEY_UP,        "Up button"),
	INIT_KEY(DOWN,       GPIO94_MAGICIAN_KEY_DOWN,      "Down button"),
	INIT_KEY(LEFT,       GPIO95_MAGICIAN_KEY_LEFT,      "Left button"),
	INIT_KEY(RIGHT,      GPIO96_MAGICIAN_KEY_RIGHT,     "Right button"),
	INIT_KEY(KPENTER,    GPIO97_MAGICIAN_KEY_ENTER,     "Action button"),
	INIT_KEY(RECORD,     GPIO98_MAGICIAN_KEY_RECORD,    "Record button"),
	INIT_KEY(VOLUMEUP,   GPIO100_MAGICIAN_KEY_VOL_UP,   "Volume up"),
	INIT_KEY(VOLUMEDOWN, GPIO101_MAGICIAN_KEY_VOL_DOWN, "Volume down"),
	INIT_KEY(PHONE,      GPIO102_MAGICIAN_KEY_PHONE,    "Phone button"),
	INIT_KEY(PLAY,       GPIO99_MAGICIAN_HEADPHONE_IN,  "Headset button"),
};

static struct gpio_keys_platform_data gpio_keys_data = {
	.buttons  = magician_button_table,
	.nbuttons = ARRAY_SIZE(magician_button_table),
};

static struct platform_device gpio_keys = {
	.name = "gpio-keys",
	.dev  = {
		.platform_data = &gpio_keys_data,
	},
	.id   = -1,
};
/*
* EGPIO (Xilinx CPLD)
*
* 7 32-bit aligned 8-bit registers: 3x output, 1x irq, 3x input
*/
/* Xilinx CPLD "EGPIO" expander on chip-select 3: memory-mapped
 * registers plus one interrupt line (GPIO13). */
static struct resource egpio_resources[] = {
	[0] = {
		.start = PXA_CS3_PHYS,
		.end   = PXA_CS3_PHYS + 0x20 - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = gpio_to_irq(GPIO13_MAGICIAN_CPLD_IRQ),
		.end   = gpio_to_irq(GPIO13_MAGICIAN_CPLD_IRQ),
		.flags = IORESOURCE_IRQ,
	},
};

static struct htc_egpio_chip egpio_chips[] = {
	/* registers 0-2: 24 output lines */
	[0] = {
		.reg_start = 0,
		.gpio_base = MAGICIAN_EGPIO(0, 0),
		.num_gpios = 24,
		.direction = HTC_EGPIO_OUTPUT,
		.initial_values = 0x40, /* EGPIO_MAGICIAN_GSM_RESET */
	},
	/* registers 4-6: 24 input lines (register 3 is the IRQ/ack reg) */
	[1] = {
		.reg_start = 4,
		.gpio_base = MAGICIAN_EGPIO(4, 0),
		.num_gpios = 24,
		.direction = HTC_EGPIO_INPUT,
	},
};

static struct htc_egpio_platform_data egpio_info = {
	.reg_width    = 8,	/* 8-bit registers ... */
	.bus_width    = 32,	/* ... on a 32-bit-aligned bus */
	.irq_base     = IRQ_BOARD_START,
	.num_irqs     = 4,
	.ack_register = 3,
	.chip         = egpio_chips,
	.num_chips    = ARRAY_SIZE(egpio_chips),
};

static struct platform_device egpio = {
	.name          = "htc-egpio",
	.id            = -1,
	.resource      = egpio_resources,
	.num_resources = ARRAY_SIZE(egpio_resources),
	.dev = {
		.platform_data = &egpio_info,
	},
};
/*
* LCD - Toppoly TD028STEB1 or Samsung LTP280QV
*/
/* 240x320 @16bpp timings for the Toppoly TD028STEB1 panel.  The panel
 * fitted is detected at boot from the CPLD board-id register (see
 * magician_init()). */
static struct pxafb_mode_info toppoly_modes[] = {
	{
		.pixclock     = 96153,
		.bpp          = 16,
		.xres         = 240,
		.yres         = 320,
		.hsync_len    = 11,
		.vsync_len    = 3,
		.left_margin  = 19,
		.upper_margin = 2,
		.right_margin = 10,
		.lower_margin = 2,
		.sync         = 0,
	},
};

/* 240x320 @16bpp timings for the Samsung LTP280QV panel. */
static struct pxafb_mode_info samsung_modes[] = {
	{
		.pixclock     = 96153,
		.bpp          = 16,
		.xres         = 240,
		.yres         = 320,
		.hsync_len    = 8,
		.vsync_len    = 4,
		.left_margin  = 9,
		.upper_margin = 4,
		.right_margin = 9,
		.lower_margin = 4,
		.sync         = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
	},
};

/*
 * Toppoly panel power sequencing.  The rails are brought up/down in a
 * fixed order with delays in between.
 * NOTE(review): the delay values look empirically chosen - confirm
 * against the panel datasheet before changing them.
 */
static void toppoly_lcd_power(int on, struct fb_var_screeninfo *si)
{
	pr_debug("Toppoly LCD power\n");

	if (on) {
		pr_debug("on\n");
		gpio_set_value(EGPIO_MAGICIAN_TOPPOLY_POWER, 1);
		gpio_set_value(GPIO106_MAGICIAN_LCD_POWER_3, 1);
		udelay(2000);
		gpio_set_value(EGPIO_MAGICIAN_LCD_POWER, 1);
		udelay(2000);
		/* FIXME: enable LCDC here */
		udelay(2000);
		gpio_set_value(GPIO104_MAGICIAN_LCD_POWER_1, 1);
		udelay(2000);
		gpio_set_value(GPIO105_MAGICIAN_LCD_POWER_2, 1);
	} else {
		pr_debug("off\n");
		msleep(15);
		/* power down in reverse order of the power-up sequence */
		gpio_set_value(GPIO105_MAGICIAN_LCD_POWER_2, 0);
		udelay(500);
		gpio_set_value(GPIO104_MAGICIAN_LCD_POWER_1, 0);
		udelay(1000);
		gpio_set_value(GPIO106_MAGICIAN_LCD_POWER_3, 0);
		gpio_set_value(EGPIO_MAGICIAN_LCD_POWER, 0);
	}
}

/*
 * Samsung panel power sequencing.  Boards with system_rev < 3 gate the
 * main panel supply through GPIO75 instead of the EGPIO line.
 */
static void samsung_lcd_power(int on, struct fb_var_screeninfo *si)
{
	pr_debug("Samsung LCD power\n");

	if (on) {
		pr_debug("on\n");
		if (system_rev < 3)
			gpio_set_value(GPIO75_MAGICIAN_SAMSUNG_POWER, 1);
		else
			gpio_set_value(EGPIO_MAGICIAN_LCD_POWER, 1);
		mdelay(10);
		gpio_set_value(GPIO106_MAGICIAN_LCD_POWER_3, 1);
		mdelay(10);
		gpio_set_value(GPIO104_MAGICIAN_LCD_POWER_1, 1);
		mdelay(30);
		gpio_set_value(GPIO105_MAGICIAN_LCD_POWER_2, 1);
		mdelay(10);
	} else {
		pr_debug("off\n");
		mdelay(10);
		/* power down in reverse order of the power-up sequence */
		gpio_set_value(GPIO105_MAGICIAN_LCD_POWER_2, 0);
		mdelay(30);
		gpio_set_value(GPIO104_MAGICIAN_LCD_POWER_1, 0);
		mdelay(10);
		gpio_set_value(GPIO106_MAGICIAN_LCD_POWER_3, 0);
		mdelay(10);
		if (system_rev < 3)
			gpio_set_value(GPIO75_MAGICIAN_SAMSUNG_POWER, 0);
		else
			gpio_set_value(EGPIO_MAGICIAN_LCD_POWER, 0);
	}
}

static struct pxafb_mach_info toppoly_info = {
	.modes           = toppoly_modes,
	.num_modes       = 1,
	.fixed_modes     = 1,
	.lcd_conn        = LCD_COLOR_TFT_16BPP,
	.pxafb_lcd_power = toppoly_lcd_power,
};

static struct pxafb_mach_info samsung_info = {
	.modes           = samsung_modes,
	.num_modes       = 1,
	.fixed_modes     = 1,
	.lcd_conn        = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL |\
	                   LCD_ALTERNATE_MAPPING,
	.pxafb_lcd_power = samsung_lcd_power,
};
/*
* Backlight
*/
/* The two EGPIO lines controlling backlight power, requested together. */
static struct gpio magician_bl_gpios[] = {
	{ EGPIO_MAGICIAN_BL_POWER,  GPIOF_DIR_OUT, "Backlight power" },
	{ EGPIO_MAGICIAN_BL_POWER2, GPIOF_DIR_OUT, "Backlight power 2" },
};

/* pwm-backlight .init hook: claim the backlight power GPIOs. */
static int magician_backlight_init(struct device *dev)
{
	return gpio_request_array(ARRAY_AND_SIZE(magician_bl_gpios));
}
/*
 * pwm-backlight .notify hook.  BL_POWER tracks whether the backlight
 * is lit at all; BL_POWER2 is an extra boost rail engaged for
 * brightness >= 200, in which case the PWM duty passed on to the
 * driver is reduced by 72 to compensate.
 */
static int magician_backlight_notify(struct device *dev, int brightness)
{
	int boost = brightness >= 200;

	gpio_set_value(EGPIO_MAGICIAN_BL_POWER, brightness);
	gpio_set_value(EGPIO_MAGICIAN_BL_POWER2, boost);

	return boost ? brightness - 72 : brightness;
}
/* pwm-backlight .exit hook: release the backlight power GPIOs. */
static void magician_backlight_exit(struct device *dev)
{
	gpio_free_array(ARRAY_AND_SIZE(magician_bl_gpios));
}

static struct platform_pwm_backlight_data backlight_data = {
	.pwm_id         = 0,
	.max_brightness = 272,	/* 200 + 72, see magician_backlight_notify() */
	.dft_brightness = 100,
	.pwm_period_ns  = 30923,
	.init           = magician_backlight_init,
	.notify         = magician_backlight_notify,
	.exit           = magician_backlight_exit,
};

static struct platform_device backlight = {
	.name = "pwm-backlight",
	.id   = -1,
	.dev  = {
		/* parented to PWM0 so the pwm-backlight driver finds its PWM */
		.parent        = &pxa27x_device_pwm0.dev,
		.platform_data = &backlight_data,
	},
};
/*
* LEDs
*/
/* Two LEDs driven directly from GPIOs: the vibra motor and the keypad
 * backlight (the latter follows the "backlight" trigger). */
static struct gpio_led gpio_leds[] = {
	{
		.name            = "magician::vibra",
		.default_trigger = "none",
		.gpio            = GPIO22_MAGICIAN_VIBRA_EN,
	},
	{
		.name            = "magician::phone_bl",
		.default_trigger = "backlight",
		.gpio            = GPIO103_MAGICIAN_LED_KP,
	},
};

static struct gpio_led_platform_data gpio_led_info = {
	.leds     = gpio_leds,
	.num_leds = ARRAY_SIZE(gpio_leds),
};

static struct platform_device leds_gpio = {
	.name = "leds-gpio",
	.id   = -1,
	.dev  = {
		.platform_data = &gpio_led_info,
	},
};

/* The tri-color status LED sits behind the PASIC3; red/green follow
 * the battery charge state, blue follows the bluetooth trigger. */
static struct pasic3_led pasic3_leds[] = {
	{
		.led = {
			.name            = "magician:red",
			.default_trigger = "ds2760-battery.0-charging",
		},
		.hw_num = 0,
		.bit2   = PASIC3_BIT2_LED0,
		.mask   = PASIC3_MASK_LED0,
	},
	{
		.led = {
			.name            = "magician:green",
			.default_trigger = "ds2760-battery.0-charging-or-full",
		},
		.hw_num = 1,
		.bit2   = PASIC3_BIT2_LED1,
		.mask   = PASIC3_MASK_LED1,
	},
	{
		.led = {
			.name            = "magician:blue",
			.default_trigger = "bluetooth",
		},
		.hw_num = 2,
		.bit2   = PASIC3_BIT2_LED2,
		.mask   = PASIC3_MASK_LED2,
	},
};

static struct pasic3_leds_machinfo pasic3_leds_info = {
	.num_leds   = ARRAY_SIZE(pasic3_leds),
	.power_gpio = EGPIO_MAGICIAN_LED_POWER,
	.leds       = pasic3_leds,
};

/*
 * PASIC3 with DS1WM
 */
static struct resource pasic3_resources[] = {
	[0] = {
		.start = PXA_CS2_PHYS,
		.end   = PXA_CS2_PHYS + 0x1b,
		.flags = IORESOURCE_MEM,
	},
	/* No IRQ handler in the PASIC3, DS1WM needs an external IRQ */
	[1] = {
		.start = gpio_to_irq(GPIO107_MAGICIAN_DS1WM_IRQ),
		.end   = gpio_to_irq(GPIO107_MAGICIAN_DS1WM_IRQ),
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
	}
};

static struct pasic3_platform_data pasic3_platform_data = {
	.led_pdata  = &pasic3_leds_info,
	.clock_rate = 4000000,
};

static struct platform_device pasic3 = {
	.name          = "pasic3",
	.id            = -1,
	.num_resources = ARRAY_SIZE(pasic3_resources),
	.resource      = pasic3_resources,
	.dev = {
		.platform_data = &pasic3_platform_data,
	},
};
/*
* USB "Transceiver"
*/
/* USB "transceiver": VBUS sensing and D+ pull-up are plain GPIOs. */
static struct resource gpio_vbus_resource = {
	.flags = IORESOURCE_IRQ,
	.start = IRQ_MAGICIAN_VBUS,
	.end   = IRQ_MAGICIAN_VBUS,
};

static struct gpio_vbus_mach_info gpio_vbus_info = {
	.gpio_pullup = GPIO27_MAGICIAN_USBC_PUEN,
	.gpio_vbus   = EGPIO_MAGICIAN_CABLE_STATE_USB,
};

static struct platform_device gpio_vbus = {
	.name          = "gpio-vbus",
	.id            = -1,
	.num_resources = 1,
	.resource      = &gpio_vbus_resource,
	.dev = {
		.platform_data = &gpio_vbus_info,
	},
};

/*
 * External power
 */

/* pda-power .init hook: claim the AC-cable-detect EGPIO. */
static int power_supply_init(struct device *dev)
{
	return gpio_request(EGPIO_MAGICIAN_CABLE_STATE_AC, "CABLE_STATE_AC");
}

static int magician_is_ac_online(void)
{
	return gpio_get_value(EGPIO_MAGICIAN_CABLE_STATE_AC);
}

static void power_supply_exit(struct device *dev)
{
	gpio_free(EGPIO_MAGICIAN_CABLE_STATE_AC);
}

/* Batteries that draw from this external supply. */
static char *magician_supplicants[] = {
	"ds2760-battery.0", "backup-battery"
};

static struct pda_power_pdata power_supply_info = {
	.init            = power_supply_init,
	.is_ac_online    = magician_is_ac_online,
	.exit            = power_supply_exit,
	.supplied_to     = magician_supplicants,
	.num_supplicants = ARRAY_SIZE(magician_supplicants),
};

/* Both "ac" and "usb" plug events come from the same VBUS interrupt,
 * triggered on either edge. */
static struct resource power_supply_resources[] = {
	[0] = {
		.name  = "ac",
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE |
		         IORESOURCE_IRQ_LOWEDGE,
		.start = IRQ_MAGICIAN_VBUS,
		.end   = IRQ_MAGICIAN_VBUS,
	},
	[1] = {
		.name  = "usb",
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE |
		         IORESOURCE_IRQ_LOWEDGE,
		.start = IRQ_MAGICIAN_VBUS,
		.end   = IRQ_MAGICIAN_VBUS,
	},
};

static struct platform_device power_supply = {
	.name = "pda-power",
	.id   = -1,
	.dev  = {
		.platform_data = &power_supply_info,
	},
	.resource      = power_supply_resources,
	.num_resources = ARRAY_SIZE(power_supply_resources),
};
/*
* Battery charger
*/
/* Charger current consumers: USB draw and AC draw both feed from the
 * bq24022 regulator. */
static struct regulator_consumer_supply bq24022_consumers[] = {
	{
		.dev    = &gpio_vbus.dev,
		.supply = "vbus_draw",
	},
	{
		.dev    = &power_supply.dev,
		.supply = "ac_draw",
	},
};

static struct regulator_init_data bq24022_init_data = {
	.constraints = {
		.max_uA         = 500000,	/* USB high-power limit */
		.valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies = ARRAY_SIZE(bq24022_consumers),
	.consumer_supplies     = bq24022_consumers,
};

static struct bq24022_mach_info bq24022_info = {
	.gpio_nce   = GPIO30_MAGICIAN_BQ24022_nCHARGE_EN,	/* active-low enable */
	.gpio_iset2 = EGPIO_MAGICIAN_BQ24022_ISET2,		/* current select */
	.init_data  = &bq24022_init_data,
};

static struct platform_device bq24022 = {
	.name = "bq24022",
	.id   = -1,
	.dev  = {
		.platform_data = &bq24022_info,
	},
};

/*
 * MMC/SD
 */

/* Hook the card-detect interrupt; the controller driver supplies the
 * handler and per-host data. */
static int magician_mci_init(struct device *dev,
				irq_handler_t detect_irq, void *data)
{
	return request_irq(IRQ_MAGICIAN_SD, detect_irq,
				IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
				"mmc card detect", data);
}

static void magician_mci_exit(struct device *dev, void *data)
{
	free_irq(IRQ_MAGICIAN_SD, data);
}

static struct pxamci_platform_data magician_mci_info = {
	.ocr_mask            = MMC_VDD_32_33|MMC_VDD_33_34,
	.init                = magician_mci_init,
	.exit                = magician_mci_exit,
	.gpio_card_detect    = -1,	/* detection via IRQ_MAGICIAN_SD instead */
	.gpio_card_ro        = EGPIO_MAGICIAN_nSD_READONLY,
	.gpio_card_ro_invert = 1,	/* line is active-low (nSD_READONLY) */
	.gpio_power          = EGPIO_MAGICIAN_SD_POWER,
};

/*
 * USB OHCI
 */
static struct pxaohci_platform_data magician_ohci_info = {
	.port_mode    = PMM_PERPORT_MODE,
	.flags        = ENABLE_PORT1 | ENABLE_PORT3 | POWER_CONTROL_LOW,
	.power_budget = 0,
};

/*
 * StrataFlash
 */

/* physmap set_vpp hook: gate programming voltage via EGPIO. */
static void magician_set_vpp(struct platform_device *pdev, int vpp)
{
	gpio_set_value(EGPIO_MAGICIAN_FLASH_VPP, vpp);
}

static struct resource strataflash_resource = {
	.start = PXA_CS0_PHYS,
	.end   = PXA_CS0_PHYS + SZ_64M - 1,
	.flags = IORESOURCE_MEM,
};

static struct physmap_flash_data strataflash_data = {
	.width   = 4,
	.set_vpp = magician_set_vpp,
};

static struct platform_device strataflash = {
	.name          = "physmap-flash",
	.id            = -1,
	.resource      = &strataflash_resource,
	.num_resources = 1,
	.dev = {
		.platform_data = &strataflash_data,
	},
};

/*
 * I2C
 */
static struct i2c_pxa_platform_data i2c_info = {
	.fast_mode = 1,	/* 400 kHz */
};
/*
* Platform devices
*/
/* Devices registered in one go by magician_init(). */
static struct platform_device *devices[] __initdata = {
	&gpio_keys,
	&egpio,
	&backlight,
	&pasic3,
	&bq24022,
	&gpio_vbus,
	&power_supply,
	&strataflash,
	&leds_gpio,
};

/* GPIOs held for the board's whole lifetime, requested at init. */
static struct gpio magician_global_gpios[] = {
	{ GPIO13_MAGICIAN_CPLD_IRQ,     GPIOF_IN,           "CPLD_IRQ" },
	{ GPIO107_MAGICIAN_DS1WM_IRQ,   GPIOF_IN,           "DS1WM_IRQ" },
	{ GPIO104_MAGICIAN_LCD_POWER_1, GPIOF_OUT_INIT_LOW, "LCD power 1" },
	{ GPIO105_MAGICIAN_LCD_POWER_2, GPIOF_OUT_INIT_LOW, "LCD power 2" },
	{ GPIO106_MAGICIAN_LCD_POWER_3, GPIOF_OUT_INIT_LOW, "LCD power 3" },
	{ GPIO83_MAGICIAN_nIR_EN,       GPIOF_OUT_INIT_HIGH, "nIR_EN" },
};
/*
 * Board init: configure pin muxing, claim the always-used GPIOs,
 * register the on-board devices, then read the CPLD board-id register
 * to derive system_rev and select the fitted LCD panel.
 */
static void __init magician_init(void)
{
	void __iomem *cpld;
	int lcd_select;
	int err;

	pxa2xx_mfp_config(ARRAY_AND_SIZE(magician_pin_config));

	/* Failure here is logged but not fatal: most of the board still works. */
	err = gpio_request_array(ARRAY_AND_SIZE(magician_global_gpios));
	if (err)
		pr_err("magician: Failed to request GPIOs: %d\n", err);

	pxa_set_ffuart_info(NULL);
	pxa_set_btuart_info(NULL);
	pxa_set_stuart_info(NULL);

	platform_add_devices(ARRAY_AND_SIZE(devices));

	pxa_set_ficp_info(&magician_ficp_info);
	pxa27x_set_i2c_power_info(NULL);
	pxa_set_i2c_info(&i2c_info);
	pxa_set_mci_info(&magician_mci_info);
	pxa_set_ohci_info(&magician_ohci_info);

	/* Check LCD type we have */
	cpld = ioremap_nocache(PXA_CS3_PHYS, 0x1000);
	if (cpld) {
		u8 board_id = __raw_readb(cpld+0x14);

		iounmap(cpld);
		system_rev = board_id & 0x7;	/* low 3 bits: board revision */
		lcd_select = board_id & 0x8;	/* bit 3: panel vendor */
		pr_info("LCD type: %s\n", lcd_select ? "Samsung" : "Toppoly");
		/* rev < 3 Samsung boards gate panel power via GPIO75 */
		if (lcd_select && (system_rev < 3))
			gpio_request_one(GPIO75_MAGICIAN_SAMSUNG_POWER,
			                 GPIOF_OUT_INIT_LOW, "SAMSUNG_POWER");
		pxa_set_fb_info(NULL, lcd_select ? &samsung_info : &toppoly_info);
	} else
		pr_err("LCD detection: CPLD mapping failed\n");
}
/* Machine descriptor: standard PXA27x hooks plus this board's init. */
MACHINE_START(MAGICIAN, "HTC Magician")
	.boot_params = 0xa0000100,
	.map_io      = pxa27x_map_io,
	.nr_irqs     = MAGICIAN_NR_IRQS,	/* extra IRQs for the EGPIO expander */
	.init_irq    = pxa27x_init_irq,
	.handle_irq  = pxa27x_handle_irq,
	.init_machine = magician_init,
	.timer       = &pxa_timer,
MACHINE_END
| gpl-2.0 |
jackzzjack/linux | drivers/gpu/drm/imx/ipuv3-crtc.c | 641 | 12790 | /*
* i.MX IPUv3 Graphics driver
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/component.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <linux/fb.h>
#include <linux/clk.h>
#include <linux/errno.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <video/imx-ipu-v3.h>
#include "imx-drm.h"
#include "ipuv3-plane.h"
#define DRIVER_DESC "i.MX IPUv3 Graphics"
/* Per-CRTC state for one IPU display pipe. */
struct ipu_crtc {
	struct device		*dev;
	struct drm_crtc		base;
	struct imx_drm_crtc	*imx_crtc;

	/* plane[0] is the full plane, plane[1] is the partial plane */
	struct ipu_plane	*plane[2];
	struct ipu_dc		*dc;
	struct ipu_di		*di;
	int			enabled;	/* display pipe currently running */
	struct drm_pending_vblank_event *page_flip_event;	/* pending flip event */
	struct drm_framebuffer	*newfb;		/* fb queued by ipu_page_flip() */
	int			irq;		/* IDMAC EOF irq of plane[0] */
	u32			bus_format;
	int			di_hsync_pin;
	int			di_vsync_pin;
};

#define to_ipu_crtc(x) container_of(x, struct ipu_crtc, base)

/* Power up the pipe: DC block, IDMAC plane, then DC channel and DI.
 * Idempotent via the ->enabled flag. */
static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
{
	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);

	if (ipu_crtc->enabled)
		return;

	ipu_dc_enable(ipu);
	ipu_plane_enable(ipu_crtc->plane[0]);
	/* Start DC channel and DI after IDMAC */
	ipu_dc_enable_channel(ipu_crtc->dc);
	ipu_di_enable(ipu_crtc->di);
	ipu_crtc->enabled = 1;
}

/* Power down the pipe in the opposite order of ipu_fb_enable(). */
static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
{
	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);

	if (!ipu_crtc->enabled)
		return;

	/* Stop DC channel and DI before IDMAC */
	ipu_dc_disable_channel(ipu_crtc->dc);
	ipu_di_disable(ipu_crtc->di);
	ipu_plane_disable(ipu_crtc->plane[0]);
	ipu_dc_disable(ipu);
	ipu_crtc->enabled = 0;
}
/*
 * DPMS callback: ON powers the display pipe up; STANDBY, SUSPEND and
 * OFF all power it down.  Any other value is ignored, as before.
 */
static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);

	dev_dbg(ipu_crtc->dev, "%s mode: %d\n", __func__, mode);

	if (mode == DRM_MODE_DPMS_ON)
		ipu_fb_enable(ipu_crtc);
	else if (mode == DRM_MODE_DPMS_STANDBY ||
		 mode == DRM_MODE_DPMS_SUSPEND ||
		 mode == DRM_MODE_DPMS_OFF)
		ipu_fb_disable(ipu_crtc);
}
/*
 * Queue a page flip: remember the new fb and the completion event;
 * the actual base-address update happens in ipu_irq_handler() on the
 * next vblank.  Only one flip may be pending at a time (-EBUSY).
 */
static int ipu_page_flip(struct drm_crtc *crtc,
		struct drm_framebuffer *fb,
		struct drm_pending_vblank_event *event,
		uint32_t page_flip_flags)
{
	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
	int ret;

	if (ipu_crtc->newfb)
		return -EBUSY;

	/* Hold a vblank reference until the flip completes. */
	ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc);
	if (ret) {
		dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n");
		list_del(&event->base.link);
		return ret;
	}

	ipu_crtc->newfb = fb;
	ipu_crtc->page_flip_event = event;
	crtc->primary->fb = fb;

	return 0;
}

static const struct drm_crtc_funcs ipu_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.destroy    = drm_crtc_cleanup,
	.page_flip  = ipu_page_flip,
};
/*
 * Program a new display mode: pick the DI clock source from the type
 * of the attached encoder(s), configure the display controller and
 * the sync panel, then point the primary plane at the framebuffer.
 * Returns 0 on success or a negative errno.
 */
static int ipu_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *orig_mode,
			       struct drm_display_mode *mode,
			       int x, int y,
			       struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
	struct ipu_di_signal_cfg sig_cfg = {};
	unsigned long encoder_types = 0;
	int ret;

	dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
			mode->hdisplay);
	dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__,
			mode->vdisplay);

	/* Collect the encoder types currently routed to this CRTC. */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		if (encoder->crtc == crtc)
			encoder_types |= BIT(encoder->encoder_type);

	dev_dbg(ipu_crtc->dev, "%s: attached to encoder types 0x%lx\n",
		__func__, encoder_types);

	/*
	 * If we have DAC or LDB, then we need the IPU DI clock to be
	 * the same as the LDB DI clock. For TVDAC, derive the IPU DI
	 * clock from 27 MHz TVE_DI clock, but allow to divide it.
	 */
	if (encoder_types & (BIT(DRM_MODE_ENCODER_DAC) |
			     BIT(DRM_MODE_ENCODER_LVDS)))
		sig_cfg.clkflags = IPU_DI_CLKMODE_SYNC | IPU_DI_CLKMODE_EXT;
	else if (encoder_types & BIT(DRM_MODE_ENCODER_TVDAC))
		sig_cfg.clkflags = IPU_DI_CLKMODE_EXT;
	else
		sig_cfg.clkflags = 0;

	sig_cfg.enable_pol = 1;
	sig_cfg.clk_pol = 0;
	sig_cfg.bus_format = ipu_crtc->bus_format;
	sig_cfg.v_to_h_sync = 0;
	sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin;
	sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin;

	drm_display_mode_to_videomode(mode, &sig_cfg.mode);

	ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
			       mode->flags & DRM_MODE_FLAG_INTERLACE,
			       ipu_crtc->bus_format, mode->hdisplay);
	if (ret) {
		dev_err(ipu_crtc->dev,
				"initializing display controller failed with %d\n",
				ret);
		return ret;
	}

	ret = ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
	if (ret) {
		dev_err(ipu_crtc->dev,
				"initializing panel failed with %d\n", ret);
		return ret;
	}

	return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode,
				  crtc->primary->fb,
				  0, 0, mode->hdisplay, mode->vdisplay,
				  x, y, mode->hdisplay, mode->vdisplay,
				  mode->flags & DRM_MODE_FLAG_INTERLACE);
}
/* Complete a pending page flip: send the vblank event (if any) and
 * drop the vblank reference taken in ipu_page_flip(). */
static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
{
	unsigned long flags;
	struct drm_device *drm = ipu_crtc->base.dev;

	spin_lock_irqsave(&drm->event_lock, flags);
	if (ipu_crtc->page_flip_event)
		drm_send_vblank_event(drm, -1, ipu_crtc->page_flip_event);
	ipu_crtc->page_flip_event = NULL;
	imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
	spin_unlock_irqrestore(&drm->event_lock, flags);
}

/* IDMAC EOF interrupt: report vblank and, if a flip is queued, latch
 * the new framebuffer base address and signal completion. */
static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
{
	struct ipu_crtc *ipu_crtc = dev_id;

	imx_drm_handle_vblank(ipu_crtc->imx_crtc);

	if (ipu_crtc->newfb) {
		struct ipu_plane *plane = ipu_crtc->plane[0];

		ipu_crtc->newfb = NULL;
		ipu_plane_set_base(plane, ipu_crtc->base.primary->fb,
				   plane->x, plane->y);
		ipu_crtc_handle_pageflip(ipu_crtc);
	}

	return IRQ_HANDLED;
}
/*
 * Let the DI round the requested timings to what the hardware can
 * actually generate.  Returns false if the mode cannot be adjusted.
 */
static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
				  const struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode)
{
	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
	struct videomode vm;

	drm_display_mode_to_videomode(adjusted_mode, &vm);

	if (ipu_di_adjust_videomode(ipu_crtc->di, &vm) != 0)
		return false;

	drm_display_mode_from_videomode(&vm, adjusted_mode);
	return true;
}
/* Helper .prepare: shut the pipe down before a mode set. */
static void ipu_crtc_prepare(struct drm_crtc *crtc)
{
	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);

	ipu_fb_disable(ipu_crtc);
}

/* Helper .commit: bring the pipe back up after a mode set. */
static void ipu_crtc_commit(struct drm_crtc *crtc)
{
	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);

	ipu_fb_enable(ipu_crtc);
}

static struct drm_crtc_helper_funcs ipu_helper_funcs = {
	.dpms       = ipu_crtc_dpms,
	.mode_fixup = ipu_crtc_mode_fixup,
	.mode_set   = ipu_crtc_mode_set,
	.prepare    = ipu_crtc_prepare,
	.commit     = ipu_crtc_commit,
};

/* Vblank irq stays armed permanently (IDMAC EOF), so nothing to do. */
static int ipu_enable_vblank(struct drm_crtc *crtc)
{
	return 0;
}

/* Drop any pending flip state so no event fires after vblank is off. */
static void ipu_disable_vblank(struct drm_crtc *crtc)
{
	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);

	ipu_crtc->page_flip_event = NULL;
	ipu_crtc->newfb = NULL;
}

/* Record the bus format and sync pins to be used at next mode set. */
static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
		u32 bus_format, int hsync_pin, int vsync_pin)
{
	struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);

	ipu_crtc->bus_format = bus_format;
	ipu_crtc->di_hsync_pin = hsync_pin;
	ipu_crtc->di_vsync_pin = vsync_pin;

	return 0;
}

static const struct imx_drm_crtc_helper_funcs ipu_crtc_helper_funcs = {
	.enable_vblank         = ipu_enable_vblank,
	.disable_vblank        = ipu_disable_vblank,
	.set_interface_pix_fmt = ipu_set_interface_pix_fmt,
	.crtc_funcs            = &ipu_crtc_funcs,
	.crtc_helper_funcs     = &ipu_helper_funcs,
};
/* Release the DC/DI handles; safe to call with partially acquired
 * resources (IS_ERR_OR_NULL-guarded). */
static void ipu_put_resources(struct ipu_crtc *ipu_crtc)
{
	if (!IS_ERR_OR_NULL(ipu_crtc->dc))
		ipu_dc_put(ipu_crtc->dc);
	if (!IS_ERR_OR_NULL(ipu_crtc->di))
		ipu_di_put(ipu_crtc->di);
}

/* Acquire the DC and DI units named in the platform data; on any
 * failure, everything obtained so far is released again. */
static int ipu_get_resources(struct ipu_crtc *ipu_crtc,
		struct ipu_client_platformdata *pdata)
{
	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
	int ret;

	ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc);
	if (IS_ERR(ipu_crtc->dc)) {
		ret = PTR_ERR(ipu_crtc->dc);
		goto err_out;
	}

	ipu_crtc->di = ipu_di_get(ipu, pdata->di);
	if (IS_ERR(ipu_crtc->di)) {
		ret = PTR_ERR(ipu_crtc->di);
		goto err_out;
	}

	return 0;
err_out:
	ipu_put_resources(ipu_crtc);

	return ret;
}
/*
 * Set up one CRTC: acquire the DC/DI resources, register the CRTC with
 * the imx-drm core, create its primary (and optional overlay) plane,
 * and hook up the IDMAC EOF interrupt used for vblank/page-flip.
 * Returns 0 on success or a negative errno, with full unwind.
 */
static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
	struct ipu_client_platformdata *pdata, struct drm_device *drm)
{
	struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
	int dp = -EINVAL;
	int ret;
	int id;

	ret = ipu_get_resources(ipu_crtc, pdata);
	if (ret) {
		dev_err(ipu_crtc->dev, "getting resources failed with %d.\n",
				ret);
		return ret;
	}

	ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
			&ipu_crtc_helper_funcs, ipu_crtc->dev->of_node);
	if (ret) {
		dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
		goto err_put_resources;
	}

	if (pdata->dp >= 0)
		dp = IPU_DP_FLOW_SYNC_BG;
	id = imx_drm_crtc_id(ipu_crtc->imx_crtc);
	ipu_crtc->plane[0] = ipu_plane_init(ipu_crtc->base.dev, ipu,
					    pdata->dma[0], dp, BIT(id), true);
	/*
	 * Fix: ipu_plane_init() returns an ERR_PTR on failure (see the
	 * overlay-plane path below, which already checks it); the
	 * original code passed the error pointer straight to
	 * ipu_plane_get_resources(), which would dereference it.
	 */
	if (IS_ERR(ipu_crtc->plane[0])) {
		ret = PTR_ERR(ipu_crtc->plane[0]);
		goto err_remove_crtc;
	}

	ret = ipu_plane_get_resources(ipu_crtc->plane[0]);
	if (ret) {
		dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n",
			ret);
		goto err_remove_crtc;
	}

	/* If this crtc is using the DP, add an overlay plane */
	if (pdata->dp >= 0 && pdata->dma[1] > 0) {
		ipu_crtc->plane[1] = ipu_plane_init(ipu_crtc->base.dev, ipu,
						    pdata->dma[1],
						    IPU_DP_FLOW_SYNC_FG,
						    BIT(id), false);
		/* The overlay plane is optional: ignore init failures. */
		if (IS_ERR(ipu_crtc->plane[1]))
			ipu_crtc->plane[1] = NULL;
	}

	ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
	ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0,
			"imx_drm", ipu_crtc);
	if (ret < 0) {
		dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
		goto err_put_plane_res;
	}

	return 0;

err_put_plane_res:
	ipu_plane_put_resources(ipu_crtc->plane[0]);
err_remove_crtc:
	imx_drm_remove_crtc(ipu_crtc->imx_crtc);
err_put_resources:
	ipu_put_resources(ipu_crtc);

	return ret;
}
/*
 * Return the child "port" node of @parent whose "reg" property equals
 * @port_id, or NULL if none matches.
 *
 * NOTE(review): of_get_child_by_name() returns the node with a
 * reference held; on a match that reference is handed to the caller.
 * of_get_next_child() drops the reference on the previous node, so
 * the scan itself does not leak - confirm callers put the node.
 */
static struct device_node *ipu_drm_get_port_by_id(struct device_node *parent,
						  int port_id)
{
	struct device_node *port;
	int id, ret;

	port = of_get_child_by_name(parent, "port");
	while (port) {
		ret = of_property_read_u32(port, "reg", &id);
		if (!ret && id == port_id)
			return port;

		/* advance, skipping children that are not "port" nodes */
		do {
			port = of_get_next_child(parent, port);
			if (!port)
				return NULL;
		} while (of_node_cmp(port->name, "port"));
	}

	return NULL;
}
/* Component .bind: allocate the per-CRTC state and initialize it
 * against the master's drm_device. */
static int ipu_drm_bind(struct device *dev, struct device *master, void *data)
{
	struct ipu_client_platformdata *pdata = dev->platform_data;
	struct drm_device *drm = data;
	struct ipu_crtc *ipu_crtc;
	int ret;

	ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL);
	if (!ipu_crtc)
		return -ENOMEM;

	ipu_crtc->dev = dev;

	ret = ipu_crtc_init(ipu_crtc, pdata, drm);
	if (ret)
		return ret;

	dev_set_drvdata(dev, ipu_crtc);

	return 0;
}

/* Component .unbind: tear down in reverse order of ipu_crtc_init()
 * (the irq is devm-managed, the ipu_crtc allocation too). */
static void ipu_drm_unbind(struct device *dev, struct device *master,
	void *data)
{
	struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev);

	imx_drm_remove_crtc(ipu_crtc->imx_crtc);

	ipu_plane_put_resources(ipu_crtc->plane[0]);
	ipu_put_resources(ipu_crtc);
}

static const struct component_ops ipu_crtc_ops = {
	.bind   = ipu_drm_bind,
	.unbind = ipu_drm_unbind,
};

/*
 * Probe: associate the device with its DI port node (needed for DT
 * endpoint matching), set the DMA mask, and register as a component
 * of the imx-drm master.
 */
static int ipu_drm_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipu_client_platformdata *pdata = dev->platform_data;
	int ret;

	if (!dev->platform_data)
		return -EINVAL;

	if (!dev->of_node) {
		/* Associate crtc device with the corresponding DI port node */
		dev->of_node = ipu_drm_get_port_by_id(dev->parent->of_node,
						      pdata->di + 2);
		if (!dev->of_node) {
			dev_err(dev, "missing port@%d node in %s\n",
				pdata->di + 2, dev->parent->of_node->full_name);
			return -ENODEV;
		}
	}

	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	return component_add(dev, &ipu_crtc_ops);
}

static int ipu_drm_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &ipu_crtc_ops);
	return 0;
}

static struct platform_driver ipu_drm_driver = {
	.driver = {
		.name = "imx-ipuv3-crtc",
	},
	.probe  = ipu_drm_probe,
	.remove = ipu_drm_remove,
};
module_platform_driver(ipu_drm_driver);

MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx-ipuv3-crtc");
| gpl-2.0 |
Yamatoo/mysticalpvp | dep/g3dlite/source/Vector2.cpp | 641 | 5416 | /**
@file Vector2.cpp
2D vector class, used for texture coordinates primarily.
@maintainer Morgan McGuire, http://graphics.cs.williams.edu
@cite Portions based on Dave Eberly'x Magic Software Library
at http://www.magic-software.com
@created 2001-06-02
@edited 2009-11-16
*/
#include "G3D/platform.h"
#include <stdlib.h>
#include <cstring>
#include "G3D/Vector2.h"
#include "G3D/g3dmath.h"
#include "G3D/format.h"
#include "G3D/BinaryInput.h"
#include "G3D/BinaryOutput.h"
#include "G3D/TextInput.h"
#include "G3D/TextOutput.h"
#include "G3D/Any.h"
namespace G3D {
// Construct from an Any.  Accepts either the array form,
// Vector2(x, y), or a table form with "x" and "y" keys; the verify*
// calls throw if the Any is not a two-element "Vector2".
Vector2::Vector2(const Any& any) {
    any.verifyName("Vector2");
    any.verifyType(Any::TABLE, Any::ARRAY);
    any.verifySize(2);

    if (any.type() == Any::ARRAY) {
        x = any[0];
        y = any[1];
    } else {
        // Table
        x = any["x"];
        y = any["y"];
    }
}

// Convert to the array form, Vector2(x, y).
Vector2::operator Any() const {
    Any any(Any::ARRAY, "Vector2");
    any.append(x, y);
    return any;
}
// Canonical constant vectors, returned by reference.
//
// Each is a function-local static, so construction is deferred to
// first use (avoids the static-initialization-order fiasco across
// translation units).  Consistency fix: only one() declared its local
// const; the rest are now `static const` as well.  The functions
// already return const references, so callers are unaffected.
const Vector2& Vector2::one() {
    static const Vector2 v(1, 1);
    return v;
}

const Vector2& Vector2::zero() {
    static const Vector2 v(0, 0);
    return v;
}

const Vector2& Vector2::unitX() {
    static const Vector2 v(1, 0);
    return v;
}

const Vector2& Vector2::unitY() {
    static const Vector2 v(0, 1);
    return v;
}

const Vector2& Vector2::inf() {
    static const Vector2 v((float)G3D::finf(), (float)G3D::finf());
    return v;
}

const Vector2& Vector2::nan() {
    static const Vector2 v((float)G3D::fnan(), (float)G3D::fnan());
    return v;
}

const Vector2& Vector2::minFinite() {
    static const Vector2 v(-FLT_MAX, -FLT_MAX);
    return v;
}

const Vector2& Vector2::maxFinite() {
    static const Vector2 v(FLT_MAX, FLT_MAX);
    return v;
}
// Hash the raw bit patterns of the two floats.
//
// Fix: the original read the floats through `(int*)(void*)&x`, which
// violates strict aliasing (undefined behavior; the optimizer may
// legally break it).  memcpy performs the same bit-for-bit reinterpretation
// without UB, so the hash values are unchanged.
size_t Vector2::hashCode() const {
    unsigned int xhash;
    unsigned int yhash;
    std::memcpy(&xhash, &x, sizeof(xhash));
    std::memcpy(&yhash, &y, sizeof(yhash));

    return xhash + (yhash * 37);
}
// Construct by reading two 32-bit floats from a binary stream.
Vector2::Vector2(BinaryInput& b) {
    deserialize(b);
}

// Binary format: x then y, each as float32.
void Vector2::deserialize(BinaryInput& b) {
    x = b.readFloat32();
    y = b.readFloat32();
}

void Vector2::serialize(BinaryOutput& b) const {
    b.writeFloat32(x);
    b.writeFloat32(y);
}

// Text format: "(x, y)" - must mirror serialize(TextOutput&) below.
void Vector2::deserialize(TextInput& t) {
    t.readSymbol("(");
    x = (float)t.readNumber();
    t.readSymbol(",");
    y = (float)t.readNumber();
    t.readSymbol(")");
}

void Vector2::serialize(TextOutput& t) const {
    t.writeSymbol("(");
    t.writeNumber(x);
    t.writeSymbol(",");
    t.writeNumber(y);
    t.writeSymbol(")");
}
//----------------------------------------------------------------------------
// Uniformly distributed random *unit* vector: rejection-sample a point
// inside the unit disk, then normalize it.
Vector2 Vector2::random(G3D::Random& r) {
    Vector2 result;

    do {
        result = Vector2(r.uniform(-1, 1), r.uniform(-1, 1));
    } while (result.squaredLength() >= 1.0f);

    result.unitize();

    return result;
}

// NOTE(review): operator/ multiplies by the reciprocal while
// operator/= divides each component; the results can differ in the
// last ulp.  Presumably intentional (speed vs. precision) - confirm
// before unifying.
Vector2 Vector2::operator/ (float k) const {
    return *this * (1.0f / k);
}

Vector2& Vector2::operator/= (float k) {
    this->x /= k;
    this->y /= k;
    return *this;
}

//----------------------------------------------------------------------------

// Normalize in place and return the former length.  Vectors shorter
// than fTolerance are left unmodified and 0 is returned.
float Vector2::unitize (float fTolerance) {
    float fLength = length();

    if (fLength > fTolerance) {
        float fInvLength = 1.0f / fLength;
        x *= fInvLength;
        y *= fInvLength;
    } else {
        fLength = 0.0;
    }

    return fLength;
}

//----------------------------------------------------------------------------

// Human-readable "(x, y)" form; %g trims trailing zeros.
std::string Vector2::toString() const {
    return G3D::format("(%g, %g)", x, y);
}
// 2-char swizzles
Vector2 Vector2::xx() const { return Vector2 (x, x); }
Vector2 Vector2::yx() const { return Vector2 (y, x); }
Vector2 Vector2::xy() const { return Vector2 (x, y); }
Vector2 Vector2::yy() const { return Vector2 (y, y); }
// 3-char swizzles
Vector3 Vector2::xxx() const { return Vector3 (x, x, x); }
Vector3 Vector2::yxx() const { return Vector3 (y, x, x); }
Vector3 Vector2::xyx() const { return Vector3 (x, y, x); }
Vector3 Vector2::yyx() const { return Vector3 (y, y, x); }
Vector3 Vector2::xxy() const { return Vector3 (x, x, y); }
Vector3 Vector2::yxy() const { return Vector3 (y, x, y); }
Vector3 Vector2::xyy() const { return Vector3 (x, y, y); }
Vector3 Vector2::yyy() const { return Vector3 (y, y, y); }
// 4-char swizzles: component selection promoted to a Vector4.
Vector4 Vector2::xxxx() const { return Vector4 (x, x, x, x); }
Vector4 Vector2::yxxx() const { return Vector4 (y, x, x, x); }
Vector4 Vector2::xyxx() const { return Vector4 (x, y, x, x); }
Vector4 Vector2::yyxx() const { return Vector4 (y, y, x, x); }
Vector4 Vector2::xxyx() const { return Vector4 (x, x, y, x); }
Vector4 Vector2::yxyx() const { return Vector4 (y, x, y, x); }
Vector4 Vector2::xyyx() const { return Vector4 (x, y, y, x); }
Vector4 Vector2::yyyx() const { return Vector4 (y, y, y, x); }
Vector4 Vector2::xxxy() const { return Vector4 (x, x, x, y); }
Vector4 Vector2::yxxy() const { return Vector4 (y, x, x, y); }
Vector4 Vector2::xyxy() const { return Vector4 (x, y, x, y); }
Vector4 Vector2::yyxy() const { return Vector4 (y, y, x, y); }
Vector4 Vector2::xxyy() const { return Vector4 (x, x, y, y); }
Vector4 Vector2::yxyy() const { return Vector4 (y, x, y, y); }
Vector4 Vector2::xyyy() const { return Vector4 (x, y, y, y); }
Vector4 Vector2::yyyy() const { return Vector4 (y, y, y, y); }
} // namespace
| gpl-2.0 |
hackerspace/rpi-linux | drivers/clk/pxa/clk-pxa.c | 1153 | 2710 | /*
* Marvell PXA family clocks
*
* Copyright (C) 2014 Robert Jarzmik
*
* Common clock code for PXA clocks ("CKEN" type clocks + DT)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/of.h>
#include <dt-bindings/clock/pxa-clock.h>
#include "clk-pxa.h"
/*
 * Protects the CKEN gate registers shared by all clocks registered below.
 * Made static: it is only referenced in this file, and a kernel-wide
 * global symbol named "lock" would pollute the namespace.
 */
static DEFINE_SPINLOCK(lock);

/* Clock lookup table handed to the DT onecell provider. */
static struct clk *pxa_clocks[CLK_MAX];
static struct clk_onecell_data onecell_data = {
	.clks = pxa_clocks,
	.clk_num = CLK_MAX,
};
/*
 * A PXA "CKEN" clock: a composite of a 2-parent mux (low-power vs.
 * high-power), a per-parent fixed-factor rate, and a gate.
 */
struct pxa_clk {
	struct clk_hw hw;
	struct clk_fixed_factor lp;	/* rate factor for the low-power parent */
	struct clk_fixed_factor hp;	/* rate factor for the high-power parent */
	struct clk_gate gate;
	/* NULL means the clock is treated as always in low power */
	bool (*is_in_low_power)(void);
};

#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk, hw)
/*
 * Recompute the clock rate from @parent_rate, using the low-power fixed
 * factor when the clock reports low power (or has no callback), and the
 * high-power factor otherwise.
 */
static unsigned long cken_recalc_rate(struct clk_hw *hw,
				      unsigned long parent_rate)
{
	struct pxa_clk *pclk = to_pxa_clk(hw);
	struct clk_fixed_factor *fix;

	if (!pclk->is_in_low_power || pclk->is_in_low_power())
		fix = &pclk->lp;
	else
		fix = &pclk->hp;
	/* lend the composite's struct clk to the fixed-factor hw */
	__clk_hw_set_clk(&fix->hw, hw);
	return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
}
static struct clk_ops cken_rate_ops = {
.recalc_rate = cken_recalc_rate,
};
/*
 * Mux selection: parent index 0 is the low-power parent, index 1 the
 * high-power one.  Without a callback, assume low power (index 0).
 */
static u8 cken_get_parent(struct clk_hw *hw)
{
	struct pxa_clk *pclk = to_pxa_clk(hw);

	if (!pclk->is_in_low_power)
		return 0;
	return pclk->is_in_low_power() ? 0 : 1;
}
static struct clk_ops cken_mux_ops = {
.get_parent = cken_get_parent,
.set_parent = dummy_clk_set_parent,
};
/*
 * Record a successfully registered clock: store it in the onecell lookup
 * table (unless it has no id) and register a clkdev entry for it.
 * Failed clocks (error pointers) are ignored.
 */
void __init clkdev_pxa_register(int ckid, const char *con_id,
				const char *dev_id, struct clk *clk)
{
	if (IS_ERR(clk))
		return;

	if (ckid != CLK_NONE)
		pxa_clocks[ckid] = clk;
	clk_register_clkdev(clk, con_id, dev_id);
}
/*
 * clk_pxa_cken_init() - register a table of PXA CKEN clocks
 * @clks: descriptor table
 * @nb_clks: number of entries in @clks
 *
 * Each descriptor becomes a composite clock (mux + fixed-factor rate +
 * gate), with the gate protected by the common spinlock.
 *
 * Returns 0 on success, or -ENOMEM if a descriptor allocation fails.
 */
int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
{
	int i;
	struct pxa_clk *pxa_clk;
	struct clk *clk;

	for (i = 0; i < nb_clks; i++) {
		pxa_clk = kzalloc(sizeof(*pxa_clk), GFP_KERNEL);
		/* Bug fix: the allocation was previously dereferenced
		 * without a NULL check, oopsing on OOM. */
		if (!pxa_clk)
			return -ENOMEM;
		pxa_clk->is_in_low_power = clks[i].is_in_low_power;
		pxa_clk->lp = clks[i].lp;
		pxa_clk->hp = clks[i].hp;
		pxa_clk->gate = clks[i].gate;
		pxa_clk->gate.lock = &lock;
		clk = clk_register_composite(NULL, clks[i].name,
					     clks[i].parent_names, 2,
					     &pxa_clk->hw, &cken_mux_ops,
					     &pxa_clk->hw, &cken_rate_ops,
					     &pxa_clk->gate.hw, &clk_gate_ops,
					     clks[i].flags);
		clkdev_pxa_register(clks[i].ckid, clks[i].con_id,
				    clks[i].dev_id, clk);
	}
	return 0;
}
/* Expose all registered PXA clocks to DT consumers via a onecell provider. */
void __init clk_pxa_dt_common_init(struct device_node *np)
{
	of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
}
| gpl-2.0 |
ktoonsez/KT-SGS4 | drivers/hwspinlock/hwspinlock_core.c | 1409 | 17996 | /*
* Hardware spinlock framework
*
* Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
*
* Contact: Ohad Ben-Cohen <ohad@wizery.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include "hwspinlock_internal.h"
/* radix tree tags */
#define HWSPINLOCK_UNUSED (0) /* tags an hwspinlock as unused */
/*
* A radix tree is used to maintain the available hwspinlock instances.
* The tree associates hwspinlock pointers with their integer key id,
* and provides easy-to-use API which makes the hwspinlock core code simple
* and easy to read.
*
* Radix trees are quick on lookups, and reasonably efficient in terms of
* storage, especially with high density usages such as this framework
* requires (a continuous range of integer keys, beginning with zero, is
* used as the ID's of the hwspinlock instances).
*
* The radix tree API supports tagging items in the tree, which this
* framework uses to mark unused hwspinlock instances (see the
* HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
* tree, looking for an unused hwspinlock instance, is now reduced to a
* single radix tree API call.
*/
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);
/*
* Synchronization of access to the tree is achieved using this mutex,
* as the radix-tree API requires that users provide all synchronisation.
* A mutex is needed because we're using non-atomic radix tree allocations.
*/
static DEFINE_MUTEX(hwspinlock_tree_lock);
/**
* __hwspin_trylock() - attempt to lock a specific hwspinlock
* @hwlock: an hwspinlock which we want to trylock
* @mode: controls whether local interrupts are disabled or not
* @flags: a pointer where the caller's interrupt state will be saved at (if
* requested)
*
* This function attempts to lock an hwspinlock, and will immediately
* fail if the hwspinlock is already taken.
*
* Upon a successful return from this function, preemption (and possibly
* interrupts) is disabled, so the caller must not sleep, and is advised to
* release the hwspinlock as soon as possible. This is required in order to
* minimize remote cores polling on the hardware interconnect.
*
* The user decides whether local interrupts are disabled or not, and if yes,
* whether he wants their previous state to be saved. It is up to the user
* to choose the appropriate @mode of operation, exactly the same way users
* should decide between spin_trylock, spin_trylock_irq and
* spin_trylock_irqsave.
*
* Returns 0 if we successfully locked the hwspinlock or -EBUSY if
* the hwspinlock was already taken.
* This function will never sleep.
*/
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	/* a flags pointer is mandatory when the caller asked us to save
	 * the interrupt state */
	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	if (mode == HWLOCK_IRQSTATE)
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
	else if (mode == HWLOCK_IRQ)
		ret = spin_trylock_irq(&hwlock->lock);
	else
		ret = spin_trylock(&hwlock->lock);

	/* is lock already taken by another context on the local cpu ? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		if (mode == HWLOCK_IRQSTATE)
			spin_unlock_irqrestore(&hwlock->lock, *flags);
		else if (mode == HWLOCK_IRQ)
			spin_unlock_irq(&hwlock->lock);
		else
			spin_unlock(&hwlock->lock);

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
/**
* __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
* @hwlock: the hwspinlock to be locked
* @timeout: timeout value in msecs
* @mode: mode which controls whether local interrupts are disabled or not
* @flags: a pointer to where the caller's interrupt state will be saved at (if
* requested)
*
* This function locks the given @hwlock. If the @hwlock
* is already taken, the function will busy loop waiting for it to
* be released, but give up after @timeout msecs have elapsed.
*
* Upon a successful return from this function, preemption is disabled
* (and possibly local interrupts, too), so the caller must not sleep,
* and is advised to release the hwspinlock as soon as possible.
* This is required in order to minimize remote cores polling on the
* hardware interconnect.
*
* The user decides whether local interrupts are disabled or not, and if yes,
* whether he wants their previous state to be saved. It is up to the user
* to choose the appropriate @mode of operation, exactly the same way users
* should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
*
* Returns 0 when the @hwlock was successfully taken, and an appropriate
* error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
* busy after @timeout msecs). The function will never sleep.
*/
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire;

	/* absolute jiffy at which we give up */
	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (time_is_before_eq_jiffies(expire))
			return -ETIMEDOUT;

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);
/**
* __hwspin_unlock() - unlock a specific hwspinlock
* @hwlock: a previously-acquired hwspinlock which we want to unlock
* @mode: controls whether local interrupts needs to be restored or not
* @flags: previous caller's interrupt state to restore (if requested)
*
* This function will unlock a specific hwspinlock, enable preemption and
* (possibly) enable interrupts or restore their previous state.
* @hwlock must be already locked before calling this function: it is a bug
* to call unlock on a @hwlock that is already unlocked.
*
* The user decides whether local interrupts should be enabled or not, and
* if yes, whether he wants their previous state to be restored. It is up
* to the user to choose the appropriate @mode of operation, exactly the
* same way users decide between spin_unlock, spin_unlock_irq and
* spin_unlock_irqrestore.
*
* The function will never sleep.
*/
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	/* release the hardware lock first, then the local spinlock */
	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	if (mode == HWLOCK_IRQSTATE)
		spin_unlock_irqrestore(&hwlock->lock, *flags);
	else if (mode == HWLOCK_IRQ)
		spin_unlock_irq(&hwlock->lock);
	else
		spin_unlock(&hwlock->lock);
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
/*
 * Insert one hwspinlock into the radix tree under @id and tag it as
 * available.  Returns 0 on success or the radix tree error (-EEXIST,
 * -ENOMEM) on failure.
 */
static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	/*
	 * Bug fix: propagate the insertion error instead of returning 0
	 * unconditionally -- hwspin_lock_register() rolls back already
	 * registered locks only when it sees a non-zero return.
	 */
	return ret;
}
/*
 * Remove one hwspinlock from the radix tree, refusing if it is still in
 * use (its HWSPINLOCK_UNUSED tag is cleared) or not present.
 * Returns the removed lock, or NULL on failure.
 */
static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
/**
* hwspin_lock_register() - register a new hw spinlock device
* @bank: the hwspinlock device, which usually provides numerous hw locks
* @dev: the backing device
* @ops: hwspinlock handlers for this device
* @base_id: id of the first hardware spinlock in this bank
* @num_locks: number of hwspinlocks provided by this device
*
* This function should be called from the underlying platform-specific
* implementation, to register a new hwspinlock device instance.
*
* Should be called from a process context (might sleep)
*
* Returns 0 on success, or an appropriate error code on failure
*/
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	/* trylock and unlock are the only mandatory ops */
	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
							!ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	/* roll back every lock registered so far */
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
/**
* hwspin_lock_unregister() - unregister an hw spinlock device
* @bank: the hwspinlock device, which usually provides numerous hw locks
*
* This function should be called from the underlying platform-specific
* implementation, to unregister an existing (and unused) hwspinlock.
*
* Should be called from a process context (might sleep)
*
* Returns 0 on success, or an appropriate error code on failure
*/
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		/* fails (returns NULL) if the lock is still in use */
		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
/**
* __hwspin_lock_request() - tag an hwspinlock as used and power it up
*
* This is an internal function that prepares an hwspinlock instance
* before it is given to the user. The function assumes that
* hwspinlock_tree_lock is taken.
*
* Returns 0 or positive to indicate success, and a negative value to
* indicate an error (with the appropriate error code)
*/
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		/* undo both the runtime-PM count and the module reference */
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}
/**
* hwspin_lock_get_id() - retrieve id number of a given hwspinlock
* @hwlock: a valid hwspinlock instance
*
* Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
*/
/* Map an hwspinlock instance back to its global id; -EINVAL on NULL. */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (hwlock)
		return hwlock_to_id(hwlock);

	pr_err("invalid hwlock\n");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
/**
* hwspin_lock_request() - request an hwspinlock
*
* This function should be called by users of the hwspinlock device,
* in order to dynamically assign them an unused hwspinlock.
* Usually the user of this lock will then have to communicate the lock's id
* to the remote core before it can be used for synchronization (to get the
* id of a given hwlock, use hwspin_lock_get_id()).
*
* Should be called from a process context (might sleep)
*
* Returns the address of the assigned hwspinlock, or NULL on error
*/
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock (first item still carrying the tag) */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
						0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check (we asked for at most one lock) */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
/**
* hwspin_lock_request_specific() - request for a specific hwspinlock
* @id: index of the specific hwspinlock that is requested
*
* This function should be called by users of the hwspinlock module,
* in order to assign them a specific hwspinlock.
* Usually early board code will be calling this function in order to
* reserve specific hwspinlock ids for predefined purposes.
*
* Should be called from a process context (might sleep)
*
* Returns the address of the assigned hwspinlock, or NULL on error
*/
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused (tag still set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);
/**
* hwspin_lock_free() - free a specific hwspinlock
* @hwlock: the specific hwspinlock to free
*
* This function mark @hwlock as free again.
* Should only be called with an @hwlock that was retrieved from
* an earlier call to omap_hwspin_lock_request{_specific}.
*
* Should be called from a process context (might sleep)
*
* Returns 0 on success, or an appropriate error code on failure
*/
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	/*
	 * Bug fix: the NULL check must come before any dereference --
	 * the original read hwlock->bank->dev in the declarations,
	 * oopsing on a NULL @hwlock before the check could run.
	 */
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	ret = pm_runtime_put(dev);
	if (ret < 0)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");
| gpl-2.0 |
ngiordano/chimera_kernel | drivers/ssb/driver_chipcommon_pmu.c | 2433 | 19561 | /*
* Sonics Silicon Backplane
* Broadcom ChipCommon Power Management Unit driver
*
* Copyright 2009, Michael Buesch <mb@bu3sch.de>
* Copyright 2007, Broadcom Corporation
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_regs.h>
#include <linux/ssb/ssb_driver_chipcommon.h>
#include <linux/delay.h>
#include "ssb_private.h"
/* Read one PLL control word through the indirect ADDR/DATA register pair. */
static u32 ssb_chipco_pll_read(struct ssb_chipcommon *cc, u32 offset)
{
	chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, offset);
	return chipco_read32(cc, SSB_CHIPCO_PLLCTL_DATA);
}
/* Write one PLL control word through the indirect ADDR/DATA register pair. */
static void ssb_chipco_pll_write(struct ssb_chipcommon *cc,
				 u32 offset, u32 value)
{
	chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, offset);
	chipco_write32(cc, SSB_CHIPCO_PLLCTL_DATA, value);
}
/*
 * Read-modify-write a regulator control word: new = (old & mask) | set.
 * NOTE(review): the extra reads of REGCTL_ADDR/REGCTL_DATA appear to be
 * posting/serialization reads for the indirect interface -- confirm
 * against the ChipCommon documentation before removing them.
 */
static void ssb_chipco_regctl_maskset(struct ssb_chipcommon *cc,
				      u32 offset, u32 mask, u32 set)
{
	u32 value;

	chipco_read32(cc, SSB_CHIPCO_REGCTL_ADDR);
	chipco_write32(cc, SSB_CHIPCO_REGCTL_ADDR, offset);
	chipco_read32(cc, SSB_CHIPCO_REGCTL_ADDR);
	value = chipco_read32(cc, SSB_CHIPCO_REGCTL_DATA);
	value &= mask;
	value |= set;
	chipco_write32(cc, SSB_CHIPCO_REGCTL_DATA, value);
	chipco_read32(cc, SSB_CHIPCO_REGCTL_DATA);
}
/* One PLL configuration for a PMU revision 0 device; wb_int/wb_frac are
 * written into the PLLCTL1/PLLCTL2 WILD integer/fractional fields below. */
struct pmu0_plltab_entry {
	u16 freq;	/* Crystal frequency in kHz.*/
	u8 xf;		/* Crystal frequency value for PMU control */
	u8 wb_int;
	u32 wb_frac;
};

static const struct pmu0_plltab_entry pmu0_plltab[] = {
	{ .freq = 12000, .xf = 1, .wb_int = 73, .wb_frac = 349525, },
	{ .freq = 13000, .xf = 2, .wb_int = 67, .wb_frac = 725937, },
	{ .freq = 14400, .xf = 3, .wb_int = 61, .wb_frac = 116508, },
	{ .freq = 15360, .xf = 4, .wb_int = 57, .wb_frac = 305834, },
	{ .freq = 16200, .xf = 5, .wb_int = 54, .wb_frac = 336579, },
	{ .freq = 16800, .xf = 6, .wb_int = 52, .wb_frac = 399457, },
	{ .freq = 19200, .xf = 7, .wb_int = 45, .wb_frac = 873813, },
	{ .freq = 19800, .xf = 8, .wb_int = 44, .wb_frac = 466033, },
	{ .freq = 20000, .xf = 9, .wb_int = 44, .wb_frac = 0, },
	{ .freq = 25000, .xf = 10, .wb_int = 70, .wb_frac = 419430, },
	{ .freq = 26000, .xf = 11, .wb_int = 67, .wb_frac = 725937, },
	{ .freq = 30000, .xf = 12, .wb_int = 58, .wb_frac = 699050, },
	{ .freq = 38400, .xf = 13, .wb_int = 45, .wb_frac = 873813, },
	{ .freq = 40000, .xf = 14, .wb_int = 45, .wb_frac = 0, },
};

/* Fallback crystal frequency (kHz) when none is given or matched. */
#define SSB_PMU0_DEFAULT_XTALFREQ	20000
/* Look up the rev-0 PLL table entry for @crystalfreq (kHz); NULL if absent. */
static const struct pmu0_plltab_entry * pmu0_plltab_find_entry(u32 crystalfreq)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pmu0_plltab); i++) {
		if (pmu0_plltab[i].freq == crystalfreq)
			return &pmu0_plltab[i];
	}

	return NULL;
}
/* Tune the PLL to the crystal speed. crystalfreq is in kHz.
 * 0 selects the chip default.  PMU revision 0 devices (4328, 5354). */
static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
				u32 crystalfreq)
{
	struct ssb_bus *bus = cc->dev->bus;
	const struct pmu0_plltab_entry *e = NULL;
	u32 pmuctl, tmp, pllctl;
	unsigned int i;

	if ((bus->chip_id == 0x5354) && !crystalfreq) {
		/* The 5354 crystal freq is 25MHz */
		crystalfreq = 25000;
	}
	if (crystalfreq)
		e = pmu0_plltab_find_entry(crystalfreq);
	if (!e)
		e = pmu0_plltab_find_entry(SSB_PMU0_DEFAULT_XTALFREQ);
	BUG_ON(!e);
	crystalfreq = e->freq;
	cc->pmu.crystalfreq = e->freq;

	/* Check if the PLL already is programmed to this frequency. */
	pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL);
	if (((pmuctl & SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) == e->xf) {
		/* We're already there... */
		return;
	}

	ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n",
		   (crystalfreq / 1000), (crystalfreq % 1000));

	/* First turn the PLL off, via the chip-specific BB-PLL power-up
	 * bit in the min/max resource masks. */
	switch (bus->chip_id) {
	case 0x4328:
		chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK,
			      ~(1 << SSB_PMURES_4328_BB_PLL_PU));
		chipco_mask32(cc, SSB_CHIPCO_PMU_MAXRES_MSK,
			      ~(1 << SSB_PMURES_4328_BB_PLL_PU));
		break;
	case 0x5354:
		chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK,
			      ~(1 << SSB_PMURES_5354_BB_PLL_PU));
		chipco_mask32(cc, SSB_CHIPCO_PMU_MAXRES_MSK,
			      ~(1 << SSB_PMURES_5354_BB_PLL_PU));
		break;
	default:
		SSB_WARN_ON(1);
	}
	/* Poll (up to 1500 * 10us) until the HT clock signals off. */
	for (i = 1500; i; i--) {
		tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
		if (!(tmp & SSB_CHIPCO_CLKCTLST_HAVEHT))
			break;
		udelay(10);
	}
	tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
	if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
		ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n");

	/* Set PDIV in PLL control 0. */
	pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0);
	if (crystalfreq >= SSB_PMU0_PLLCTL0_PDIV_FREQ)
		pllctl |= SSB_PMU0_PLLCTL0_PDIV_MSK;
	else
		pllctl &= ~SSB_PMU0_PLLCTL0_PDIV_MSK;
	ssb_chipco_pll_write(cc, SSB_PMU0_PLLCTL0, pllctl);

	/* Set WILD in PLL control 1. */
	pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL1);
	pllctl &= ~SSB_PMU0_PLLCTL1_STOPMOD;
	pllctl &= ~(SSB_PMU0_PLLCTL1_WILD_IMSK | SSB_PMU0_PLLCTL1_WILD_FMSK);
	pllctl |= ((u32)e->wb_int << SSB_PMU0_PLLCTL1_WILD_IMSK_SHIFT) & SSB_PMU0_PLLCTL1_WILD_IMSK;
	pllctl |= ((u32)e->wb_frac << SSB_PMU0_PLLCTL1_WILD_FMSK_SHIFT) & SSB_PMU0_PLLCTL1_WILD_FMSK;
	if (e->wb_frac == 0)
		pllctl |= SSB_PMU0_PLLCTL1_STOPMOD;
	ssb_chipco_pll_write(cc, SSB_PMU0_PLLCTL1, pllctl);

	/* Set WILD in PLL control 2 (upper bits of the integer part). */
	pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL2);
	pllctl &= ~SSB_PMU0_PLLCTL2_WILD_IMSKHI;
	pllctl |= (((u32)e->wb_int >> 4) << SSB_PMU0_PLLCTL2_WILD_IMSKHI_SHIFT) & SSB_PMU0_PLLCTL2_WILD_IMSKHI;
	ssb_chipco_pll_write(cc, SSB_PMU0_PLLCTL2, pllctl);

	/* Set the crystalfrequency and the divisor.
	 * The ILP divider rounds crystalfreq up to a multiple of 128 kHz. */
	pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL);
	pmuctl &= ~SSB_CHIPCO_PMU_CTL_ILP_DIV;
	pmuctl |= (((crystalfreq + 127) / 128 - 1) << SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT)
		  & SSB_CHIPCO_PMU_CTL_ILP_DIV;
	pmuctl &= ~SSB_CHIPCO_PMU_CTL_XTALFREQ;
	pmuctl |= ((u32)e->xf << SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) & SSB_CHIPCO_PMU_CTL_XTALFREQ;
	chipco_write32(cc, SSB_CHIPCO_PMU_CTL, pmuctl);
}
/* One PLL configuration for a PMU revision 1 device; ndiv_int/ndiv_frac
 * and p1div/p2div are written to the PLLCTL0/2/3 fields below. */
struct pmu1_plltab_entry {
	u16 freq;	/* Crystal frequency in kHz.*/
	u8 xf;		/* Crystal frequency value for PMU control */
	u8 ndiv_int;
	u32 ndiv_frac;
	u8 p1div;
	u8 p2div;
};

static const struct pmu1_plltab_entry pmu1_plltab[] = {
	{ .freq = 12000, .xf = 1, .p1div = 3, .p2div = 22, .ndiv_int = 0x9, .ndiv_frac = 0xFFFFEF, },
	{ .freq = 13000, .xf = 2, .p1div = 1, .p2div = 6, .ndiv_int = 0xb, .ndiv_frac = 0x483483, },
	{ .freq = 14400, .xf = 3, .p1div = 1, .p2div = 10, .ndiv_int = 0xa, .ndiv_frac = 0x1C71C7, },
	{ .freq = 15360, .xf = 4, .p1div = 1, .p2div = 5, .ndiv_int = 0xb, .ndiv_frac = 0x755555, },
	{ .freq = 16200, .xf = 5, .p1div = 1, .p2div = 10, .ndiv_int = 0x5, .ndiv_frac = 0x6E9E06, },
	{ .freq = 16800, .xf = 6, .p1div = 1, .p2div = 10, .ndiv_int = 0x5, .ndiv_frac = 0x3CF3CF, },
	{ .freq = 19200, .xf = 7, .p1div = 1, .p2div = 9, .ndiv_int = 0x5, .ndiv_frac = 0x17B425, },
	{ .freq = 19800, .xf = 8, .p1div = 1, .p2div = 11, .ndiv_int = 0x4, .ndiv_frac = 0xA57EB, },
	{ .freq = 20000, .xf = 9, .p1div = 1, .p2div = 11, .ndiv_int = 0x4, .ndiv_frac = 0, },
	{ .freq = 24000, .xf = 10, .p1div = 3, .p2div = 11, .ndiv_int = 0xa, .ndiv_frac = 0, },
	{ .freq = 25000, .xf = 11, .p1div = 5, .p2div = 16, .ndiv_int = 0xb, .ndiv_frac = 0, },
	{ .freq = 26000, .xf = 12, .p1div = 1, .p2div = 2, .ndiv_int = 0x10, .ndiv_frac = 0xEC4EC4, },
	{ .freq = 30000, .xf = 13, .p1div = 3, .p2div = 8, .ndiv_int = 0xb, .ndiv_frac = 0, },
	{ .freq = 38400, .xf = 14, .p1div = 1, .p2div = 5, .ndiv_int = 0x4, .ndiv_frac = 0x955555, },
	{ .freq = 40000, .xf = 15, .p1div = 1, .p2div = 2, .ndiv_int = 0xb, .ndiv_frac = 0, },
};

/* Fallback crystal frequency (kHz) when none is given or matched. */
#define SSB_PMU1_DEFAULT_XTALFREQ	15360
/* Look up the rev-1 PLL table entry for @crystalfreq (kHz); NULL if absent. */
static const struct pmu1_plltab_entry * pmu1_plltab_find_entry(u32 crystalfreq)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pmu1_plltab); i++) {
		if (pmu1_plltab[i].freq == crystalfreq)
			return &pmu1_plltab[i];
	}

	return NULL;
}
/* Tune the PLL to the crystal speed. crystalfreq is in kHz.
 * 0 selects the chip default.  PMU revision 1 devices (4312, 4325). */
static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
				u32 crystalfreq)
{
	struct ssb_bus *bus = cc->dev->bus;
	const struct pmu1_plltab_entry *e = NULL;
	u32 buffer_strength = 0;
	u32 tmp, pllctl, pmuctl;
	unsigned int i;

	if (bus->chip_id == 0x4312) {
		/* We do not touch the BCM4312 PLL and assume
		 * the default crystal settings work out-of-the-box. */
		cc->pmu.crystalfreq = 20000;
		return;
	}

	if (crystalfreq)
		e = pmu1_plltab_find_entry(crystalfreq);
	if (!e)
		e = pmu1_plltab_find_entry(SSB_PMU1_DEFAULT_XTALFREQ);
	BUG_ON(!e);
	crystalfreq = e->freq;
	cc->pmu.crystalfreq = e->freq;

	/* Check if the PLL already is programmed to this frequency. */
	pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL);
	if (((pmuctl & SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) == e->xf) {
		/* We're already there... */
		return;
	}

	ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n",
		   (crystalfreq / 1000), (crystalfreq % 1000));

	/* First turn the PLL off, via the chip-specific PLL power-switch
	 * and HT-avail bits in the min/max resource masks. */
	switch (bus->chip_id) {
	case 0x4325:
		chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK,
			      ~((1 << SSB_PMURES_4325_BBPLL_PWRSW_PU) |
				(1 << SSB_PMURES_4325_HT_AVAIL)));
		chipco_mask32(cc, SSB_CHIPCO_PMU_MAXRES_MSK,
			      ~((1 << SSB_PMURES_4325_BBPLL_PWRSW_PU) |
				(1 << SSB_PMURES_4325_HT_AVAIL)));

		/* Adjust the BBPLL to 2 on all channels later. */
		buffer_strength = 0x222222;
		break;
	default:
		SSB_WARN_ON(1);
	}
	/* Poll (up to 1500 * 10us) until the HT clock signals off. */
	for (i = 1500; i; i--) {
		tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
		if (!(tmp & SSB_CHIPCO_CLKCTLST_HAVEHT))
			break;
		udelay(10);
	}
	tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
	if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
		ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n");

	/* Set p1div and p2div. */
	pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0);
	pllctl &= ~(SSB_PMU1_PLLCTL0_P1DIV | SSB_PMU1_PLLCTL0_P2DIV);
	pllctl |= ((u32)e->p1div << SSB_PMU1_PLLCTL0_P1DIV_SHIFT) & SSB_PMU1_PLLCTL0_P1DIV;
	pllctl |= ((u32)e->p2div << SSB_PMU1_PLLCTL0_P2DIV_SHIFT) & SSB_PMU1_PLLCTL0_P2DIV;
	ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, pllctl);

	/* Set ndiv int and ndiv mode */
	pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL2);
	pllctl &= ~(SSB_PMU1_PLLCTL2_NDIVINT | SSB_PMU1_PLLCTL2_NDIVMODE);
	pllctl |= ((u32)e->ndiv_int << SSB_PMU1_PLLCTL2_NDIVINT_SHIFT) & SSB_PMU1_PLLCTL2_NDIVINT;
	pllctl |= (1 << SSB_PMU1_PLLCTL2_NDIVMODE_SHIFT) & SSB_PMU1_PLLCTL2_NDIVMODE;
	ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, pllctl);

	/* Set ndiv frac */
	pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL3);
	pllctl &= ~SSB_PMU1_PLLCTL3_NDIVFRAC;
	pllctl |= ((u32)e->ndiv_frac << SSB_PMU1_PLLCTL3_NDIVFRAC_SHIFT) & SSB_PMU1_PLLCTL3_NDIVFRAC;
	ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL3, pllctl);

	/* Change the drive strength, if required. */
	if (buffer_strength) {
		pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL5);
		pllctl &= ~SSB_PMU1_PLLCTL5_CLKDRV;
		pllctl |= (buffer_strength << SSB_PMU1_PLLCTL5_CLKDRV_SHIFT) & SSB_PMU1_PLLCTL5_CLKDRV;
		ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, pllctl);
	}

	/* Tune the crystalfreq and the divisor.
	 * The ILP divider rounds the frequency up to a multiple of 128 kHz. */
	pmuctl = chipco_read32(cc, SSB_CHIPCO_PMU_CTL);
	pmuctl &= ~(SSB_CHIPCO_PMU_CTL_ILP_DIV | SSB_CHIPCO_PMU_CTL_XTALFREQ);
	pmuctl |= ((((u32)e->freq + 127) / 128 - 1) << SSB_CHIPCO_PMU_CTL_ILP_DIV_SHIFT)
		  & SSB_CHIPCO_PMU_CTL_ILP_DIV;
	pmuctl |= ((u32)e->xf << SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT) & SSB_CHIPCO_PMU_CTL_XTALFREQ;
	chipco_write32(cc, SSB_CHIPCO_PMU_CTL, pmuctl);
}
/* Initialize the PMU-controlled PLL, dispatching on the chip ID to the
 * appropriate pmu0/pmu1 style helper (or a direct control-word write). */
static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
{
	struct ssb_bus *bus = cc->dev->bus;
	u32 crystalfreq = 0;	/* in kHz. 0 = keep default freq. */

	if (bus->bustype == SSB_BUSTYPE_SSB) {
		/* TODO: The user may override the crystal frequency. */
	}

	switch (bus->chip_id) {
	case 0x4328:
	case 0x5354:
		ssb_pmu0_pllinit_r0(cc, crystalfreq);
		break;
	case 0x4312:
	case 0x4325:
		ssb_pmu1_pllinit_r0(cc, crystalfreq);
		break;
	case 0x4322:
		/* Only PMU revision 2 needs the explicit PLL control word. */
		if (cc->pmu.rev != 2)
			break;
		chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, 0x0000000A);
		chipco_write32(cc, SSB_CHIPCO_PLLCTL_DATA, 0x380005C0);
		break;
	default:
		ssb_printk(KERN_ERR PFX
			   "ERROR: PLL init unknown for device %04X\n",
			   bus->chip_id);
	}
}
/* One row of a PMU resource up/down timing table: for the given resource
 * number, the raw word written to the PMU_RES_UPDNTM register. */
struct pmu_res_updown_tab_entry {
	u8 resource;	/* The resource number */
	u16 updown;	/* The updown value */
};
/* How a dependency-table entry's mask is applied to the selected
 * resource's PMU_RES_DEPMSK register: overwrite, OR in, or clear bits. */
enum pmu_res_depend_tab_task {
	PMU_RES_DEP_SET = 1,
	PMU_RES_DEP_ADD,
	PMU_RES_DEP_REMOVE,
};
/* One row of a PMU resource dependency fixup table. */
struct pmu_res_depend_tab_entry {
	u8 resource;	/* The resource number */
	u8 task;	/* SET | ADD | REMOVE */
	u32 depend;	/* The depend mask */
};
/* Per-resource up/down timing values for the 4328 A0 chip; each row is
 * written verbatim into PMU_RES_UPDNTM by ssb_pmu_resources_init(). */
static const struct pmu_res_updown_tab_entry pmu_res_updown_tab_4328a0[] = {
	{ .resource = SSB_PMURES_4328_EXT_SWITCHER_PWM,	.updown = 0x0101, },
	{ .resource = SSB_PMURES_4328_BB_SWITCHER_PWM,	.updown = 0x1F01, },
	{ .resource = SSB_PMURES_4328_BB_SWITCHER_BURST,	.updown = 0x010F, },
	{ .resource = SSB_PMURES_4328_BB_EXT_SWITCHER_BURST, .updown = 0x0101, },
	{ .resource = SSB_PMURES_4328_ILP_REQUEST,	.updown = 0x0202, },
	{ .resource = SSB_PMURES_4328_RADIO_SWITCHER_PWM,	.updown = 0x0F01, },
	{ .resource = SSB_PMURES_4328_RADIO_SWITCHER_BURST, .updown = 0x0F01, },
	{ .resource = SSB_PMURES_4328_ROM_SWITCH,	.updown = 0x0101, },
	{ .resource = SSB_PMURES_4328_PA_REF_LDO,	.updown = 0x0F01, },
	{ .resource = SSB_PMURES_4328_RADIO_LDO,	.updown = 0x0F01, },
	{ .resource = SSB_PMURES_4328_AFE_LDO,	.updown = 0x0F01, },
	{ .resource = SSB_PMURES_4328_PLL_LDO,	.updown = 0x0F01, },
	{ .resource = SSB_PMURES_4328_BG_FILTBYP,	.updown = 0x0101, },
	{ .resource = SSB_PMURES_4328_TX_FILTBYP,	.updown = 0x0101, },
	{ .resource = SSB_PMURES_4328_RX_FILTBYP,	.updown = 0x0101, },
	{ .resource = SSB_PMURES_4328_XTAL_PU,	.updown = 0x0101, },
	{ .resource = SSB_PMURES_4328_XTAL_EN,	.updown = 0xA001, },
	{ .resource = SSB_PMURES_4328_BB_PLL_FILTBYP,	.updown = 0x0101, },
	{ .resource = SSB_PMURES_4328_RF_PLL_FILTBYP,	.updown = 0x0101, },
	{ .resource = SSB_PMURES_4328_BB_PLL_PU,	.updown = 0x0701, },
};
/* Dependency fixups for the 4328 A0 chip, applied via PMU_RES_DEPMSK. */
static const struct pmu_res_depend_tab_entry pmu_res_depend_tab_4328a0[] = {
	{
		/* Adjust ILP Request to avoid forcing EXT/BB into burst mode. */
		.resource = SSB_PMURES_4328_ILP_REQUEST,
		.task = PMU_RES_DEP_SET,
		.depend = ((1 << SSB_PMURES_4328_EXT_SWITCHER_PWM) |
			   (1 << SSB_PMURES_4328_BB_SWITCHER_PWM)),
	},
};
/* Per-resource up/down timing values for the 4325 A0 chip. */
static const struct pmu_res_updown_tab_entry pmu_res_updown_tab_4325a0[] = {
	{ .resource = SSB_PMURES_4325_XTAL_PU,	.updown = 0x1501, },
};
/* Dependency fixups for the 4325 A0 chip, applied via PMU_RES_DEPMSK. */
static const struct pmu_res_depend_tab_entry pmu_res_depend_tab_4325a0[] = {
	{
		/* Adjust HT-Available dependencies. */
		.resource = SSB_PMURES_4325_HT_AVAIL,
		.task = PMU_RES_DEP_ADD,
		.depend = ((1 << SSB_PMURES_4325_RX_PWRSW_PU) |
			   (1 << SSB_PMURES_4325_TX_PWRSW_PU) |
			   (1 << SSB_PMURES_4325_LOGEN_PWRSW_PU) |
			   (1 << SSB_PMURES_4325_AFE_PWRSW_PU)),
	},
};
/* Program the per-chip PMU resource configuration: up/down timings,
 * dependency masks and the min/max resource masks.
 *
 * Fix: updown_tab_size and depend_tab_size were declared uninitialized
 * and only read when the matching table pointer was non-NULL. The pairing
 * was implicit and trips -Wmaybe-uninitialized; zero-initializing them
 * makes the invariant explicit and harmless if the pairing ever breaks.
 */
static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
{
	struct ssb_bus *bus = cc->dev->bus;
	u32 min_msk = 0, max_msk = 0;
	unsigned int i;
	const struct pmu_res_updown_tab_entry *updown_tab = NULL;
	unsigned int updown_tab_size = 0;
	const struct pmu_res_depend_tab_entry *depend_tab = NULL;
	unsigned int depend_tab_size = 0;

	switch (bus->chip_id) {
	case 0x4312:
		min_msk = 0xCBB;
		break;
	case 0x4322:
		/* We keep the default settings:
		 * min_msk = 0xCBB
		 * max_msk = 0x7FFFF
		 */
		break;
	case 0x4325:
		/* Power OTP down later. */
		min_msk = (1 << SSB_PMURES_4325_CBUCK_BURST) |
			  (1 << SSB_PMURES_4325_LNLDO2_PU);
		if (chipco_read32(cc, SSB_CHIPCO_CHIPSTAT) &
		    SSB_CHIPCO_CHST_4325_PMUTOP_2B)
			min_msk |= (1 << SSB_PMURES_4325_CLDO_CBUCK_BURST);
		/* The PLL may turn on, if it decides so. */
		max_msk = 0xFFFFF;
		updown_tab = pmu_res_updown_tab_4325a0;
		updown_tab_size = ARRAY_SIZE(pmu_res_updown_tab_4325a0);
		depend_tab = pmu_res_depend_tab_4325a0;
		depend_tab_size = ARRAY_SIZE(pmu_res_depend_tab_4325a0);
		break;
	case 0x4328:
		min_msk = (1 << SSB_PMURES_4328_EXT_SWITCHER_PWM) |
			  (1 << SSB_PMURES_4328_BB_SWITCHER_PWM) |
			  (1 << SSB_PMURES_4328_XTAL_EN);
		/* The PLL may turn on, if it decides so. */
		max_msk = 0xFFFFF;
		updown_tab = pmu_res_updown_tab_4328a0;
		updown_tab_size = ARRAY_SIZE(pmu_res_updown_tab_4328a0);
		depend_tab = pmu_res_depend_tab_4328a0;
		depend_tab_size = ARRAY_SIZE(pmu_res_depend_tab_4328a0);
		break;
	case 0x5354:
		/* The PLL may turn on, if it decides so. */
		max_msk = 0xFFFFF;
		break;
	default:
		ssb_printk(KERN_ERR PFX
			   "ERROR: PMU resource config unknown for device %04X\n",
			   bus->chip_id);
	}

	if (updown_tab) {
		/* Each write pair selects a resource, then programs its
		 * up/down timing register. */
		for (i = 0; i < updown_tab_size; i++) {
			chipco_write32(cc, SSB_CHIPCO_PMU_RES_TABSEL,
				       updown_tab[i].resource);
			chipco_write32(cc, SSB_CHIPCO_PMU_RES_UPDNTM,
				       updown_tab[i].updown);
		}
	}
	if (depend_tab) {
		for (i = 0; i < depend_tab_size; i++) {
			/* Select the resource, then apply the mask per task. */
			chipco_write32(cc, SSB_CHIPCO_PMU_RES_TABSEL,
				       depend_tab[i].resource);
			switch (depend_tab[i].task) {
			case PMU_RES_DEP_SET:
				chipco_write32(cc, SSB_CHIPCO_PMU_RES_DEPMSK,
					       depend_tab[i].depend);
				break;
			case PMU_RES_DEP_ADD:
				chipco_set32(cc, SSB_CHIPCO_PMU_RES_DEPMSK,
					     depend_tab[i].depend);
				break;
			case PMU_RES_DEP_REMOVE:
				chipco_mask32(cc, SSB_CHIPCO_PMU_RES_DEPMSK,
					      ~(depend_tab[i].depend));
				break;
			default:
				SSB_WARN_ON(1);
			}
		}
	}

	/* Set the resource masks. */
	if (min_msk)
		chipco_write32(cc, SSB_CHIPCO_PMU_MINRES_MSK, min_msk);
	if (max_msk)
		chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk);
}
/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */
/* Detect the PMU, record its revision and run PLL + resource setup. */
void ssb_pmu_init(struct ssb_chipcommon *cc)
{
	u32 pmucap;

	/* Nothing to do on cores without a PMU. */
	if (!(cc->capabilities & SSB_CHIPCO_CAP_PMU))
		return;

	pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP);
	cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION);

	ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n",
		    cc->pmu.rev, pmucap);

	/* NOILPONW is cleared on PMU rev 1 and set on every other rev. */
	if (cc->pmu.rev != 1)
		chipco_set32(cc, SSB_CHIPCO_PMU_CTL,
			     SSB_CHIPCO_PMU_CTL_NOILPONW);
	else
		chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
			      ~SSB_CHIPCO_PMU_CTL_NOILPONW);

	ssb_pmu_pll_init(cc);
	ssb_pmu_resources_init(cc);
}
/* Program the requested LDO voltage through the PMU regulator control
 * registers. addr selects the regctl register, shift/mask locate the
 * voltage field within it for the given (chip, LDO id) pair. The raw
 * field values are chip-specific magic -- presumably from vendor
 * documentation; confirm against Broadcom specs before changing them. */
void ssb_pmu_set_ldo_voltage(struct ssb_chipcommon *cc,
			     enum ssb_pmu_ldo_volt_id id, u32 voltage)
{
	struct ssb_bus *bus = cc->dev->bus;
	u32 addr, shift, mask;

	switch (bus->chip_id) {
	case 0x4328:
	case 0x5354:
		switch (id) {
		case LDO_VOLT1:
			addr = 2;
			shift = 25;
			mask = 0xF;
			break;
		case LDO_VOLT2:
			addr = 3;
			shift = 1;
			mask = 0xF;
			break;
		case LDO_VOLT3:
			addr = 3;
			shift = 9;
			mask = 0xF;
			break;
		case LDO_PAREF:
			addr = 3;
			shift = 17;
			mask = 0x3F;
			break;
		default:
			SSB_WARN_ON(1);
			return;
		}
		break;
	case 0x4312:
		/* The 4312 only exposes the PA-reference LDO. */
		if (SSB_WARN_ON(id != LDO_PAREF))
			return;
		addr = 0;
		shift = 21;
		mask = 0x3F;
		break;
	default:
		/* Unknown chip: silently ignore the request. */
		return;
	}

	/* Clear the field, then insert the (masked) requested voltage. */
	ssb_chipco_regctl_maskset(cc, addr, ~(mask << shift),
				  (voltage & mask) << shift);
}
/* Enable or disable the PA-reference LDO by toggling its bit in the
 * PMU minimum-resource mask. */
void ssb_pmu_set_ldo_paref(struct ssb_chipcommon *cc, bool on)
{
	struct ssb_bus *bus = cc->dev->bus;
	int res;
	u32 bit;

	switch (bus->chip_id) {
	case 0x4312:
		res = SSB_PMURES_4312_PA_REF_LDO;
		break;
	case 0x4328:
		res = SSB_PMURES_4328_PA_REF_LDO;
		break;
	case 0x5354:
		res = SSB_PMURES_5354_PA_REF_LDO;
		break;
	default:
		/* Unknown chip: nothing to toggle. */
		return;
	}

	bit = 1 << res;
	if (!on)
		chipco_mask32(cc, SSB_CHIPCO_PMU_MINRES_MSK, ~bit);
	else
		chipco_set32(cc, SSB_CHIPCO_PMU_MINRES_MSK, bit);
	/* Dummy read-back; found via mmiotrace -- purpose unconfirmed
	 * (SPEC FIXME in the original). */
	chipco_read32(cc, SSB_CHIPCO_PMU_MINRES_MSK);
}
/* Exported for SSB client drivers that manage the LDOs directly. */
EXPORT_SYMBOL(ssb_pmu_set_ldo_voltage);
EXPORT_SYMBOL(ssb_pmu_set_ldo_paref);
| gpl-2.0 |
iwishiwasgay/starkissed-clone | drivers/md/dm-zero.c | 2689 | 1545 | /*
* Copyright (C) 2003 Christophe Saout <christophe@saout.de>
*
* This file is released under the GPL.
*/
#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#define DM_MSG_PREFIX "zero"
/*
* Construct a dummy mapping that only returns zeros
*/
/*
 * Target constructor: the zero target takes no arguments.
 */
static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	if (argc) {
		ti->error = "No arguments required";
		return -EINVAL;
	}

	/*
	 * Accept discards and drop them silently, so callers never see
	 * -EOPNOTSUPP.
	 */
	ti->num_discard_bios = 1;

	return 0;
}
/*
* Return zeros only on reads
*/
/*
 * Map function: reads are zero-filled, writes are dropped, readahead
 * is rejected outright.
 */
static int zero_map(struct dm_target *ti, struct bio *bio)
{
	unsigned long rw = bio_rw(bio);

	if (rw == READA) {
		/* readahead of null bytes only wastes buffer cache */
		return -EIO;
	}
	if (rw == READ)
		zero_fill_bio(bio);
	/* writes get silently dropped */

	bio_endio(bio, 0);

	/* accepted bio, don't make new request */
	return DM_MAPIO_SUBMITTED;
}
/* device-mapper target registration descriptor for the "zero" target. */
static struct target_type zero_target = {
	.name = "zero",
	.version = {1, 1, 0},
	.module = THIS_MODULE,
	.ctr = zero_ctr,
	.map = zero_map,
};
/* Module init: register the "zero" target with device-mapper. */
static int __init dm_zero_init(void)
{
	int rc;

	rc = dm_register_target(&zero_target);
	if (rc < 0)
		DMERR("register failed %d", rc);

	return rc;
}
/* Module exit: unregister the "zero" target. */
static void __exit dm_zero_exit(void)
{
	dm_unregister_target(&zero_target);
}
/* Standard module entry/exit hooks and metadata. */
module_init(dm_zero_init)
module_exit(dm_zero_exit)
MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros");
MODULE_LICENSE("GPL");
| gpl-2.0 |
RobinSystems/linux-3.13 | drivers/media/pci/saa7164/saa7164-api.c | 2945 | 44815 | /*
* Driver for the NXP SAA7164 PCIe bridge
*
* Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/wait.h>
#include <linux/slab.h>
#include "saa7164.h"
/* Query the firmware for its current CPU load and log it.
 * No-op unless the DBGLVL_CPU debug flag is enabled.
 * Returns 0 when skipped, otherwise the saa7164_cmd_send() status.
 *
 * Fix: the KERN_INFO printk was missing its trailing '\n', so the CPU
 * load message merged with whatever was logged next.
 */
int saa7164_api_get_load_info(struct saa7164_dev *dev, struct tmFwInfoStruct *i)
{
	int ret;

	if (!(saa_debug & DBGLVL_CPU))
		return 0;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	/* Clear the request fields before asking for firmware status. */
	i->deviceinst = 0;
	i->devicespec = 0;
	i->mode = 0;
	i->status = 0;

	ret = saa7164_cmd_send(dev, 0, GET_CUR,
		GET_FW_STATUS_CONTROL, sizeof(struct tmFwInfoStruct), i);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	printk(KERN_INFO "saa7164[%d]-CPU: %d percent\n", dev->nr, i->CPULoad);

	return ret;
}
/* Fetch queued firmware debug messages (up to 255 per call) and print
 * them to the kernel log. Always returns 0. */
int saa7164_api_collect_debug(struct saa7164_dev *dev)
{
	struct tmComResDebugGetData d;
	u8 more = 255;
	int ret;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	while (more--) {
		memset(&d, 0, sizeof(d));

		ret = saa7164_cmd_send(dev, 0, GET_CUR,
			GET_DEBUG_DATA_CONTROL, sizeof(d), &d);
		if (ret != SAA_OK)
			printk(KERN_ERR "%s() error, ret = 0x%x\n",
				__func__, ret);

		/* Non-OK dwResult presumably means "no more messages"
		 * -- stop draining. */
		if (d.dwResult != SAA_OK)
			break;

		printk(KERN_INFO "saa7164[%d]-FWMSG: %s", dev->nr,
			d.ucDebugData);
	}

	return 0;
}
/* Change the firmware's debug verbosity to the given level.
 * Returns the status of the final SET command. */
int saa7164_api_set_debug(struct saa7164_dev *dev, u8 level)
{
	struct tmComResDebugSetLevel dbg;
	int rc;

	dprintk(DBGLVL_API, "%s(level=%d)\n", __func__, level);

	/* Read the firmware's current debug level first. */
	rc = saa7164_cmd_send(dev, 0, GET_CUR,
		SET_DEBUG_LEVEL_CONTROL, sizeof(dbg), &dbg);
	if (rc != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, rc);

	dprintk(DBGLVL_API, "%s() Was %d\n", __func__, dbg.dwDebugLevel);

	/* Then overwrite it with the requested level. */
	dbg.dwDebugLevel = level;
	rc = saa7164_cmd_send(dev, 0, SET_CUR,
		SET_DEBUG_LEVEL_CONTROL, sizeof(dbg), &dbg);
	if (rc != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, rc);

	return rc;
}
/* Negotiate and commit the VBI format on this port's hardware unit.
 *
 * Follows the firmware's probe/commit handshake: SET the proposed
 * format on SAA_PROBE_CONTROL, GET it back, and only if the device
 * echoes the request unchanged, SET/GET it on SAA_COMMIT_CONTROL to
 * activate it. Returns the status of the last command issued. */
int saa7164_api_set_vbi_format(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResProbeCommit fmt, rsp;
	int ret;

	dprintk(DBGLVL_API, "%s(nr=%d, unitid=0x%x)\n", __func__,
		port->nr, port->hwcfg.unitid);

	fmt.bmHint = 0;
	fmt.bFormatIndex = 1;
	fmt.bFrameIndex = 1;

	/* Probe, see if it can support this format */
	ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
		SET_CUR, SAA_PROBE_CONTROL, sizeof(fmt), &fmt);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() set error, ret = 0x%x\n", __func__, ret);

	/* See if the format change was successful */
	ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
		GET_CUR, SAA_PROBE_CONTROL, sizeof(rsp), &rsp);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() get error, ret = 0x%x\n", __func__, ret);
	} else {
		/* Compare requested vs received, should be same */
		if (memcmp(&fmt, &rsp, sizeof(rsp)) == 0) {
			dprintk(DBGLVL_API, "SET/PROBE Verified\n");

			/* Ask the device to select the negotiated format */
			ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
				SET_CUR, SAA_COMMIT_CONTROL, sizeof(fmt), &fmt);
			if (ret != SAA_OK)
				printk(KERN_ERR "%s() commit error, ret = 0x%x\n",
					__func__, ret);

			ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
				GET_CUR, SAA_COMMIT_CONTROL, sizeof(rsp), &rsp);
			if (ret != SAA_OK)
				printk(KERN_ERR "%s() GET commit error, ret = 0x%x\n",
					__func__, ret);

			/* The commit read-back must also match the request. */
			if (memcmp(&fmt, &rsp, sizeof(rsp)) != 0) {
				printk(KERN_ERR "%s() memcmp error, ret = 0x%x\n",
					__func__, ret);
			} else
				dprintk(DBGLVL_API, "SET/COMMIT Verified\n");

			dprintk(DBGLVL_API, "rsp.bmHint = 0x%x\n", rsp.bmHint);
			dprintk(DBGLVL_API, "rsp.bFormatIndex = 0x%x\n",
				rsp.bFormatIndex);
			dprintk(DBGLVL_API, "rsp.bFrameIndex = 0x%x\n",
				rsp.bFrameIndex);
		} else
			printk(KERN_ERR "%s() compare failed\n", __func__);
	}

	if (ret == SAA_OK)
		dprintk(DBGLVL_API, "%s(nr=%d) Success\n", __func__, port->nr);

	return ret;
}
static int saa7164_api_set_gop_size(struct saa7164_port *port)
{
struct saa7164_dev *dev = port->dev;
struct tmComResEncVideoGopStructure gs;
int ret;
dprintk(DBGLVL_ENC, "%s()\n", __func__);
gs.ucRefFrameDist = port->encoder_params.refdist;
gs.ucGOPSize = port->encoder_params.gop_size;
ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
EU_VIDEO_GOP_STRUCTURE_CONTROL,
sizeof(gs), &gs);
if (ret != SAA_OK)
printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
return ret;
}
/* Configure the hardware encoder: profile, video and audio bitrates,
 * then aspect ratio and GOP structure via the dedicated helpers.
 * Returns the status of the last bitrate command. */
int saa7164_api_set_encoder(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResEncVideoBitRate vb;
	struct tmComResEncAudioBitRate ab;
	int ret;

	dprintk(DBGLVL_ENC, "%s() unitid=0x%x\n", __func__,
		port->hwcfg.sourceid);

	/* Map the V4L2 stream type onto a firmware encoder profile. */
	if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS)
		port->encoder_profile = EU_PROFILE_PS_DVD;
	else
		port->encoder_profile = EU_PROFILE_TS_HQ;

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
		EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Resolution */
	/* NOTE(review): this sends EU_PROFILE_CONTROL a second time under
	 * a "Resolution" comment -- looks like a copy/paste leftover;
	 * confirm whether EU_VIDEO_RESOLUTION_CONTROL was intended. */
	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
		EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Establish video bitrates */
	if (port->encoder_params.bitrate_mode ==
	    V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
		vb.ucVideoBitRateMode = EU_VIDEO_BIT_RATE_MODE_CONSTANT;
	else
		vb.ucVideoBitRateMode = EU_VIDEO_BIT_RATE_MODE_VARIABLE_PEAK;
	vb.dwVideoBitRate = port->encoder_params.bitrate;
	vb.dwVideoBitRatePeak = port->encoder_params.bitrate_peak;
	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
		EU_VIDEO_BIT_RATE_CONTROL,
		sizeof(struct tmComResEncVideoBitRate),
		&vb);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Establish audio bitrates (fixed 384 kbps, constant mode). */
	ab.ucAudioBitRateMode = 0;
	ab.dwAudioBitRate = 384000;
	ab.dwAudioBitRatePeak = ab.dwAudioBitRate;
	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
		EU_AUDIO_BIT_RATE_CONTROL,
		sizeof(struct tmComResEncAudioBitRate),
		&ab);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__,
			ret);

	saa7164_api_set_aspect_ratio(port);
	saa7164_api_set_gop_size(port);

	return ret;
}
/* Read back the encoder configuration (profile, resolution, formats,
 * bitrates, aspect ratio) into the port structure and log it.
 * Returns the status of the last GET command; note that on a failed
 * GET the corresponding local struct (v/a/ar) keeps its previous
 * contents and is still printed below. */
int saa7164_api_get_encoder(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResEncVideoBitRate v;
	struct tmComResEncAudioBitRate a;
	struct tmComResEncVideoInputAspectRatio ar;
	int ret;

	dprintk(DBGLVL_ENC, "%s() unitid=0x%x\n", __func__,
		port->hwcfg.sourceid);

	port->encoder_profile = 0;
	port->video_format = 0;
	port->video_resolution = 0;
	port->audio_format = 0;

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_VIDEO_RESOLUTION_CONTROL, sizeof(u8),
		&port->video_resolution);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_VIDEO_FORMAT_CONTROL, sizeof(u8), &port->video_format);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_VIDEO_BIT_RATE_CONTROL, sizeof(v), &v);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_AUDIO_FORMAT_CONTROL, sizeof(u8), &port->audio_format);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_AUDIO_BIT_RATE_CONTROL, sizeof(a), &a);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Aspect Ratio */
	ar.width = 0;
	ar.height = 0;
	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_VIDEO_INPUT_ASPECT_CONTROL,
		sizeof(struct tmComResEncVideoInputAspectRatio), &ar);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	dprintk(DBGLVL_ENC, "encoder_profile = %d\n", port->encoder_profile);
	dprintk(DBGLVL_ENC, "video_format    = %d\n", port->video_format);
	dprintk(DBGLVL_ENC, "audio_format    = %d\n", port->audio_format);
	dprintk(DBGLVL_ENC, "video_resolution= %d\n", port->video_resolution);
	dprintk(DBGLVL_ENC, "v.ucVideoBitRateMode = %d\n",
		v.ucVideoBitRateMode);
	dprintk(DBGLVL_ENC, "v.dwVideoBitRate     = %d\n",
		v.dwVideoBitRate);
	dprintk(DBGLVL_ENC, "v.dwVideoBitRatePeak = %d\n",
		v.dwVideoBitRatePeak);
	dprintk(DBGLVL_ENC, "a.ucVideoBitRateMode = %d\n",
		a.ucAudioBitRateMode);
	dprintk(DBGLVL_ENC, "a.dwVideoBitRate     = %d\n",
		a.dwAudioBitRate);
	dprintk(DBGLVL_ENC, "a.dwVideoBitRatePeak = %d\n",
		a.dwAudioBitRatePeak);
	dprintk(DBGLVL_ENC, "aspect.width / height = %d:%d\n",
		ar.width, ar.height);

	return ret;
}
/* Translate the V4L2 aspect-ratio control into a width:height pair and
 * program it on the encoder unit. BUGs on an unknown control value. */
int saa7164_api_set_aspect_ratio(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResEncVideoInputAspectRatio ar;
	int ret;
	int aspect = port->encoder_params.ctl_aspect;

	dprintk(DBGLVL_ENC, "%s(%d)\n", __func__,
		port->encoder_params.ctl_aspect);

	if (aspect == V4L2_MPEG_VIDEO_ASPECT_1x1) {
		ar.width = 1;
		ar.height = 1;
	} else if (aspect == V4L2_MPEG_VIDEO_ASPECT_4x3) {
		ar.width = 4;
		ar.height = 3;
	} else if (aspect == V4L2_MPEG_VIDEO_ASPECT_16x9) {
		ar.width = 16;
		ar.height = 9;
	} else if (aspect == V4L2_MPEG_VIDEO_ASPECT_221x100) {
		ar.width = 221;
		ar.height = 100;
	} else {
		BUG();
	}

	dprintk(DBGLVL_ENC, "%s(%d) now %d:%d\n", __func__,
		port->encoder_params.ctl_aspect,
		ar.width, ar.height);

	/* Aspect Ratio */
	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
		EU_VIDEO_INPUT_ASPECT_CONTROL,
		sizeof(struct tmComResEncVideoInputAspectRatio), &ar);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	return ret;
}
/* Push one cached picture-control value (brightness/contrast/hue/
 * saturation/sharpness) to the hardware processing unit.
 * Returns -EINVAL for an unrecognized control id. */
int saa7164_api_set_usercontrol(struct saa7164_port *port, u8 ctl)
{
	struct saa7164_dev *dev = port->dev;
	int ret;
	u16 val;

	switch (ctl) {
	case PU_BRIGHTNESS_CONTROL:
		val = port->ctl_brightness;
		break;
	case PU_CONTRAST_CONTROL:
		val = port->ctl_contrast;
		break;
	case PU_HUE_CONTROL:
		val = port->ctl_hue;
		break;
	case PU_SATURATION_CONTROL:
		val = port->ctl_saturation;
		break;
	case PU_SHARPNESS_CONTROL:
		val = port->ctl_sharpness;
		break;
	default:
		return -EINVAL;
	}

	dprintk(DBGLVL_ENC, "%s() unitid=0x%x ctl=%d, val=%d\n",
		__func__, port->encunit.vsourceid, ctl, val);

	ret = saa7164_cmd_send(port->dev, port->encunit.vsourceid, SET_CUR,
		ctl, sizeof(u16), &val);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	return ret;
}
/* Read one picture-control value from the hardware and cache it in the
 * matching port field. Unknown control ids leave the cache untouched. */
int saa7164_api_get_usercontrol(struct saa7164_port *port, u8 ctl)
{
	struct saa7164_dev *dev = port->dev;
	int ret;
	u16 val;

	ret = saa7164_cmd_send(port->dev, port->encunit.vsourceid, GET_CUR,
		ctl, sizeof(u16), &val);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
		return ret;
	}

	dprintk(DBGLVL_ENC, "%s() ctl=%d, val=%d\n",
		__func__, ctl, val);

	switch (ctl) {
	case PU_BRIGHTNESS_CONTROL:
		port->ctl_brightness = val;
		break;
	case PU_CONTRAST_CONTROL:
		port->ctl_contrast = val;
		break;
	case PU_HUE_CONTROL:
		port->ctl_hue = val;
		break;
	case PU_SATURATION_CONTROL:
		port->ctl_saturation = val;
		break;
	case PU_SHARPNESS_CONTROL:
		port->ctl_sharpness = val;
		break;
	}

	return ret;
}
/* Select the video mux input and its matching audio mux input, muting
 * the audio around the switch to avoid pops.
 * inputs[] maps (video mux input - 1) -> audio mux input; assumes
 * port->mux_input is in 1..7 -- TODO confirm callers validate this,
 * otherwise inputs[port->mux_input - 1] indexes out of bounds. */
int saa7164_api_set_videomux(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	u8 inputs[] = { 1, 2, 2, 2, 5, 5, 5 };
	int ret;

	dprintk(DBGLVL_ENC, "%s() v_mux=%d a_mux=%d\n",
		__func__, port->mux_input, inputs[port->mux_input - 1]);

	/* Audio Mute */
	ret = saa7164_api_audio_mute(port, 1);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Video Mux */
	ret = saa7164_cmd_send(port->dev, port->vidproc.sourceid, SET_CUR,
		SU_INPUT_SELECT_CONTROL, sizeof(u8), &port->mux_input);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Audio Mux */
	ret = saa7164_cmd_send(port->dev, port->audfeat.sourceid, SET_CUR,
		SU_INPUT_SELECT_CONTROL, sizeof(u8),
		&inputs[port->mux_input - 1]);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Audio UnMute */
	ret = saa7164_api_audio_mute(port, 0);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	return ret;
}
int saa7164_api_audio_mute(struct saa7164_port *port, int mute)
{
struct saa7164_dev *dev = port->dev;
u8 v = mute;
int ret;
dprintk(DBGLVL_API, "%s(%d)\n", __func__, mute);
ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
MUTE_CONTROL, sizeof(u8), &v);
if (ret != SAA_OK)
printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
return ret;
}
/* 0 = silence, 0xff = full */
/* Set the audio volume on both channels, clamped to the device's
 * reported min/max range. The high byte of the control selector
 * (0x01 / 0x02) addresses the left / right channel respectively. */
int saa7164_api_set_audio_volume(struct saa7164_port *port, s8 level)
{
	struct saa7164_dev *dev = port->dev;
	s16 v, min, max;
	int ret;

	dprintk(DBGLVL_API, "%s(%d)\n", __func__, level);

	/* Obtain the min/max ranges */
	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_MIN,
		VOLUME_CONTROL, sizeof(u16), &min);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_MAX,
		VOLUME_CONTROL, sizeof(u16), &max);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_CUR,
		(0x01 << 8) | VOLUME_CONTROL, sizeof(u16), &v);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	dprintk(DBGLVL_API, "%s(%d) min=%d max=%d cur=%d\n", __func__,
		level, min, max, v);

	/* Clamp the requested level into the device's range. */
	v = level;
	if (v < min)
		v = min;
	if (v > max)
		v = max;

	/* Left */
	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
		(0x01 << 8) | VOLUME_CONTROL, sizeof(s16), &v);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Right */
	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
		(0x02 << 8) | VOLUME_CONTROL, sizeof(s16), &v);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Read back the left channel to confirm/log the result. */
	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_CUR,
		(0x01 << 8) | VOLUME_CONTROL, sizeof(u16), &v);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	dprintk(DBGLVL_API, "%s(%d) min=%d max=%d cur=%d\n", __func__,
		level, min, max, v);

	return ret;
}
/* Program default audio levels and select the TV audio standard
 * (NTSC-M for NTSC ports, PAL-I otherwise) on the tuner unit. */
int saa7164_api_set_audio_std(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResAudioDefaults lvl;
	struct tmComResTunerStandard tvaudio;
	int ret;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	/* Establish default levels */
	lvl.ucDecoderLevel = TMHW_LEV_ADJ_DECLEV_DEFAULT;
	lvl.ucDecoderFM_Level = TMHW_LEV_ADJ_DECLEV_DEFAULT;
	lvl.ucMonoLevel = TMHW_LEV_ADJ_MONOLEV_DEFAULT;
	lvl.ucNICAM_Level = TMHW_LEV_ADJ_NICLEV_DEFAULT;
	lvl.ucSAP_Level = TMHW_LEV_ADJ_SAPLEV_DEFAULT;
	lvl.ucADC_Level = TMHW_LEV_ADJ_ADCLEV_DEFAULT;
	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
		AUDIO_DEFAULT_CONTROL, sizeof(struct tmComResAudioDefaults),
		&lvl);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Manually select the appropriate TV audio standard */
	if (port->encodernorm.id & V4L2_STD_NTSC) {
		tvaudio.std = TU_STANDARD_NTSC_M;
		tvaudio.country = 1;
	} else {
		tvaudio.std = TU_STANDARD_PAL_I;
		tvaudio.country = 44;
	}

	ret = saa7164_cmd_send(port->dev, port->tunerunit.unitid, SET_CUR,
		TU_STANDARD_CONTROL, sizeof(tvaudio), &tvaudio);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() TU_STANDARD_CONTROL error, ret = 0x%x\n",
			__func__, ret);
	return ret;
}
/* Switch TV-audio standard detection between automatic and manual. */
int saa7164_api_set_audio_detection(struct saa7164_port *port, int autodetect)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResTunerStandardAuto p;
	int ret;

	dprintk(DBGLVL_API, "%s(%d)\n", __func__, autodetect);

	/* Disable TV Audio autodetect if not already set (buggy) */
	p.mode = autodetect ? TU_STANDARD_AUTO : TU_STANDARD_MANUAL;
	ret = saa7164_cmd_send(port->dev, port->tunerunit.unitid, SET_CUR,
		TU_STANDARD_AUTO_CONTROL, sizeof(p), &p);
	if (ret != SAA_OK)
		printk(KERN_ERR
			"%s() TU_STANDARD_AUTO_CONTROL error, ret = 0x%x\n",
			__func__, ret);

	return ret;
}
/* Read the currently selected video mux input into port->mux_input. */
int saa7164_api_get_videomux(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	int ret;

	ret = saa7164_cmd_send(port->dev, port->vidproc.sourceid, GET_CUR,
		SU_INPUT_SELECT_CONTROL, sizeof(u8), &port->mux_input);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	dprintk(DBGLVL_ENC, "%s() v_mux=%d\n",
		__func__, port->mux_input);

	return ret;
}
/* Write a single DIF register through the IF unit's register-access
 * control. The 16-byte command is hand-built; its exact byte layout is
 * firmware-defined -- the constants below (including the per-port
 * 0xd0/0xe0 selector and the 0x26/0xb0 bytes) are taken as-is from the
 * original implementation and should not be changed without vendor
 * documentation. Returns 0 on success, -EIO on any command failure. */
static int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val)
{
	struct saa7164_dev *dev = port->dev;
	u16 len = 0;
	u8 buf[256];
	int ret;
	u8 mas;

	dprintk(DBGLVL_API, "%s(nr=%d type=%d val=%x)\n", __func__,
		port->nr, port->type, val);

	/* Port 0 and port 1 address different DIF instances. */
	if (port->nr == 0)
		mas = 0xd0;
	else
		mas = 0xe0;

	memset(buf, 0, sizeof(buf));

	buf[0x00] = 0x04;
	buf[0x01] = 0x00;
	buf[0x02] = 0x00;
	buf[0x03] = 0x00;

	buf[0x04] = 0x04;
	buf[0x05] = 0x00;
	buf[0x06] = 0x00;
	buf[0x07] = 0x00;

	buf[0x08] = reg;
	buf[0x09] = 0x26;
	buf[0x0a] = mas;
	buf[0x0b] = 0xb0;

	buf[0x0c] = val;
	buf[0x0d] = 0x00;
	buf[0x0e] = 0x00;
	buf[0x0f] = 0x00;

	/* Query the expected command length before sending the payload. */
	ret = saa7164_cmd_send(dev, port->ifunit.unitid, GET_LEN,
		EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret);
		return -EIO;
	}

	ret = saa7164_cmd_send(dev, port->ifunit.unitid, SET_CUR,
		EXU_REGISTER_ACCESS_CONTROL, len, &buf);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret);
#if 0
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, buf, 16,
		       false);
#endif
	return ret == SAA_OK ? 0 : -EIO;
}
/* Disable the IF block AGC controls */
/* Configure the DIF (downconverter/IF) block for the given video
 * standard: program the standard-select register, the AGC registers
 * and the CVBS equalizer, then pulse the Active register. For unknown
 * standards the block is assumed to be in DTV use and the AGC output
 * is disabled. Always returns 0. */
int saa7164_api_configure_dif(struct saa7164_port *port, u32 std)
{
	struct saa7164_dev *dev = port->dev;
	int ret = 0;
	u8 agc_disable;

	dprintk(DBGLVL_API, "%s(nr=%d, 0x%x)\n", __func__, port->nr, std);

	if (std & V4L2_STD_NTSC) {
		dprintk(DBGLVL_API, " NTSC\n");
		saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_I) {
		dprintk(DBGLVL_API, " PAL-I\n");
		saa7164_api_set_dif(port, 0x00, 0x08); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_M) {
		dprintk(DBGLVL_API, " PAL-M\n");
		saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_N) {
		dprintk(DBGLVL_API, " PAL-N\n");
		saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_Nc) {
		dprintk(DBGLVL_API, " PAL-Nc\n");
		saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_B) {
		dprintk(DBGLVL_API, " PAL-B\n");
		saa7164_api_set_dif(port, 0x00, 0x02); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_DK) {
		dprintk(DBGLVL_API, " PAL-DK\n");
		saa7164_api_set_dif(port, 0x00, 0x10); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_SECAM_L) {
		dprintk(DBGLVL_API, " SECAM-L\n");
		saa7164_api_set_dif(port, 0x00, 0x20); /* Video Standard */
		agc_disable = 0;
	} else {
		/* Unknown standard, assume DTV */
		dprintk(DBGLVL_API, " Unknown (assuming DTV)\n");
		/* Undefined Video Standard */
		saa7164_api_set_dif(port, 0x00, 0x80);
		agc_disable = 1;
	}

	saa7164_api_set_dif(port, 0x48, 0xa0); /* AGC Functions 1 */
	saa7164_api_set_dif(port, 0xc0, agc_disable); /* AGC Output Disable */
	saa7164_api_set_dif(port, 0x7c, 0x04); /* CVBS EQ */
	saa7164_api_set_dif(port, 0x04, 0x01); /* Active */
	msleep(100);
	saa7164_api_set_dif(port, 0x04, 0x00); /* Active (again) */
	msleep(100);

	return ret;
}
/* Ensure the dif is in the correct state for the operating mode
* (analog / dtv). We only configure the diff through the analog encoder
* so when we're in digital mode we need to find the appropriate encoder
* and use it to configure the DIF.
*/
/* Ensure the DIF is in the correct state for this port's operating
 * mode. The DIF is only programmable through an analog encoder port,
 * so DVB and VBI ports locate their sibling encoder port and configure
 * the DIF through it. Encoder ports default to NTSC here; the real
 * standard is set later during encoder init. BUGs on an unknown port
 * type. Returns the saa7164_api_configure_dif() status, or -EINVAL if
 * no encoder port was resolved. */
int saa7164_api_initialize_dif(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	struct saa7164_port *p = NULL;
	int ret = -EINVAL;
	u32 std = 0;

	dprintk(DBGLVL_API, "%s(nr=%d type=%d)\n", __func__,
		port->nr, port->type);

	if (port->type == SAA7164_MPEG_ENCODER) {
		/* Pick any analog standard to init the diff.
		 * we'll come back during encoder_init'
		 * and set the correct standard if requried.
		 */
		std = V4L2_STD_NTSC;
	} else
	if (port->type == SAA7164_MPEG_DVB) {
		if (port->nr == SAA7164_PORT_TS1)
			p = &dev->ports[SAA7164_PORT_ENC1];
		else
			p = &dev->ports[SAA7164_PORT_ENC2];
	} else
	if (port->type == SAA7164_MPEG_VBI) {
		std = V4L2_STD_NTSC;
		if (port->nr == SAA7164_PORT_VBI1)
			p = &dev->ports[SAA7164_PORT_ENC1];
		else
			p = &dev->ports[SAA7164_PORT_ENC2];
	} else
		BUG();

	/* NOTE(review): for SAA7164_MPEG_ENCODER, p stays NULL and the
	 * DIF is not configured here -- presumably handled during
	 * encoder init; confirm. */
	if (p)
		ret = saa7164_api_configure_dif(p, std);

	return ret;
}
/* Move the port's hardware unit into the given run/pause/stop state
 * via SAA_STATE_CONTROL. */
int saa7164_api_transition_port(struct saa7164_port *port, u8 mode)
{
	struct saa7164_dev *dev = port->dev;
	int rc;

	dprintk(DBGLVL_API, "%s(nr=%d unitid=0x%x,%d)\n",
		__func__, port->nr, port->hwcfg.unitid, mode);

	rc = saa7164_cmd_send(port->dev, port->hwcfg.unitid, SET_CUR,
		SAA_STATE_CONTROL, sizeof(mode), &mode);
	if (rc != SAA_OK)
		printk(KERN_ERR "%s(portnr %d unitid 0x%x) error, ret = 0x%x\n",
			__func__, port->nr, port->hwcfg.unitid, rc);

	return rc;
}
/* Read the firmware version word into *version. */
int saa7164_api_get_fw_version(struct saa7164_dev *dev, u32 *version)
{
	int rc;

	rc = saa7164_cmd_send(dev, 0, GET_CUR,
		GET_FW_VERSION_CONTROL, sizeof(u32), version);
	if (rc != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, rc);

	return rc;
}
/* Read the first 128 bytes of the board eeprom into buf.
 * Returns -ENOMEM if buf is smaller than 128 bytes, otherwise the
 * i2c read status.
 *
 * Fix: the register-pointer argument had been corrupted to the
 * mojibake byte sequence "(R)[0]" (an HTML-entity mangling of
 * "&reg[0]"), which does not compile; restored the address-of
 * expression.
 */
int saa7164_api_read_eeprom(struct saa7164_dev *dev, u8 *buf, int buflen)
{
	u8 reg[] = { 0x0f, 0x00 };	/* eeprom-internal start offset */

	if (buflen < 128)
		return -ENOMEM;

	/* Assumption: Hauppauge eeprom is at 0xa0 on bus 0 */
	/* TODO: Pull the details from the boards struct */
	return saa7164_api_i2c_read(&dev->i2c_bus[0], 0xa0 >> 1, sizeof(reg),
		&reg[0], 128, buf);
}
/* Cache the VBI port's buffer-management register offsets, derived
 * from the BAR location the firmware reported in port->hwcfg.
 * Always returns 0. */
static int saa7164_api_configure_port_vbi(struct saa7164_dev *dev,
					  struct saa7164_port *port)
{
	struct tmComResVBIFormatDescrHeader *fmt = &port->vbi_fmt_ntsc;

	dprintk(DBGLVL_API, "    bFormatIndex  = 0x%x\n", fmt->bFormatIndex);
	dprintk(DBGLVL_API, "    VideoStandard = 0x%x\n", fmt->VideoStandard);
	dprintk(DBGLVL_API, "    StartLine     = %d\n", fmt->StartLine);
	dprintk(DBGLVL_API, "    EndLine       = %d\n", fmt->EndLine);
	dprintk(DBGLVL_API, "    FieldRate     = %d\n", fmt->FieldRate);
	dprintk(DBGLVL_API, "    bNumLines     = %d\n", fmt->bNumLines);

	/* Cache the hardware configuration in the port */

	port->bufcounter = port->hwcfg.BARLocation;
	port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32));
	port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32));
	port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32));
	/* NOTE(review): bufptr32l sits one u32 past bufptr32h, and
	 * bufptr64 aliases bufptr32h (the low/high halves of a 64-bit
	 * pointer register) -- presumably intentional; confirm against
	 * the register map before changing. */
	port->bufptr32l = port->hwcfg.BARLocation +
		(4 * sizeof(u32)) +
		(sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32);
	port->bufptr32h = port->hwcfg.BARLocation +
		(4 * sizeof(u32)) +
		(sizeof(u32) * port->hwcfg.buffercount);
	port->bufptr64 = port->hwcfg.BARLocation +
		(4 * sizeof(u32)) +
		(sizeof(u32) * port->hwcfg.buffercount);
	dprintk(DBGLVL_API, "   = port->hwcfg.BARLocation = 0x%x\n",
		port->hwcfg.BARLocation);

	dprintk(DBGLVL_API, "   = VS_FORMAT_VBI (becomes dev->en[%d])\n",
		port->nr);

	return 0;
}
/* Dump the MPEG2-TS format descriptor @tsfmt for a TS output @port and
 * derive the port's DMA buffer tracking register offsets from the
 * hardware BAR location reported in port->hwcfg.  Always returns 0.
 */
static int
saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev,
	struct saa7164_port *port,
	struct tmComResTSFormatDescrHeader *tsfmt)
{
	dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", tsfmt->bFormatIndex);
	dprintk(DBGLVL_API, " bDataOffset = 0x%x\n", tsfmt->bDataOffset);
	dprintk(DBGLVL_API, " bPacketLength= 0x%x\n", tsfmt->bPacketLength);
	dprintk(DBGLVL_API, " bStrideLength= 0x%x\n", tsfmt->bStrideLength);
	dprintk(DBGLVL_API, " bguid = (....)\n");

	/* Cache the hardware configuration in the port */
	port->bufcounter = port->hwcfg.BARLocation;
	port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32));
	port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32));
	port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32));
	/* NOTE(review): same BAR layout as the VBI/PS variants — bufptr32h
	 * and bufptr64 share an offset, bufptr32l is one u32 later; confirm
	 * against the hardware register map.
	 */
	port->bufptr32l = port->hwcfg.BARLocation +
		(4 * sizeof(u32)) +
		(sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32);
	port->bufptr32h = port->hwcfg.BARLocation +
		(4 * sizeof(u32)) +
		(sizeof(u32) * port->hwcfg.buffercount);
	port->bufptr64 = port->hwcfg.BARLocation +
		(4 * sizeof(u32)) +
		(sizeof(u32) * port->hwcfg.buffercount);

	dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n",
		port->hwcfg.BARLocation);
	dprintk(DBGLVL_API, " = VS_FORMAT_MPEGTS (becomes dev->ts[%d])\n",
		port->nr);

	return 0;
}
/* Dump the MPEG2-PS format descriptor @fmt for an encoder @port and
 * derive the port's DMA buffer tracking register offsets from the
 * hardware BAR location reported in port->hwcfg.  Always returns 0.
 */
static int
saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev,
	struct saa7164_port *port,
	struct tmComResPSFormatDescrHeader *fmt)
{
	dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex);
	dprintk(DBGLVL_API, " wPacketLength= 0x%x\n", fmt->wPacketLength);
	dprintk(DBGLVL_API, " wPackLength= 0x%x\n", fmt->wPackLength);
	dprintk(DBGLVL_API, " bPackDataType= 0x%x\n", fmt->bPackDataType);

	/* Cache the hardware configuration in the port */
	/* TODO: CHECK THIS in the port config */
	port->bufcounter = port->hwcfg.BARLocation;
	port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32));
	port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32));
	port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32));
	/* NOTE(review): same BAR layout as the VBI/TS variants — bufptr32h
	 * and bufptr64 share an offset, bufptr32l is one u32 later; confirm
	 * against the hardware register map.
	 */
	port->bufptr32l = port->hwcfg.BARLocation +
		(4 * sizeof(u32)) +
		(sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32);
	port->bufptr32h = port->hwcfg.BARLocation +
		(4 * sizeof(u32)) +
		(sizeof(u32) * port->hwcfg.buffercount);
	port->bufptr64 = port->hwcfg.BARLocation +
		(4 * sizeof(u32)) +
		(sizeof(u32) * port->hwcfg.buffercount);

	dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n",
		port->hwcfg.BARLocation);
	dprintk(DBGLVL_API, " = VS_FORMAT_MPEGPS (becomes dev->enc[%d])\n",
		port->nr);

	return 0;
}
/* Walk the firmware descriptor blob in @buf (@len bytes): a sequence of
 * tmComRes* descriptor headers, each carrying its own length.  Descriptors
 * are logged, and the ones that describe output terminals (TS/PS/VBI) or
 * units on the current tuner path (tuner, selector, processor, feature,
 * encoder, extension) have their configuration cached into the matching
 * dev->ports[] entry.  Returns 0, or SAA_ERR_NOT_SUPPORTED on the first
 * descriptor that is not CS_INTERFACE.
 */
static int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
{
	struct saa7164_port *tsport = NULL;
	struct saa7164_port *encport = NULL;
	struct saa7164_port *vbiport = NULL;
	u32 idx, next_offset;
	int i;
	struct tmComResDescrHeader *hdr, *t;
	struct tmComResExtDevDescrHeader *exthdr;
	struct tmComResPathDescrHeader *pathhdr;
	struct tmComResAntTermDescrHeader *anttermhdr;
	struct tmComResTunerDescrHeader *tunerunithdr;
	struct tmComResDMATermDescrHeader *vcoutputtermhdr;
	struct tmComResTSFormatDescrHeader *tsfmt;
	struct tmComResPSFormatDescrHeader *psfmt;
	struct tmComResSelDescrHeader *psel;
	struct tmComResProcDescrHeader *pdh;
	struct tmComResAFeatureDescrHeader *afd;
	struct tmComResEncoderDescrHeader *edh;
	struct tmComResVBIFormatDescrHeader *vbifmt;
	/* Path id of the most recent VC_TUNER_PATH descriptor; 1 selects
	 * the first encoder/TS/VBI port, anything else the second. */
	u32 currpath = 0;

	dprintk(DBGLVL_API,
		"%s(?,?,%d) sizeof(struct tmComResDescrHeader) = %d bytes\n",
		__func__, len, (u32)sizeof(struct tmComResDescrHeader));

	/* NOTE(review): if len < sizeof(header) this subtraction wraps to a
	 * huge unsigned value; len comes from the firmware GET_LEN reply —
	 * confirm callers never pass a short buffer.
	 */
	for (idx = 0; idx < (len - sizeof(struct tmComResDescrHeader));) {
		hdr = (struct tmComResDescrHeader *)(buf + idx);
		if (hdr->type != CS_INTERFACE)
			return SAA_ERR_NOT_SUPPORTED;

		dprintk(DBGLVL_API, "@ 0x%x =\n", idx);
		switch (hdr->subtype) {
		case GENERAL_REQUEST:
			dprintk(DBGLVL_API, " GENERAL_REQUEST\n");
			break;
		case VC_TUNER_PATH:
			dprintk(DBGLVL_API, " VC_TUNER_PATH\n");
			pathhdr = (struct tmComResPathDescrHeader *)(buf + idx);
			dprintk(DBGLVL_API, " pathid = 0x%x\n",
				pathhdr->pathid);
			/* Remember which path subsequent units belong to. */
			currpath = pathhdr->pathid;
			break;
		case VC_INPUT_TERMINAL:
			dprintk(DBGLVL_API, " VC_INPUT_TERMINAL\n");
			anttermhdr =
				(struct tmComResAntTermDescrHeader *)(buf + idx);
			dprintk(DBGLVL_API, " terminalid = 0x%x\n",
				anttermhdr->terminalid);
			dprintk(DBGLVL_API, " terminaltype = 0x%x\n",
				anttermhdr->terminaltype);
			switch (anttermhdr->terminaltype) {
			case ITT_ANTENNA:
				dprintk(DBGLVL_API, " = ITT_ANTENNA\n");
				break;
			case LINE_CONNECTOR:
				dprintk(DBGLVL_API, " = LINE_CONNECTOR\n");
				break;
			case SPDIF_CONNECTOR:
				dprintk(DBGLVL_API, " = SPDIF_CONNECTOR\n");
				break;
			case COMPOSITE_CONNECTOR:
				dprintk(DBGLVL_API,
					" = COMPOSITE_CONNECTOR\n");
				break;
			case SVIDEO_CONNECTOR:
				dprintk(DBGLVL_API, " = SVIDEO_CONNECTOR\n");
				break;
			case COMPONENT_CONNECTOR:
				dprintk(DBGLVL_API,
					" = COMPONENT_CONNECTOR\n");
				break;
			case STANDARD_DMA:
				dprintk(DBGLVL_API, " = STANDARD_DMA\n");
				break;
			default:
				dprintk(DBGLVL_API, " = undefined (0x%x)\n",
					anttermhdr->terminaltype);
			}
			dprintk(DBGLVL_API, " assocterminal= 0x%x\n",
				anttermhdr->assocterminal);
			dprintk(DBGLVL_API, " iterminal = 0x%x\n",
				anttermhdr->iterminal);
			dprintk(DBGLVL_API, " controlsize = 0x%x\n",
				anttermhdr->controlsize);
			break;
		case VC_OUTPUT_TERMINAL:
			dprintk(DBGLVL_API, " VC_OUTPUT_TERMINAL\n");
			vcoutputtermhdr =
				(struct tmComResDMATermDescrHeader *)(buf + idx);
			dprintk(DBGLVL_API, " unitid = 0x%x\n",
				vcoutputtermhdr->unitid);
			dprintk(DBGLVL_API, " terminaltype = 0x%x\n",
				vcoutputtermhdr->terminaltype);
			switch (vcoutputtermhdr->terminaltype) {
			case ITT_ANTENNA:
				dprintk(DBGLVL_API, " = ITT_ANTENNA\n");
				break;
			case LINE_CONNECTOR:
				dprintk(DBGLVL_API, " = LINE_CONNECTOR\n");
				break;
			case SPDIF_CONNECTOR:
				dprintk(DBGLVL_API, " = SPDIF_CONNECTOR\n");
				break;
			case COMPOSITE_CONNECTOR:
				dprintk(DBGLVL_API,
					" = COMPOSITE_CONNECTOR\n");
				break;
			case SVIDEO_CONNECTOR:
				dprintk(DBGLVL_API, " = SVIDEO_CONNECTOR\n");
				break;
			case COMPONENT_CONNECTOR:
				dprintk(DBGLVL_API,
					" = COMPONENT_CONNECTOR\n");
				break;
			case STANDARD_DMA:
				dprintk(DBGLVL_API, " = STANDARD_DMA\n");
				break;
			default:
				dprintk(DBGLVL_API, " = undefined (0x%x)\n",
					vcoutputtermhdr->terminaltype);
			}
			dprintk(DBGLVL_API, " assocterminal= 0x%x\n",
				vcoutputtermhdr->assocterminal);
			dprintk(DBGLVL_API, " sourceid = 0x%x\n",
				vcoutputtermhdr->sourceid);
			dprintk(DBGLVL_API, " iterminal = 0x%x\n",
				vcoutputtermhdr->iterminal);
			dprintk(DBGLVL_API, " BARLocation = 0x%x\n",
				vcoutputtermhdr->BARLocation);
			dprintk(DBGLVL_API, " flags = 0x%x\n",
				vcoutputtermhdr->flags);
			dprintk(DBGLVL_API, " interruptid = 0x%x\n",
				vcoutputtermhdr->interruptid);
			dprintk(DBGLVL_API, " buffercount = 0x%x\n",
				vcoutputtermhdr->buffercount);
			dprintk(DBGLVL_API, " metadatasize = 0x%x\n",
				vcoutputtermhdr->metadatasize);
			dprintk(DBGLVL_API, " controlsize = 0x%x\n",
				vcoutputtermhdr->controlsize);
			dprintk(DBGLVL_API, " numformats = 0x%x\n",
				vcoutputtermhdr->numformats);
			/* NOTE(review): this assignment is dead — t is
			 * immediately reassigned inside the loop below. */
			t = (struct tmComResDescrHeader *)
				((struct tmComResDMATermDescrHeader *)(buf + idx));
			next_offset = idx + (vcoutputtermhdr->len);
			/* Format descriptors follow the terminal header;
			 * cache hw config into the port matching currpath. */
			for (i = 0; i < vcoutputtermhdr->numformats; i++) {
				t = (struct tmComResDescrHeader *)
					(buf + next_offset);
				switch (t->subtype) {
				case VS_FORMAT_MPEG2TS:
					tsfmt =
					(struct tmComResTSFormatDescrHeader *)t;
					if (currpath == 1)
						tsport = &dev->ports[SAA7164_PORT_TS1];
					else
						tsport = &dev->ports[SAA7164_PORT_TS2];
					memcpy(&tsport->hwcfg, vcoutputtermhdr,
						sizeof(*vcoutputtermhdr));
					saa7164_api_configure_port_mpeg2ts(dev,
						tsport, tsfmt);
					break;
				case VS_FORMAT_MPEG2PS:
					psfmt =
					(struct tmComResPSFormatDescrHeader *)t;
					if (currpath == 1)
						encport = &dev->ports[SAA7164_PORT_ENC1];
					else
						encport = &dev->ports[SAA7164_PORT_ENC2];
					memcpy(&encport->hwcfg, vcoutputtermhdr,
						sizeof(*vcoutputtermhdr));
					saa7164_api_configure_port_mpeg2ps(dev,
						encport, psfmt);
					break;
				case VS_FORMAT_VBI:
					vbifmt =
					(struct tmComResVBIFormatDescrHeader *)t;
					if (currpath == 1)
						vbiport = &dev->ports[SAA7164_PORT_VBI1];
					else
						vbiport = &dev->ports[SAA7164_PORT_VBI2];
					memcpy(&vbiport->hwcfg, vcoutputtermhdr,
						sizeof(*vcoutputtermhdr));
					memcpy(&vbiport->vbi_fmt_ntsc, vbifmt,
						sizeof(*vbifmt));
					saa7164_api_configure_port_vbi(dev,
						vbiport);
					break;
				case VS_FORMAT_RDS:
					dprintk(DBGLVL_API,
						" = VS_FORMAT_RDS\n");
					break;
				case VS_FORMAT_UNCOMPRESSED:
					dprintk(DBGLVL_API,
						" = VS_FORMAT_UNCOMPRESSED\n");
					break;
				case VS_FORMAT_TYPE:
					dprintk(DBGLVL_API,
						" = VS_FORMAT_TYPE\n");
					break;
				default:
					dprintk(DBGLVL_API,
						" = undefined (0x%x)\n",
						t->subtype);
				}
				next_offset += t->len;
			}
			break;
		case TUNER_UNIT:
			dprintk(DBGLVL_API, " TUNER_UNIT\n");
			tunerunithdr =
				(struct tmComResTunerDescrHeader *)(buf + idx);
			dprintk(DBGLVL_API, " unitid = 0x%x\n",
				tunerunithdr->unitid);
			dprintk(DBGLVL_API, " sourceid = 0x%x\n",
				tunerunithdr->sourceid);
			dprintk(DBGLVL_API, " iunit = 0x%x\n",
				tunerunithdr->iunit);
			dprintk(DBGLVL_API, " tuningstandards = 0x%x\n",
				tunerunithdr->tuningstandards);
			dprintk(DBGLVL_API, " controlsize = 0x%x\n",
				tunerunithdr->controlsize);
			dprintk(DBGLVL_API, " controls = 0x%x\n",
				tunerunithdr->controls);
			if (tunerunithdr->unitid == tunerunithdr->iunit) {
				if (currpath == 1)
					encport = &dev->ports[SAA7164_PORT_ENC1];
				else
					encport = &dev->ports[SAA7164_PORT_ENC2];
				memcpy(&encport->tunerunit, tunerunithdr,
					sizeof(struct tmComResTunerDescrHeader));
				dprintk(DBGLVL_API,
					" (becomes dev->enc[%d] tuner)\n",
					encport->nr);
			}
			break;
		case VC_SELECTOR_UNIT:
			psel = (struct tmComResSelDescrHeader *)(buf + idx);
			dprintk(DBGLVL_API, " VC_SELECTOR_UNIT\n");
			dprintk(DBGLVL_API, " unitid = 0x%x\n",
				psel->unitid);
			dprintk(DBGLVL_API, " nrinpins = 0x%x\n",
				psel->nrinpins);
			dprintk(DBGLVL_API, " sourceid = 0x%x\n",
				psel->sourceid);
			break;
		case VC_PROCESSING_UNIT:
			pdh = (struct tmComResProcDescrHeader *)(buf + idx);
			dprintk(DBGLVL_API, " VC_PROCESSING_UNIT\n");
			dprintk(DBGLVL_API, " unitid = 0x%x\n",
				pdh->unitid);
			dprintk(DBGLVL_API, " sourceid = 0x%x\n",
				pdh->sourceid);
			dprintk(DBGLVL_API, " controlsize = 0x%x\n",
				pdh->controlsize);
			/* NOTE(review): controlsize == 4 is used here as the
			 * marker of the video processor unit — confirm. */
			if (pdh->controlsize == 0x04) {
				if (currpath == 1)
					encport = &dev->ports[SAA7164_PORT_ENC1];
				else
					encport = &dev->ports[SAA7164_PORT_ENC2];
				memcpy(&encport->vidproc, pdh,
					sizeof(struct tmComResProcDescrHeader));
				dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n",
					encport->nr);
			}
			break;
		case FEATURE_UNIT:
			afd = (struct tmComResAFeatureDescrHeader *)(buf + idx);
			dprintk(DBGLVL_API, " FEATURE_UNIT\n");
			dprintk(DBGLVL_API, " unitid = 0x%x\n",
				afd->unitid);
			dprintk(DBGLVL_API, " sourceid = 0x%x\n",
				afd->sourceid);
			dprintk(DBGLVL_API, " controlsize = 0x%x\n",
				afd->controlsize);
			if (currpath == 1)
				encport = &dev->ports[SAA7164_PORT_ENC1];
			else
				encport = &dev->ports[SAA7164_PORT_ENC2];
			memcpy(&encport->audfeat, afd,
				sizeof(struct tmComResAFeatureDescrHeader));
			dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n",
				encport->nr);
			break;
		case ENCODER_UNIT:
			edh = (struct tmComResEncoderDescrHeader *)(buf + idx);
			dprintk(DBGLVL_API, " ENCODER_UNIT\n");
			dprintk(DBGLVL_API, " subtype = 0x%x\n", edh->subtype);
			dprintk(DBGLVL_API, " unitid = 0x%x\n", edh->unitid);
			dprintk(DBGLVL_API, " vsourceid = 0x%x\n",
				edh->vsourceid);
			dprintk(DBGLVL_API, " asourceid = 0x%x\n",
				edh->asourceid);
			dprintk(DBGLVL_API, " iunit = 0x%x\n", edh->iunit);
			if (edh->iunit == edh->unitid) {
				if (currpath == 1)
					encport = &dev->ports[SAA7164_PORT_ENC1];
				else
					encport = &dev->ports[SAA7164_PORT_ENC2];
				memcpy(&encport->encunit, edh,
					sizeof(struct tmComResEncoderDescrHeader));
				dprintk(DBGLVL_API,
					" (becomes dev->enc[%d])\n",
					encport->nr);
			}
			break;
		case EXTENSION_UNIT:
			dprintk(DBGLVL_API, " EXTENSION_UNIT\n");
			exthdr = (struct tmComResExtDevDescrHeader *)(buf + idx);
			dprintk(DBGLVL_API, " unitid = 0x%x\n",
				exthdr->unitid);
			dprintk(DBGLVL_API, " deviceid = 0x%x\n",
				exthdr->deviceid);
			dprintk(DBGLVL_API, " devicetype = 0x%x\n",
				exthdr->devicetype);
			/* devicetype is a bitmask of device capabilities. */
			if (exthdr->devicetype & 0x1)
				dprintk(DBGLVL_API, " = Decoder Device\n");
			if (exthdr->devicetype & 0x2)
				dprintk(DBGLVL_API, " = GPIO Source\n");
			if (exthdr->devicetype & 0x4)
				dprintk(DBGLVL_API, " = Video Decoder\n");
			if (exthdr->devicetype & 0x8)
				dprintk(DBGLVL_API, " = Audio Decoder\n");
			if (exthdr->devicetype & 0x20)
				dprintk(DBGLVL_API, " = Crossbar\n");
			if (exthdr->devicetype & 0x40)
				dprintk(DBGLVL_API, " = Tuner\n");
			if (exthdr->devicetype & 0x80)
				dprintk(DBGLVL_API, " = IF PLL\n");
			if (exthdr->devicetype & 0x100)
				dprintk(DBGLVL_API, " = Demodulator\n");
			if (exthdr->devicetype & 0x200)
				dprintk(DBGLVL_API, " = RDS Decoder\n");
			if (exthdr->devicetype & 0x400)
				dprintk(DBGLVL_API, " = Encoder\n");
			if (exthdr->devicetype & 0x800)
				dprintk(DBGLVL_API, " = IR Decoder\n");
			if (exthdr->devicetype & 0x1000)
				dprintk(DBGLVL_API, " = EEPROM\n");
			if (exthdr->devicetype & 0x2000)
				dprintk(DBGLVL_API,
					" = VBI Decoder\n");
			if (exthdr->devicetype & 0x10000)
				dprintk(DBGLVL_API,
					" = Streaming Device\n");
			if (exthdr->devicetype & 0x20000)
				dprintk(DBGLVL_API,
					" = DRM Device\n");
			if (exthdr->devicetype & 0x40000000)
				dprintk(DBGLVL_API,
					" = Generic Device\n");
			if (exthdr->devicetype & 0x80000000)
				dprintk(DBGLVL_API,
					" = Config Space Device\n");
			dprintk(DBGLVL_API, " numgpiopins = 0x%x\n",
				exthdr->numgpiopins);
			dprintk(DBGLVL_API, " numgpiogroups = 0x%x\n",
				exthdr->numgpiogroups);
			dprintk(DBGLVL_API, " controlsize = 0x%x\n",
				exthdr->controlsize);
			/* Cache the IF PLL (bit 0x80) unit on the encoder. */
			if (exthdr->devicetype & 0x80) {
				if (currpath == 1)
					encport = &dev->ports[SAA7164_PORT_ENC1];
				else
					encport = &dev->ports[SAA7164_PORT_ENC2];
				memcpy(&encport->ifunit, exthdr,
					sizeof(struct tmComResExtDevDescrHeader));
				dprintk(DBGLVL_API,
					" (becomes dev->enc[%d])\n",
					encport->nr);
			}
			break;
		case PVC_INFRARED_UNIT:
			dprintk(DBGLVL_API, " PVC_INFRARED_UNIT\n");
			break;
		case DRM_UNIT:
			dprintk(DBGLVL_API, " DRM_UNIT\n");
			break;
		default:
			dprintk(DBGLVL_API, "default %d\n", hdr->subtype);
		}

		dprintk(DBGLVL_API, " 1.%x\n", hdr->len);
		dprintk(DBGLVL_API, " 2.%x\n", hdr->type);
		dprintk(DBGLVL_API, " 3.%x\n", hdr->subtype);
		dprintk(DBGLVL_API, " 4.%x\n", hdr->unitid);

		/* Advance to the next descriptor. */
		idx += hdr->len;
	}

	return 0;
}
/* Fetch the firmware descriptor blob (GET_LEN then GET_CUR on
 * GET_DESCRIPTORS_CONTROL) and hand it to saa7164_api_dump_subdevs()
 * to populate the per-port hardware configuration.
 * Returns SAA_OK (0) on success or a firmware/driver error code.
 */
int saa7164_api_enum_subdevs(struct saa7164_dev *dev)
{
	int ret;
	u32 buflen = 0;
	u8 *buf;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	/* Get the total descriptor length */
	ret = saa7164_cmd_send(dev, 0, GET_LEN,
		GET_DESCRIPTORS_CONTROL, sizeof(buflen), &buflen);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
		/* FIX: previously we only logged the error and carried on
		 * with buflen == 0, allocating a zero-length buffer and
		 * issuing a zero-length GET_CUR.  Bail out instead.
		 */
		return ret;
	}

	dprintk(DBGLVL_API, "%s() total descriptor size = %d bytes.\n",
		__func__, buflen);

	/* Allocate enough storage for all of the descs */
	buf = kzalloc(buflen, GFP_KERNEL);
	if (!buf)
		return SAA_ERR_NO_RESOURCES;

	/* Retrieve them */
	ret = saa7164_cmd_send(dev, 0, GET_CUR,
		GET_DESCRIPTORS_CONTROL, buflen, buf);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
		goto out;
	}

	if (saa_debug & DBGLVL_API)
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, buf,
			buflen & ~15, false);

	saa7164_api_dump_subdevs(dev, buf, buflen);

out:
	kfree(buf);
	return ret;
}
/* Read @datalen bytes from the i2c device at 7-bit address @addr on @bus,
 * starting at the @reglen-byte register address @reg, into @data.
 *
 * The transfer is tunnelled through the firmware's register-access
 * control.  Request/response layout in @buf:
 *   bytes 00-03  register address length
 *         04-07  number of bytes to read
 *         08...  register address, then (on reply) the data
 *
 * Returns 0 on success, -EIO on any failure.
 */
int saa7164_api_i2c_read(struct saa7164_i2c *bus, u8 addr, u32 reglen, u8 *reg,
	u32 datalen, u8 *data)
{
	struct saa7164_dev *dev = bus->dev;
	u16 len = 0;
	int unitid;
	u8 buf[256];
	int ret;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	if (reglen > 4)
		return -EIO;

	/* FIX: bound the final memcpy out of buf — reject requests whose
	 * preamble + register address + payload exceed the local buffer.
	 */
	if (datalen > sizeof(buf) - 2 * sizeof(u32) - reglen)
		return -EIO;

	/* Prepare the send buffer */
	/* Bytes 00-03 source register length
	 *       04-07 source bytes to read
	 *       08... register address
	 */
	memset(buf, 0, sizeof(buf));
	memcpy((buf + 2 * sizeof(u32) + 0), reg, reglen);
	*((u32 *)(buf + 0 * sizeof(u32))) = reglen;
	*((u32 *)(buf + 1 * sizeof(u32))) = datalen;

	unitid = saa7164_i2caddr_to_unitid(bus, addr);
	if (unitid < 0) {
		printk(KERN_ERR
			"%s() error, cannot translate regaddr 0x%x to unitid\n",
			__func__, addr);
		return -EIO;
	}

	ret = saa7164_cmd_send(bus->dev, unitid, GET_LEN,
		EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret);
		return -EIO;
	}

	dprintk(DBGLVL_API, "%s() len = %d bytes\n", __func__, len);

	/* FIX: the firmware dictates the GET_CUR transfer length; make sure
	 * it cannot overflow the fixed-size stack buffer.
	 */
	if (len > sizeof(buf)) {
		printk(KERN_ERR "%s() error, len %d exceeds buffer\n",
			__func__, len);
		return -EIO;
	}

	if (saa_debug & DBGLVL_I2C)
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, buf,
			32, false);

	ret = saa7164_cmd_send(bus->dev, unitid, GET_CUR,
		EXU_REGISTER_ACCESS_CONTROL, len, &buf);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret);
	else {
		if (saa_debug & DBGLVL_I2C)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
				buf, sizeof(buf), false);
		/* Payload follows the 8-byte preamble and the echoed
		 * register address. */
		memcpy(data, (buf + 2 * sizeof(u32) + reglen), datalen);
	}

	return ret == SAA_OK ? 0 : -EIO;
}
/* For a given 8 bit i2c address device, write the buffer.
 * @data holds the register-address prefix followed by the payload; the
 * prefix length is looked up per-device via saa7164_i2caddr_to_reglen().
 * Returns 0 on success, -EIO on any failure.
 */
int saa7164_api_i2c_write(struct saa7164_i2c *bus, u8 addr, u32 datalen,
	u8 *data)
{
	struct saa7164_dev *dev = bus->dev;
	u16 len = 0;
	int unitid;
	int reglen;
	u8 buf[256];
	int ret;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	/* 232 payload bytes + 8 byte preamble fit in the 256 byte buffer. */
	if ((datalen == 0) || (datalen > 232))
		return -EIO;

	memset(buf, 0, sizeof(buf));

	unitid = saa7164_i2caddr_to_unitid(bus, addr);
	if (unitid < 0) {
		printk(KERN_ERR
			"%s() error, cannot translate regaddr 0x%x to unitid\n",
			__func__, addr);
		return -EIO;
	}

	reglen = saa7164_i2caddr_to_reglen(bus, addr);
	if (reglen < 0) {
		printk(KERN_ERR
			"%s() error, cannot translate regaddr to reglen\n",
			__func__);
		return -EIO;
	}

	/* FIX: datalen must cover at least the register-address prefix,
	 * otherwise "datalen - reglen" below wraps to a huge u32 byte
	 * count.
	 */
	if ((u32)reglen > datalen)
		return -EIO;

	ret = saa7164_cmd_send(bus->dev, unitid, GET_LEN,
		EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret);
		return -EIO;
	}

	dprintk(DBGLVL_API, "%s() len = %d bytes\n", __func__, len);

	/* FIX: the firmware dictates the SET_CUR transfer length; make sure
	 * it cannot read past the fixed-size stack buffer.
	 */
	if (len > sizeof(buf)) {
		printk(KERN_ERR "%s() error, len %d exceeds buffer\n",
			__func__, len);
		return -EIO;
	}

	/* Prepare the send buffer */
	/* Bytes 00-03 dest register length
	 *       04-07 dest bytes to write
	 *       08... register address
	 */
	*((u32 *)(buf + 0 * sizeof(u32))) = reglen;
	*((u32 *)(buf + 1 * sizeof(u32))) = datalen - reglen;
	memcpy((buf + 2 * sizeof(u32)), data, datalen);

	if (saa_debug & DBGLVL_I2C)
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
			buf, sizeof(buf), false);

	ret = saa7164_cmd_send(bus->dev, unitid, SET_CUR,
		EXU_REGISTER_ACCESS_CONTROL, len, &buf);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret);

	return ret == SAA_OK ? 0 : -EIO;
}
/* Set GPIO @pin (0-7) on extension unit @unitid to @state (0-2) via the
 * EXU_GPIO_CONTROL property.  Returns the firmware status, or
 * SAA_ERR_BAD_PARAMETER on out-of-range arguments.
 */
static int saa7164_api_modify_gpio(struct saa7164_dev *dev, u8 unitid,
	u8 pin, u8 state)
{
	struct tmComResGPIO payload;
	int err;

	dprintk(DBGLVL_API, "%s(0x%x, %d, %d)\n",
		__func__, unitid, pin, state);

	if ((pin > 7) || (state > 2))
		return SAA_ERR_BAD_PARAMETER;

	payload.pin = pin;
	payload.state = state;

	err = saa7164_cmd_send(dev, unitid, SET_CUR,
		EXU_GPIO_CONTROL, sizeof(payload), &payload);
	if (err != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n",
			__func__, err);

	return err;
}
/* Drive GPIO @pin on extension unit @unitid high (state 1). */
int saa7164_api_set_gpiobit(struct saa7164_dev *dev, u8 unitid,
	u8 pin)
{
	return saa7164_api_modify_gpio(dev, unitid, pin, 1);
}
/* Drive GPIO @pin on extension unit @unitid low (state 0). */
int saa7164_api_clear_gpiobit(struct saa7164_dev *dev, u8 unitid,
	u8 pin)
{
	return saa7164_api_modify_gpio(dev, unitid, pin, 0);
}
| gpl-2.0 |
chaoling/test123 | linux-2.6.39/net/ipv6/netfilter/ip6t_LOG.c | 3201 | 12550 | /*
* This is a module which is used for logging packets.
*/
/* (C) 2001 Jan Rekorajski <baggins@pld.org.pl>
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/spinlock.h>
#include <linux/icmpv6.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <net/ipv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/xt_log.h>
MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
MODULE_DESCRIPTION("Xtables: IPv6 packet logging to syslog");
MODULE_LICENSE("GPL");
struct in_device;
#include <net/route.h>
#include <linux/netfilter_ipv6/ip6t_LOG.h>
/* One level of recursion won't kill us */
/* Append a textual dump of the IPv6 packet in @skb (starting at offset
 * @ip6hoff) to the log buffer @m: addresses, header fields, extension
 * headers, and the transport header (TCP/UDP/ICMPv6).  @recurse is 1 for
 * the outer packet and 0 when re-invoked for the packet embedded in an
 * ICMPv6 error, limiting recursion to one level.
 */
static void dump_packet(struct sbuff *m,
			const struct nf_loginfo *info,
			const struct sk_buff *skb, unsigned int ip6hoff,
			int recurse)
{
	u_int8_t currenthdr;
	int fragment;
	struct ipv6hdr _ip6h;
	const struct ipv6hdr *ih;
	unsigned int ptr;
	unsigned int hdrlen = 0;
	unsigned int logflags;

	/* Honour per-rule flags for LOG targets; log everything for
	 * other callers (e.g. nf_log). */
	if (info->type == NF_LOG_TYPE_LOG)
		logflags = info->u.log.logflags;
	else
		logflags = NF_LOG_MASK;

	ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
	if (ih == NULL) {
		sb_add(m, "TRUNCATED");
		return;
	}

	/* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
	sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);

	/* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
	sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
	       ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
	       (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
	       ih->hop_limit,
	       (ntohl(*(__be32 *)ih) & 0x000fffff));

	fragment = 0;
	ptr = ip6hoff + sizeof(struct ipv6hdr);
	currenthdr = ih->nexthdr;
	/* Walk the chain of extension headers until a transport header
	 * (or NEXTHDR_NONE) is reached. */
	while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
		struct ipv6_opt_hdr _hdr;
		const struct ipv6_opt_hdr *hp;

		hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
		if (hp == NULL) {
			sb_add(m, "TRUNCATED");
			return;
		}

		/* Max length: 48 "OPT (...) " */
		if (logflags & IP6T_LOG_IPOPT)
			sb_add(m, "OPT ( ");

		switch (currenthdr) {
		case IPPROTO_FRAGMENT: {
			struct frag_hdr _fhdr;
			const struct frag_hdr *fh;

			sb_add(m, "FRAG:");
			fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
						&_fhdr);
			if (fh == NULL) {
				sb_add(m, "TRUNCATED ");
				return;
			}

			/* Max length: 6 "65535 " */
			sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);

			/* Max length: 11 "INCOMPLETE " */
			if (fh->frag_off & htons(0x0001))
				sb_add(m, "INCOMPLETE ");

			sb_add(m, "ID:%08x ", ntohl(fh->identification));

			/* Non-first fragments carry no transport header. */
			if (ntohs(fh->frag_off) & 0xFFF8)
				fragment = 1;

			hdrlen = 8;
			break;
		}
		case IPPROTO_DSTOPTS:
		case IPPROTO_ROUTING:
		case IPPROTO_HOPOPTS:
			if (fragment) {
				if (logflags & IP6T_LOG_IPOPT)
					sb_add(m, ")");
				return;
			}
			hdrlen = ipv6_optlen(hp);
			break;
		/* Max Length */
		case IPPROTO_AH:
			if (logflags & IP6T_LOG_IPOPT) {
				struct ip_auth_hdr _ahdr;
				const struct ip_auth_hdr *ah;

				/* Max length: 3 "AH " */
				sb_add(m, "AH ");

				if (fragment) {
					sb_add(m, ")");
					return;
				}

				ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
							&_ahdr);
				if (ah == NULL) {
					/*
					 * Max length: 26 "INCOMPLETE [65535
					 *  bytes] )"
					 */
					sb_add(m, "INCOMPLETE [%u bytes] )",
					       skb->len - ptr);
					return;
				}

				/* Length: 15 "SPI=0xF1234567 */
				sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
			}

			hdrlen = (hp->hdrlen + 2) << 2;
			break;
		case IPPROTO_ESP:
			if (logflags & IP6T_LOG_IPOPT) {
				struct ip_esp_hdr _esph;
				const struct ip_esp_hdr *eh;

				/* Max length: 4 "ESP " */
				sb_add(m, "ESP ");

				if (fragment) {
					sb_add(m, ")");
					return;
				}

				/*
				 * Max length: 26 "INCOMPLETE [65535 bytes] )"
				 */
				eh = skb_header_pointer(skb, ptr, sizeof(_esph),
							&_esph);
				if (eh == NULL) {
					sb_add(m, "INCOMPLETE [%u bytes] )",
					       skb->len - ptr);
					return;
				}

				/* Length: 16 "SPI=0xF1234567 )" */
				sb_add(m, "SPI=0x%x )", ntohl(eh->spi));
			}
			/* ESP payload is encrypted; nothing more to dump. */
			return;
		default:
			/* Max length: 20 "Unknown Ext Hdr 255" */
			sb_add(m, "Unknown Ext Hdr %u", currenthdr);
			return;
		}
		if (logflags & IP6T_LOG_IPOPT)
			sb_add(m, ") ");

		currenthdr = hp->nexthdr;
		ptr += hdrlen;
	}

	switch (currenthdr) {
	case IPPROTO_TCP: {
		struct tcphdr _tcph;
		const struct tcphdr *th;

		/* Max length: 10 "PROTO=TCP " */
		sb_add(m, "PROTO=TCP ");

		if (fragment)
			break;

		/* Max length: 25 "INCOMPLETE [65535 bytes] " */
		th = skb_header_pointer(skb, ptr, sizeof(_tcph), &_tcph);
		if (th == NULL) {
			sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
			return;
		}

		/* Max length: 20 "SPT=65535 DPT=65535 " */
		sb_add(m, "SPT=%u DPT=%u ",
		       ntohs(th->source), ntohs(th->dest));
		/* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
		if (logflags & IP6T_LOG_TCPSEQ)
			sb_add(m, "SEQ=%u ACK=%u ",
			       ntohl(th->seq), ntohl(th->ack_seq));
		/* Max length: 13 "WINDOW=65535 " */
		sb_add(m, "WINDOW=%u ", ntohs(th->window));
		/* Max length: 9 "RES=0x3C " */
		sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22));
		/* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
		if (th->cwr)
			sb_add(m, "CWR ");
		if (th->ece)
			sb_add(m, "ECE ");
		if (th->urg)
			sb_add(m, "URG ");
		if (th->ack)
			sb_add(m, "ACK ");
		if (th->psh)
			sb_add(m, "PSH ");
		if (th->rst)
			sb_add(m, "RST ");
		if (th->syn)
			sb_add(m, "SYN ");
		if (th->fin)
			sb_add(m, "FIN ");
		/* Max length: 11 "URGP=65535 " */
		sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));

		/* Dump TCP options as hex when requested and present. */
		if ((logflags & IP6T_LOG_TCPOPT) &&
		    th->doff * 4 > sizeof(struct tcphdr)) {
			u_int8_t _opt[60 - sizeof(struct tcphdr)];
			const u_int8_t *op;
			unsigned int i;
			unsigned int optsize = th->doff * 4
					       - sizeof(struct tcphdr);

			op = skb_header_pointer(skb,
						ptr + sizeof(struct tcphdr),
						optsize, _opt);
			if (op == NULL) {
				sb_add(m, "OPT (TRUNCATED)");
				return;
			}

			/* Max length: 127 "OPT (" 15*4*2chars ") " */
			sb_add(m, "OPT (");
			for (i = 0; i < optsize; i++)
				sb_add(m, "%02X", op[i]);
			sb_add(m, ") ");
		}
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE: {
		struct udphdr _udph;
		const struct udphdr *uh;

		if (currenthdr == IPPROTO_UDP)
			/* Max length: 10 "PROTO=UDP " */
			sb_add(m, "PROTO=UDP " );
		else	/* Max length: 14 "PROTO=UDPLITE " */
			sb_add(m, "PROTO=UDPLITE ");

		if (fragment)
			break;

		/* Max length: 25 "INCOMPLETE [65535 bytes] " */
		uh = skb_header_pointer(skb, ptr, sizeof(_udph), &_udph);
		if (uh == NULL) {
			sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
			return;
		}

		/* Max length: 20 "SPT=65535 DPT=65535 " */
		sb_add(m, "SPT=%u DPT=%u LEN=%u ",
		       ntohs(uh->source), ntohs(uh->dest),
		       ntohs(uh->len));
		break;
	}
	case IPPROTO_ICMPV6: {
		struct icmp6hdr _icmp6h;
		const struct icmp6hdr *ic;

		/* Max length: 13 "PROTO=ICMPv6 " */
		sb_add(m, "PROTO=ICMPv6 ");

		if (fragment)
			break;

		/* Max length: 25 "INCOMPLETE [65535 bytes] " */
		ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
		if (ic == NULL) {
			sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
			return;
		}

		/* Max length: 18 "TYPE=255 CODE=255 " */
		sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);

		switch (ic->icmp6_type) {
		case ICMPV6_ECHO_REQUEST:
		case ICMPV6_ECHO_REPLY:
			/* Max length: 19 "ID=65535 SEQ=65535 " */
			sb_add(m, "ID=%u SEQ=%u ",
				ntohs(ic->icmp6_identifier),
				ntohs(ic->icmp6_sequence));
			break;
		case ICMPV6_MGM_QUERY:
		case ICMPV6_MGM_REPORT:
		case ICMPV6_MGM_REDUCTION:
			break;

		case ICMPV6_PARAMPROB:
			/* Max length: 17 "POINTER=ffffffff " */
			sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
			/* Fall through */
		case ICMPV6_DEST_UNREACH:
		case ICMPV6_PKT_TOOBIG:
		case ICMPV6_TIME_EXCEED:
			/* Max length: 3+maxlen */
			if (recurse) {
				/* Dump the embedded offending packet,
				 * disallowing further recursion. */
				sb_add(m, "[");
				dump_packet(m, info, skb,
					    ptr + sizeof(_icmp6h), 0);
				sb_add(m, "] ");
			}

			/* Max length: 10 "MTU=65535 " */
			if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
				sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
		}
		break;
	}
	/* Max length: 10 "PROTO=255 " */
	default:
		sb_add(m, "PROTO=%u ", currenthdr);
	}

	/* Max length: 15 "UID=4294967295 " */
	if ((logflags & IP6T_LOG_UID) && recurse && skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file)
			sb_add(m, "UID=%u GID=%u ",
				skb->sk->sk_socket->file->f_cred->fsuid,
				skb->sk->sk_socket->file->f_cred->fsgid);
		read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	/* Max length: 16 "MARK=0xFFFFFFFF " */
	if (!recurse && skb->mark)
		sb_add(m, "MARK=0x%x ", skb->mark);
}
/* Append the link-layer (MAC) header of @skb to the log buffer @m.
 * With IP6T_LOG_MACDECODE set and an Ethernet device, print decoded
 * src/dst/proto fields; otherwise fall back to a raw hex dump of the
 * hard header, plus the outer IPv4 tunnel endpoints for SIT devices.
 */
static void dump_mac_header(struct sbuff *m,
			    const struct nf_loginfo *info,
			    const struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	unsigned int logflags = 0;

	if (info->type == NF_LOG_TYPE_LOG)
		logflags = info->u.log.logflags;

	if (!(logflags & IP6T_LOG_MACDECODE))
		goto fallback;

	switch (dev->type) {
	case ARPHRD_ETHER:
		sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
		       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
		       ntohs(eth_hdr(skb)->h_proto));
		return;
	default:
		break;
	}

fallback:
	sb_add(m, "MAC=");
	if (dev->hard_header_len &&
	    skb->mac_header != skb->network_header) {
		const unsigned char *p = skb_mac_header(skb);
		unsigned int len = dev->hard_header_len;
		unsigned int i;

		/* For SIT tunnels, back up to the (inner) Ethernet header;
		 * bail if that would point before the skb's head. */
		if (dev->type == ARPHRD_SIT &&
		    (p -= ETH_HLEN) < skb->head)
			p = NULL;

		if (p != NULL) {
			sb_add(m, "%02x", *p++);
			for (i = 1; i < len; i++)
				sb_add(m, ":%02x", *p++);
		}
		sb_add(m, " ");

		if (dev->type == ARPHRD_SIT) {
			const struct iphdr *iph =
				(struct iphdr *)skb_mac_header(skb);
			sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr, &iph->daddr);
		}
	} else
		sb_add(m, " ");
}
/* Logging parameters used when a caller passes no nf_loginfo:
 * syslog level 5 with every IP6T_LOG_* option enabled (NF_LOG_MASK).
 */
static struct nf_loginfo default_loginfo = {
	.type = NF_LOG_TYPE_LOG,
	.u = {
		.log = {
			.level = 5,
			.logflags = NF_LOG_MASK,
		},
	},
};
/* Format and emit one log line for @skb: the "<level>prefix IN= OUT="
 * banner, the MAC header (for input packets) and the full packet dump.
 */
static void
ip6t_log_packet(u_int8_t pf,
		unsigned int hooknum,
		const struct sk_buff *skb,
		const struct net_device *in,
		const struct net_device *out,
		const struct nf_loginfo *loginfo,
		const char *prefix)
{
	struct sbuff *line = sb_open();

	/* Fall back to the module-wide defaults when the caller supplies
	 * no logging parameters. */
	if (loginfo == NULL)
		loginfo = &default_loginfo;

	sb_add(line, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
	       prefix,
	       in ? in->name : "",
	       out ? out->name : "");

	if (in != NULL)
		dump_mac_header(line, loginfo, skb);

	dump_packet(line, loginfo, skb, skb_network_offset(skb), 1);

	sb_close(line);
}
static unsigned int
log_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ip6t_log_info *loginfo = par->targinfo;
struct nf_loginfo li;
li.type = NF_LOG_TYPE_LOG;
li.u.log.level = loginfo->level;
li.u.log.logflags = loginfo->logflags;
ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in, par->out,
&li, loginfo->prefix);
return XT_CONTINUE;
}
/* Validate a rule's LOG target parameters at insertion time:
 * the syslog level must be 0-7 and the prefix NUL-terminated.
 * Returns 0 when valid, -EINVAL otherwise.
 */
static int log_tg6_check(const struct xt_tgchk_param *par)
{
	const struct ip6t_log_info *info = par->targinfo;

	if (info->level >= 8) {
		pr_debug("level %u >= 8\n", info->level);
		return -EINVAL;
	}

	if (info->prefix[sizeof(info->prefix) - 1] != '\0') {
		pr_debug("prefix not null-terminated\n");
		return -EINVAL;
	}

	return 0;
}
/* xtables registration record for the IPv6 "LOG" target. */
static struct xt_target log_tg6_reg __read_mostly = {
	.name		= "LOG",
	.family		= NFPROTO_IPV6,
	.target		= log_tg6,
	.targetsize	= sizeof(struct ip6t_log_info),
	.checkentry	= log_tg6_check,
	.me		= THIS_MODULE,
};
/* nf_log backend so other netfilter code can log IPv6 packets through
 * ip6t_log_packet() as well. */
static struct nf_logger ip6t_logger __read_mostly = {
	.name		= "ip6t_LOG",
	.logfn		= &ip6t_log_packet,
	.me		= THIS_MODULE,
};
/* Module init: register the xtables target, and on success also register
 * the nf_log backend.  Returns 0 or the xt_register_target() error.
 */
static int __init log_tg6_init(void)
{
	int ret = xt_register_target(&log_tg6_reg);

	if (ret == 0)
		nf_log_register(NFPROTO_IPV6, &ip6t_logger);

	return ret;
}
/* Module exit: tear down registrations in reverse order of init. */
static void __exit log_tg6_exit(void)
{
	nf_log_unregister(&ip6t_logger);
	xt_unregister_target(&log_tg6_reg);
}
/* Standard module entry/exit hooks. */
module_init(log_tg6_init);
module_exit(log_tg6_exit);
| gpl-2.0 |
MI2S/android_kernel_xiaomi_aries | arch/x86/pci/mrst.c | 4737 | 9076 | /*
* Moorestown PCI support
* Copyright (c) 2008 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
*
* Moorestown has an interesting PCI implementation:
* - configuration space is memory mapped (as defined by MCFG)
* - Lincroft devices also have a real, type 1 configuration space
* - Early Lincroft silicon has a type 1 access bug that will cause
* a hang if non-existent devices are accessed
* - some devices have the "fixed BAR" capability, which means
* they can't be relocated or modified; check for that during
* BAR sizing
*
* So, we use the MCFG space for all reads and writes, but also send
* Lincroft writes to type 1 space. But only read/write if the device
* actually exists, otherwise return all 1s for reads and bit bucket
* the writes.
*/
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <asm/acpi.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/pci_x86.h>
#include <asm/hw_irq.h>
#include <asm/io_apic.h>
#define PCIE_CAP_OFFSET 0x100
/* Fixed BAR fields */
#define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */
#define PCI_FIXED_BAR_0_SIZE 0x04
#define PCI_FIXED_BAR_1_SIZE 0x08
#define PCI_FIXED_BAR_2_SIZE 0x0c
#define PCI_FIXED_BAR_3_SIZE 0x10
#define PCI_FIXED_BAR_4_SIZE 0x14
#define PCI_FIXED_BAR_5_SIZE 0x1c
static int pci_soc_mode = 0;
/**
* fixed_bar_cap - return the offset of the fixed BAR cap if found
* @bus: PCI bus
* @devfn: device in question
*
* Look for the fixed BAR cap on @bus and @devfn, returning its offset
* if found or 0 otherwise.
*/
static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
{
	int pos;
	u32 pcie_cap = 0, cap_data;

	/* extended capabilities begin right after standard config space */
	pos = PCIE_CAP_OFFSET;

	if (!raw_pci_ext_ops)
		return 0;

	/* walk the extended capability list until it terminates */
	while (pos) {
		if (raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
					  devfn, pos, 4, &pcie_cap))
			return 0;

		/* an ID of 0x0000 or 0xffff means end of list / no device */
		if (PCI_EXT_CAP_ID(pcie_cap) == 0x0000 ||
		    PCI_EXT_CAP_ID(pcie_cap) == 0xffff)
			break;

		if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
			/* vendor cap: low 16 bits of the next dword hold
			 * the vendor-specific capability id */
			raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
					      devfn, pos + 4, 4, &cap_data);
			if ((cap_data & 0xffff) == PCIE_VNDR_CAP_ID_FIXED_BAR)
				return pos;
		}

		pos = PCI_EXT_CAP_NEXT(pcie_cap);
	}

	return 0;
}
/*
 * Handle a config-space write to a BAR of a device with the fixed BAR
 * capability.  A sizing write (val == ~0, len == 4) is answered by
 * programming the mmconfig BAR with a decode mask derived from the
 * device's fixed size; any other write goes straight to mmconfig.
 */
static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
				   int reg, int len, u32 val, int offset)
{
	u32 size;
	unsigned int domain, busnum;
	int bar = (reg - PCI_BASE_ADDRESS_0) >> 2;

	domain = pci_domain_nr(bus);
	busnum = bus->number;

	if (val == ~0 && len == 4) {
		unsigned long decode;

		/* fixed BAR sizes live at cap offset + 8, one dword per BAR */
		raw_pci_ext_ops->read(domain, busnum, devfn,
				      offset + 8 + (bar * 4), 4, &size);

		/* Turn the size into a decode pattern for the sizing code */
		if (size) {
			/* smear the high bit down, then round up to the
			 * next power of two and build the all-ones-above
			 * mask the sizing code expects to read back */
			decode = size - 1;
			decode |= decode >> 1;
			decode |= decode >> 2;
			decode |= decode >> 4;
			decode |= decode >> 8;
			decode |= decode >> 16;
			decode++;
			decode = ~(decode - 1);
		} else {
			decode = 0;
		}

		/*
		 * If val is all ones, the core code is trying to size the reg,
		 * so update the mmconfig space with the real size.
		 *
		 * Note: this assumes the fixed size we got is a power of two.
		 */
		return raw_pci_ext_ops->write(domain, busnum, devfn, reg, 4,
					      decode);
	}

	/* This is some other kind of BAR write, so just do it. */
	return raw_pci_ext_ops->write(domain, busnum, devfn, reg, len, val);
}
/**
* type1_access_ok - check whether to use type 1
* @bus: bus number
* @devfn: device & function in question
*
* If the bus is on a Lincroft chip and it exists, or is not on a Lincroft at
* all, the we can go ahead with any reads & writes. If it's on a Lincroft,
* but doesn't exist, avoid the access altogether to keep the chip from
* hanging.
*/
static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
{
/* This is a workaround for A0 LNC bug where PCI status register does
* not have new CAP bit set. can not be written by SW either.
*
* PCI header type in real LNC indicates a single function device, this
* will prevent probing other devices under the same function in PCI
* shim. Therefore, use the header type in shim instead.
*/
if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
return 0;
if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
|| devfn == PCI_DEVFN(0, 0)
|| devfn == PCI_DEVFN(3, 0)))
return 1;
return 0; /* langwell on others */
}
/*
 * Config-space read: use real type 1 access for true Lincroft devices,
 * mmconfig (extended ops) for everything else.
 */
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	int domain = pci_domain_nr(bus);
	unsigned char busnum = bus->number;

	if (type1_access_ok(busnum, devfn, where))
		return pci_direct_conf1.read(domain, busnum, devfn,
					     where, size, value);

	return raw_pci_ext_ops->read(domain, busnum, devfn,
				     where, size, value);
}
/*
 * Config-space write: drop ROM BAR writes, route fixed-BAR writes to
 * the sizing helper, and otherwise write through type 1 (Lincroft) or
 * mmconfig space as appropriate.
 */
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	int offset;

	/* On MRST, there is no PCI ROM BAR, this will cause a subsequent read
	 * to ROM BAR return 0 then being ignored.
	 */
	if (where == PCI_ROM_ADDRESS)
		return 0;

	/*
	 * Devices with fixed BARs need special handling:
	 *   - BAR sizing code will save, write ~0, read size, restore
	 *   - so writes to fixed BARs need special handling
	 *   - other writes to fixed BAR devices should go through mmconfig
	 */
	offset = fixed_bar_cap(bus, devfn);
	if (offset &&
	    (where >= PCI_BASE_ADDRESS_0 && where <= PCI_BASE_ADDRESS_5)) {
		return pci_device_update_fixed(bus, devfn, where, size, value,
					       offset);
	}

	/*
	 * On Moorestown update both real & mmconfig space
	 * Note: early Lincroft silicon can't handle type 1 accesses to
	 * non-existent devices, so just eat the write in that case.
	 */
	if (type1_access_ok(bus->number, devfn, where))
		return pci_direct_conf1.write(pci_domain_nr(bus), bus->number,
					      devfn, where, size, value);

	return raw_pci_ext_ops->write(pci_domain_nr(bus), bus->number, devfn,
				      where, size, value);
}
/*
 * Enable a device's interrupt by programming the matching IOAPIC RTE;
 * on MRST the PCI irq lines map 1:1 to IOAPIC pins.
 */
static int mrst_pci_irq_enable(struct pci_dev *dev)
{
	u8 pin;
	struct io_apic_irq_attr irq_attr;

	/* NOTE(review): 'pin' is read but never used below -- confirm the
	 * config read itself is not load-bearing before removing it. */
	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);

	/* MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
	 * IOAPIC RTE entries, so we just enable RTE for the device.
	 */
	irq_attr.ioapic = mp_find_ioapic(dev->irq);
	irq_attr.ioapic_pin = dev->irq;
	irq_attr.trigger = 1;	/* level */
	irq_attr.polarity = 1;	/* active low */
	io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr);

	return 0;
}
/* Config-space accessors installed as pci_root_ops by pci_mrst_init(). */
struct pci_ops pci_mrst_ops = {
	.read = pci_read,
	.write = pci_write,
};
/**
* pci_mrst_init - installs pci_mrst_ops
*
* Moorestown has an interesting PCI implementation (see above).
* Called when the early platform detection installs it.
*/
int __init pci_mrst_init(void)
{
	printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
	/* mmconfig must be initialized before our read/write ops can work */
	pci_mmcfg_late_init();
	pcibios_enable_irq = mrst_pci_irq_enable;
	pci_root_ops = pci_mrst_ops;
	/* mark SoC mode so the fixups below know they may apply */
	pci_soc_mode = 1;
	/* Continue with standard init */
	return 1;
}
/* Langwell devices are not true pci devices, they are not subject to 10 ms
* d3 to d0 delay required by pci spec.
*/
static void __devinit pci_d3delay_fixup(struct pci_dev *dev)
{
	/* PCI fixups are effectively decided compile time. If we have a dual
	   SoC/non-SoC kernel we don't want to mangle d3 on non SoC devices */
	if (!pci_soc_mode)
		return;

	/* Devices that pass the type 1 check are real Lincroft PCI devices
	 * and keep the standard delay; everything else is a fake Langwell
	 * device that needs no d3-to-d0 settling time. */
	if (!type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
		dev->d3_delay = 0;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3delay_fixup);
/* Put devices that have no driver into D3cold to save power. */
static void __devinit mrst_power_off_unused_dev(struct pci_dev *dev)
{
	pci_set_power_state(dev, PCI_D3cold);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0812, mrst_power_off_unused_dev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev);
/*
* Langwell devices reside at fixed offsets, don't try to move them.
*/
static void __devinit pci_fixed_bar_fixup(struct pci_dev *dev)
{
	unsigned long offset;
	u32 size;
	int i;

	if (!pci_soc_mode)
		return;

	/* Must have extended configuration space */
	if (dev->cfg_size < PCIE_CAP_OFFSET + 4)
		return;

	/* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
	offset = fixed_bar_cap(dev->bus, dev->devfn);
	/* NOTE(review): devfn 2.0 and 2.2 are deliberately skipped --
	 * presumably handled elsewhere; confirm before changing. */
	if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||
	    PCI_DEVFN(2, 2) == dev->devfn)
		return;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		/* fixed BAR sizes are stored at cap offset + 8, one per BAR */
		pci_read_config_dword(dev, offset + 8 + (i * 4), &size);
		dev->resource[i].end = dev->resource[i].start + size - 1;
		dev->resource[i].flags |= IORESOURCE_PCI_FIXED;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixed_bar_fixup);
| gpl-2.0 |
danielpanzella/P900-kernel-source | fs/proc/uptime.c | 7297 | 1305 | #include <linux/fs.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <asm/cputime.h>
/*
 * Emit /proc/uptime: "<uptime> <total idle time>", both in seconds
 * with two decimal places.  Idle time is the sum over all possible
 * CPUs, so it can exceed wall-clock uptime on SMP.
 */
static int uptime_proc_show(struct seq_file *m, void *v)
{
	struct timespec uptime;
	struct timespec idle;
	u64 idletime;
	u64 nsec;
	u32 rem;
	int i;

	idletime = 0;
	for_each_possible_cpu(i)
		idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];

	do_posix_clock_monotonic_gettime(&uptime);
	/* include time spent suspended */
	monotonic_to_bootbased(&uptime);
	/* convert the cputime idle total to a timespec */
	nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
	idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
	idle.tv_nsec = rem;
	seq_printf(m, "%lu.%02lu %lu.%02lu\n",
			(unsigned long) uptime.tv_sec,
			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
			(unsigned long) idle.tv_sec,
			(idle.tv_nsec / (NSEC_PER_SEC / 100)));
	return 0;
}
/* open() handler: bind the single-shot seq_file show routine. */
static int uptime_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, uptime_proc_show, NULL);
}
/* Standard single_open()-based seq_file operations for /proc/uptime. */
static const struct file_operations uptime_proc_fops = {
	.open		= uptime_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Create the read-only /proc/uptime entry at boot. */
static int __init proc_uptime_init(void)
{
	proc_create("uptime", 0, NULL, &uptime_proc_fops);
	return 0;
}
module_init(proc_uptime_init);
| gpl-2.0 |
realthunder/a33_linux | fs/reiserfs/fix_node.c | 7297 | 79019 | /*
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
/**
** old_item_num
** old_entry_num
** set_entry_sizes
** create_virtual_node
** check_left
** check_right
** directory_part_size
** get_num_ver
** set_parameters
** is_leaf_removable
** are_leaves_removable
** get_empty_nodes
** get_lfree
** get_rfree
** is_left_neighbor_in_cache
** decrement_key
** get_far_parent
** get_parents
** can_node_be_removed
** ip_check_balance
** dc_check_balance_internal
** dc_check_balance_leaf
** dc_check_balance
** check_balance
** get_direct_parent
** get_neighbors
** fix_nodes
**
**
**/
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "reiserfs.h"
#include <linux/buffer_head.h>
/* To make any changes in the tree we find a node, that contains item
to be changed/deleted or position in the node we insert a new item
to. We call this node S. To do balancing we need to decide what we
will shift to left/right neighbor, or to a new node, where new item
will be etc. To make this analysis simpler we build virtual
node. Virtual node is an array of items, that will replace items of
node S. (For instance if we are going to delete an item, virtual
node does not contain it). Virtual node keeps information about
item sizes and types, mergeability of first and last items, sizes
of all entries in directory item. We use this array of items when
calculating what we can shift to neighbors and how many nodes we
have to have if we do not any shiftings, if we shift to left/right
neighbor or to both. */
/* taking item number in virtual node, returns number of item, that it has in source buffer */
/*
 * Map an item's index in the virtual node back to its index in the
 * source buffer, compensating for the pending insert or delete.
 */
static inline int old_item_num(int new_num, int affected_item_num, int mode)
{
	/* items before the affected position never shift, and paste/cut
	 * do not change item numbering at all */
	if (new_num < affected_item_num)
		return new_num;

	switch (mode) {
	case M_PASTE:
	case M_CUT:
		return new_num;
	case M_INSERT:
		RFALSE(new_num == 0,
		       "vs-8005: for INSERT mode and item number of inserted item");
		/* items at or past the insertion point moved up by one */
		return new_num - 1;
	default:
		RFALSE(mode != M_DELETE,
		       "vs-8010: old_item_num: mode must be M_DELETE (mode = \'%c\'",
		       mode);
		/* delete mode: items at or past the deleted one moved down */
		return new_num + 1;
	}
}
/*
 * Build the virtual node for level h: an array of virtual items that
 * describes what S[h] will contain after the pending operation
 * (insert/delete/cut/paste), including the left/right merge flags
 * derived from the neighbors' delimiting keys.
 */
static void create_virtual_node(struct tree_balance *tb, int h)
{
	struct item_head *ih;
	struct virtual_node *vn = tb->tb_vn;
	int new_num;
	struct buffer_head *Sh;	/* this comes from tb->S[h] */

	Sh = PATH_H_PBUFFER(tb->tb_path, h);

	/* size of changed node */
	vn->vn_size =
	    MAX_CHILD_SIZE(Sh) - B_FREE_SPACE(Sh) + tb->insert_size[h];

	/* for internal nodes array if virtual items is not created */
	if (h) {
		vn->vn_nr_item = (vn->vn_size - DC_SIZE) / (DC_SIZE + KEY_SIZE);
		return;
	}

	/* number of items in virtual node */
	vn->vn_nr_item =
	    B_NR_ITEMS(Sh) + ((vn->vn_mode == M_INSERT) ? 1 : 0) -
	    ((vn->vn_mode == M_DELETE) ? 1 : 0);

	/* first virtual item: placed right after the virtual_node header */
	vn->vn_vi = (struct virtual_item *)(tb->tb_vn + 1);
	memset(vn->vn_vi, 0, vn->vn_nr_item * sizeof(struct virtual_item));
	vn->vn_free_ptr += vn->vn_nr_item * sizeof(struct virtual_item);

	/* first item in the node */
	ih = B_N_PITEM_HEAD(Sh, 0);

	/* define the mergeability for 0-th item (if it is not being deleted) */
	if (op_is_left_mergeable(&(ih->ih_key), Sh->b_size)
	    && (vn->vn_mode != M_DELETE || vn->vn_affected_item_num))
		vn->vn_vi[0].vi_type |= VI_TYPE_LEFT_MERGEABLE;

	/* go through all items those remain in the virtual node (except for the new (inserted) one) */
	for (new_num = 0; new_num < vn->vn_nr_item; new_num++) {
		int j;
		struct virtual_item *vi = vn->vn_vi + new_num;
		int is_affected =
		    ((new_num != vn->vn_affected_item_num) ? 0 : 1);

		if (is_affected && vn->vn_mode == M_INSERT)
			continue;

		/* get item number in source node */
		j = old_item_num(new_num, vn->vn_affected_item_num,
				 vn->vn_mode);

		vi->vi_item_len += ih_item_len(ih + j) + IH_SIZE;
		vi->vi_ih = ih + j;
		vi->vi_item = B_I_PITEM(Sh, ih + j);
		vi->vi_uarea = vn->vn_free_ptr;

		// FIXME: there is no check, that item operation did not
		// consume too much memory
		vn->vn_free_ptr +=
		    op_create_vi(vn, vi, is_affected, tb->insert_size[0]);
		if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr)
			reiserfs_panic(tb->tb_sb, "vs-8030",
				       "virtual node space consumed");

		if (!is_affected)
			/* this is not being changed */
			continue;

		if (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT) {
			/* the affected item grows by the paste/cut size */
			vn->vn_vi[new_num].vi_item_len += tb->insert_size[0];
			vi->vi_new_data = vn->vn_data;	// pointer to data which is going to be pasted
		}
	}

	/* virtual inserted item is not defined yet */
	if (vn->vn_mode == M_INSERT) {
		struct virtual_item *vi = vn->vn_vi + vn->vn_affected_item_num;

		RFALSE(vn->vn_ins_ih == NULL,
		       "vs-8040: item header of inserted item is not specified");
		vi->vi_item_len = tb->insert_size[0];
		vi->vi_ih = vn->vn_ins_ih;
		vi->vi_item = vn->vn_data;
		vi->vi_uarea = vn->vn_free_ptr;

		op_create_vi(vn, vi, 0 /*not pasted or cut */ ,
			     tb->insert_size[0]);
	}

	/* set right merge flag we take right delimiting key and check whether it is a mergeable item */
	if (tb->CFR[0]) {
		struct reiserfs_key *key;

		key = B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]);
		if (op_is_left_mergeable(key, Sh->b_size)
		    && (vn->vn_mode != M_DELETE
			|| vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1))
			vn->vn_vi[vn->vn_nr_item - 1].vi_type |=
			    VI_TYPE_RIGHT_MERGEABLE;

#ifdef CONFIG_REISERFS_CHECK
		if (op_is_left_mergeable(key, Sh->b_size) &&
		    !(vn->vn_mode != M_DELETE
		      || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1)) {
			/* we delete last item and it could be merged with right neighbor's first item */
			if (!
			    (B_NR_ITEMS(Sh) == 1
			     && is_direntry_le_ih(B_N_PITEM_HEAD(Sh, 0))
			     && I_ENTRY_COUNT(B_N_PITEM_HEAD(Sh, 0)) == 1)) {
				/* node contains more than 1 item, or item is not directory item, or this item contains more than 1 entry */
				print_block(Sh, 0, -1, -1);
				reiserfs_panic(tb->tb_sb, "vs-8045",
					       "rdkey %k, affected item==%d "
					       "(mode==%c) Must be %c",
					       key, vn->vn_affected_item_num,
					       vn->vn_mode, M_DELETE);
			}
		}
#endif
	}
}
/* using virtual node check, how many items can be shifted to left
neighbor */
/*
 * Using the virtual node, compute how many whole items (tb->lnum[0])
 * and how many bytes of one boundary item (tb->lbytes, -1 if none)
 * can be shifted into the left neighbor given cur_free bytes free.
 */
static void check_left(struct tree_balance *tb, int h, int cur_free)
{
	int i;
	struct virtual_node *vn = tb->tb_vn;
	struct virtual_item *vi;
	int d_size, ih_size;

	RFALSE(cur_free < 0, "vs-8050: cur_free (%d) < 0", cur_free);

	/* internal level: entries have a fixed size, so this is a division */
	if (h > 0) {
		tb->lnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
		return;
	}

	/* leaf level */
	if (!cur_free || !vn->vn_nr_item) {
		/* no free space or nothing to move */
		tb->lnum[h] = 0;
		tb->lbytes = -1;
		return;
	}

	RFALSE(!PATH_H_PPARENT(tb->tb_path, 0),
	       "vs-8055: parent does not exist or invalid");

	vi = vn->vn_vi;
	if ((unsigned int)cur_free >=
	    (vn->vn_size -
	     ((vi->vi_type & VI_TYPE_LEFT_MERGEABLE) ? IH_SIZE : 0))) {
		/* all contents of S[0] fits into L[0] */

		RFALSE(vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE,
		       "vs-8055: invalid mode or balance condition failed");

		tb->lnum[0] = vn->vn_nr_item;
		tb->lbytes = -1;
		return;
	}

	d_size = 0, ih_size = IH_SIZE;

	/* first item may be merge with last item in left neighbor */
	if (vi->vi_type & VI_TYPE_LEFT_MERGEABLE)
		/* a merged item needs no new item head in L[0] */
		d_size = -((int)IH_SIZE), ih_size = 0;

	tb->lnum[0] = 0;
	for (i = 0; i < vn->vn_nr_item;
	     i++, ih_size = IH_SIZE, d_size = 0, vi++) {
		d_size += vi->vi_item_len;
		if (cur_free >= d_size) {
			/* the item can be shifted entirely */
			cur_free -= d_size;
			tb->lnum[0]++;
			continue;
		}

		/* the item cannot be shifted entirely, try to split it */
		/* check whether L[0] can hold ih and at least one byte of the item body */
		if (cur_free <= ih_size) {
			/* cannot shift even a part of the current item */
			tb->lbytes = -1;
			return;
		}
		cur_free -= ih_size;

		tb->lbytes = op_check_left(vi, cur_free, 0, 0);
		if (tb->lbytes != -1)
			/* count partially shifted item */
			tb->lnum[0]++;

		break;
	}

	return;
}
/* using virtual node check, how many items can be shifted to right
neighbor */
/*
 * Mirror of check_left(): compute how many whole items (tb->rnum[0])
 * and how many bytes of one boundary item (tb->rbytes, -1 if none)
 * can be shifted into the right neighbor given cur_free bytes free.
 */
static void check_right(struct tree_balance *tb, int h, int cur_free)
{
	int i;
	struct virtual_node *vn = tb->tb_vn;
	struct virtual_item *vi;
	int d_size, ih_size;

	RFALSE(cur_free < 0, "vs-8070: cur_free < 0");

	/* internal level: entries have a fixed size, so this is a division */
	if (h > 0) {
		tb->rnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
		return;
	}

	/* leaf level */
	if (!cur_free || !vn->vn_nr_item) {
		/* no free space */
		tb->rnum[h] = 0;
		tb->rbytes = -1;
		return;
	}

	RFALSE(!PATH_H_PPARENT(tb->tb_path, 0),
	       "vs-8075: parent does not exist or invalid");

	/* walk items from the last one backwards */
	vi = vn->vn_vi + vn->vn_nr_item - 1;
	if ((unsigned int)cur_free >=
	    (vn->vn_size -
	     ((vi->vi_type & VI_TYPE_RIGHT_MERGEABLE) ? IH_SIZE : 0))) {
		/* all contents of S[0] fits into R[0] */

		RFALSE(vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE,
		       "vs-8080: invalid mode or balance condition failed");

		tb->rnum[h] = vn->vn_nr_item;
		tb->rbytes = -1;
		return;
	}

	d_size = 0, ih_size = IH_SIZE;

	/* last item may be merge with first item in right neighbor */
	if (vi->vi_type & VI_TYPE_RIGHT_MERGEABLE)
		/* a merged item needs no new item head in R[0] */
		d_size = -(int)IH_SIZE, ih_size = 0;

	tb->rnum[0] = 0;
	for (i = vn->vn_nr_item - 1; i >= 0;
	     i--, d_size = 0, ih_size = IH_SIZE, vi--) {
		d_size += vi->vi_item_len;
		if (cur_free >= d_size) {
			/* the item can be shifted entirely */
			cur_free -= d_size;
			tb->rnum[0]++;
			continue;
		}

		/* check whether R[0] can hold ih and at least one byte of the item body */
		if (cur_free <= ih_size) {	/* cannot shift even a part of the current item */
			tb->rbytes = -1;
			return;
		}

		/* R[0] can hold the header of the item and at least one byte of its body */
		cur_free -= ih_size;	/* cur_free is still > 0 */

		tb->rbytes = op_check_right(vi, cur_free);
		if (tb->rbytes != -1)
			/* count partially shifted item */
			tb->rnum[0]++;

		break;
	}

	return;
}
/*
* from - number of items, which are shifted to left neighbor entirely
* to - number of item, which are shifted to right neighbor entirely
* from_bytes - number of bytes of boundary item (or directory entries) which are shifted to left neighbor
* to_bytes - number of bytes of boundary item (or directory entries) which are shifted to right neighbor */
/*
 * Determine how many nodes S[h] must split into, assuming 'from' items
 * (plus from_bytes of the boundary item) already go to the left
 * neighbor and 'to' items (plus to_bytes) to the right one.  On the
 * leaf level snum012[0..2] receive the item counts for S[0] and the
 * new node(s), snum012[3..4] the split byte counts (s1bytes/s2bytes).
 * Returns the number of nodes needed.
 */
static int get_num_ver(int mode, struct tree_balance *tb, int h,
		       int from, int from_bytes,
		       int to, int to_bytes, short *snum012, int flow)
{
	int i;
	int cur_free;
	//      int bytes;
	int units;
	struct virtual_node *vn = tb->tb_vn;
	//      struct virtual_item * vi;

	int total_node_size, max_node_size, current_item_size;
	int needed_nodes;
	int start_item,		/* position of item we start filling node from */
	 end_item,		/* position of item we finish filling node by */
	 start_bytes,		/* number of first bytes (entries for directory) of start_item-th item
				   we do not include into node that is being filled */
	 end_bytes;		/* number of last bytes (entries for directory) of end_item-th item
				   we do node include into node that is being filled */
	int split_item_positions[2];	/* these are positions in virtual item of
					   items, that are split between S[0] and
					   S1new and S1new and S2new */

	split_item_positions[0] = -1;
	split_item_positions[1] = -1;

	/* We only create additional nodes if we are in insert or paste mode
	   or we are in replace mode at the internal level. If h is 0 and
	   the mode is M_REPLACE then in fix_nodes we change the mode to
	   paste or insert before we get here in the code.  */
	RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE),
	       "vs-8100: insert_size < 0 in overflow");

	max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h));

	/* snum012 [0-2] - number of items, that lay
	   to S[0], first new node and second new node */
	snum012[3] = -1;	/* s1bytes */
	snum012[4] = -1;	/* s2bytes */

	/* internal level */
	if (h > 0) {
		i = ((to - from) * (KEY_SIZE + DC_SIZE) + DC_SIZE);
		if (i == max_node_size)
			return 1;
		return (i / max_node_size + 1);
	}

	/* leaf level */
	needed_nodes = 1;
	total_node_size = 0;
	cur_free = max_node_size;

	// start from 'from'-th item
	start_item = from;
	// skip its first 'start_bytes' units
	start_bytes = ((from_bytes != -1) ? from_bytes : 0);

	// last included item is the 'end_item'-th one
	end_item = vn->vn_nr_item - to - 1;
	// do not count last 'end_bytes' units of 'end_item'-th item
	end_bytes = (to_bytes != -1) ? to_bytes : 0;

	/* go through all item beginning from the start_item-th item and ending by
	   the end_item-th item. Do not count first 'start_bytes' units of
	   'start_item'-th item and last 'end_bytes' of 'end_item'-th item */

	for (i = start_item; i <= end_item; i++) {
		struct virtual_item *vi = vn->vn_vi + i;
		int skip_from_end = ((i == end_item) ? end_bytes : 0);

		RFALSE(needed_nodes > 3, "vs-8105: too many nodes are needed");

		/* get size of current item */
		current_item_size = vi->vi_item_len;

		/* do not take in calculation head part (from_bytes) of from-th item */
		current_item_size -=
		    op_part_size(vi, 0 /*from start */ , start_bytes);

		/* do not take in calculation tail part of last item */
		current_item_size -=
		    op_part_size(vi, 1 /*from end */ , skip_from_end);

		/* if item fits into current node entierly */
		if (total_node_size + current_item_size <= max_node_size) {
			snum012[needed_nodes - 1]++;
			total_node_size += current_item_size;
			start_bytes = 0;
			continue;
		}

		if (current_item_size > max_node_size) {
			/* virtual item length is longer, than max size of item in
			   a node. It is impossible for direct item */
			RFALSE(is_direct_le_ih(vi->vi_ih),
			       "vs-8110: "
			       "direct item length is %d. It can not be longer than %d",
			       current_item_size, max_node_size);
			/* we will try to split it */
			flow = 1;
		}

		if (!flow) {
			/* as we do not split items, take new node and continue */
			needed_nodes++;
			i--;
			total_node_size = 0;
			continue;
		}
		// calculate number of item units which fit into node being
		// filled
		{
			int free_space;

			free_space = max_node_size - total_node_size - IH_SIZE;
			units =
			    op_check_left(vi, free_space, start_bytes,
					  skip_from_end);
			if (units == -1) {
				/* nothing fits into current node, take new node and continue */
				needed_nodes++, i--, total_node_size = 0;
				continue;
			}
		}
		/* something fits into the current node */
		//if (snum012[3] != -1 || needed_nodes != 1)
		//  reiserfs_panic (tb->tb_sb, "vs-8115: get_num_ver: too many nodes required");
		//snum012[needed_nodes - 1 + 3] = op_unit_num (vi) - start_bytes - units;
		start_bytes += units;
		snum012[needed_nodes - 1 + 3] = units;

		if (needed_nodes > 2)
			reiserfs_warning(tb->tb_sb, "vs-8111",
					 "split_item_position is out of range");
		snum012[needed_nodes - 1]++;
		split_item_positions[needed_nodes - 1] = i;
		needed_nodes++;
		/* continue from the same item with start_bytes != -1 */
		start_item = i;
		i--;
		total_node_size = 0;
	}

	// sum012[4] (if it is not -1) contains number of units of which
	// are to be in S1new, snum012[3] - to be in S0. They are supposed
	// to be S1bytes and S2bytes correspondingly, so recalculate
	if (snum012[4] > 0) {
		int split_item_num;
		int bytes_to_r, bytes_to_l;
		int bytes_to_S1new;

		split_item_num = split_item_positions[1];
		/* units of the split item already claimed by the neighbors */
		bytes_to_l =
		    ((from == split_item_num
		      && from_bytes != -1) ? from_bytes : 0);
		bytes_to_r =
		    ((end_item == split_item_num
		      && end_bytes != -1) ? end_bytes : 0);
		bytes_to_S1new =
		    ((split_item_positions[0] ==
		      split_item_positions[1]) ? snum012[3] : 0);

		// s2bytes
		snum012[4] =
		    op_unit_num(&vn->vn_vi[split_item_num]) - snum012[4] -
		    bytes_to_r - bytes_to_l - bytes_to_S1new;

		if (vn->vn_vi[split_item_num].vi_index != TYPE_DIRENTRY &&
		    vn->vn_vi[split_item_num].vi_index != TYPE_INDIRECT)
			reiserfs_warning(tb->tb_sb, "vs-8115",
					 "not directory or indirect item");
	}

	/* now we know S2bytes, calculate S1bytes */
	if (snum012[3] > 0) {
		int split_item_num;
		int bytes_to_r, bytes_to_l;
		int bytes_to_S2new;

		split_item_num = split_item_positions[0];
		bytes_to_l =
		    ((from == split_item_num
		      && from_bytes != -1) ? from_bytes : 0);
		bytes_to_r =
		    ((end_item == split_item_num
		      && end_bytes != -1) ? end_bytes : 0);
		bytes_to_S2new =
		    ((split_item_positions[0] == split_item_positions[1]
		      && snum012[4] != -1) ? snum012[4] : 0);

		// s1bytes
		snum012[3] =
		    op_unit_num(&vn->vn_vi[split_item_num]) - snum012[3] -
		    bytes_to_r - bytes_to_l - bytes_to_S2new;
	}

	return needed_nodes;
}
/* Set parameters for balancing.
* Performs write of results of analysis of balancing into structure tb,
* where it will later be used by the functions that actually do the balancing.
* Parameters:
* tb tree_balance structure;
* h current level of the node;
* lnum number of items from S[h] that must be shifted to L[h];
* rnum number of items from S[h] that must be shifted to R[h];
* blk_num number of blocks that S[h] will be splitted into;
* s012 number of items that fall into splitted nodes.
* lbytes number of bytes which flow to the left neighbor from the item that is not
* not shifted entirely
* rbytes number of bytes which flow to the right neighbor from the item that is not
* not shifted entirely
* s1bytes number of bytes which flow to the first new node when S[0] splits (this number is contained in s012 array)
*/
/*
 * Record the balancing decision for level h in the tree_balance
 * structure (see the parameter description above for the meaning of
 * lnum/rnum/blk_num/s012/lb/rb).
 */
static void set_parameters(struct tree_balance *tb, int h, int lnum,
			   int rnum, int blk_num, short *s012, int lb, int rb)
{
	tb->lnum[h] = lnum;
	tb->rnum[h] = rnum;
	tb->blknum[h] = blk_num;

	/* split counts and boundary byte counts exist only at the leaf level */
	if (h == 0) {
		if (s012 != NULL) {
			tb->s0num = s012[0];
			tb->s1num = s012[1];
			tb->s2num = s012[2];
			tb->s1bytes = s012[3];
			tb->s2bytes = s012[4];
		}
		tb->lbytes = lb;
		tb->rbytes = rb;
	}

	PROC_INFO_ADD(tb->tb_sb, lnum[h], lnum);
	PROC_INFO_ADD(tb->tb_sb, rnum[h], rnum);
	PROC_INFO_ADD(tb->tb_sb, lbytes[h], lb);
	PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb);
}
/* check, does node disappear if we shift tb->lnum[0] items to left
neighbor and tb->rnum[0] to the right one. */
/*
 * Decide whether the leaf S[0] disappears after shifting tb->lnum[0]
 * items left and tb->rnum[0] items right.  Returns 1 (and records the
 * final parameters) when the node empties, 0 otherwise.
 */
static int is_leaf_removable(struct tree_balance *tb)
{
	struct virtual_node *vn = tb->tb_vn;
	int shift_left, shift_right;
	int remaining;
	int unit_count;

	/* items that move to a neighbor entirely; a partially shifted
	 * boundary item (lbytes/rbytes != -1) is not counted */
	shift_left = tb->lnum[0];
	if (tb->lbytes != -1)
		shift_left--;

	shift_right = tb->rnum[0];
	if (tb->rbytes != -1)
		shift_right--;

	remaining = vn->vn_nr_item - (shift_left + shift_right);

	if (remaining < 1) {
		/* everything fits into the neighbors as whole items */
		set_parameters(tb, 0, shift_left,
			       vn->vn_nr_item - shift_left, 0, NULL, -1, -1);
		return 1;
	}

	/* more than one item left, or a boundary item cannot be split:
	 * S[0] is not removable */
	if (remaining > 1 || tb->lbytes == -1 || tb->rbytes == -1)
		return 0;

	/* exactly one item remains -- can it be divided between the
	 * neighbors?  (size is measured in item units) */
	unit_count = op_unit_num(&(vn->vn_vi[shift_left]));
	if (tb->lbytes + tb->rbytes >= unit_count) {
		set_parameters(tb, 0, shift_left + 1, shift_right + 1, 0,
			       NULL, tb->lbytes, -1);
		return 1;
	}

	return 0;
}
/* check whether L, S, R can be joined in one node */
/*
 * Check whether L[0], S[0] and R[0] can all be joined into one node,
 * given lfree/rfree bytes free in the neighbors.  Records the result
 * via set_parameters() and returns 1 when joinable, 0 otherwise.
 */
static int are_leaves_removable(struct tree_balance *tb, int lfree, int rfree)
{
	struct virtual_node *vn = tb->tb_vn;
	int ih_size;
	struct buffer_head *S0;

	S0 = PATH_H_PBUFFER(tb->tb_path, 0);

	ih_size = 0;
	if (vn->vn_nr_item) {
		/* merged boundary items save one item head each */
		if (vn->vn_vi[0].vi_type & VI_TYPE_LEFT_MERGEABLE)
			ih_size += IH_SIZE;

		if (vn->vn_vi[vn->vn_nr_item - 1].
		    vi_type & VI_TYPE_RIGHT_MERGEABLE)
			ih_size += IH_SIZE;
	} else {
		/* there was only one item and it will be deleted */
		struct item_head *ih;

		RFALSE(B_NR_ITEMS(S0) != 1,
		       "vs-8125: item number must be 1: it is %d",
		       B_NR_ITEMS(S0));

		ih = B_N_PITEM_HEAD(S0, 0);
		if (tb->CFR[0]
		    && !comp_short_le_keys(&(ih->ih_key),
					   B_N_PDELIM_KEY(tb->CFR[0],
							  tb->rkey[0])))
			if (is_direntry_le_ih(ih)) {
				/* Directory must be in correct state here: that is
				   somewhere at the left side should exist first directory
				   item. But the item being deleted can not be that first
				   one because its right neighbor is item of the same
				   directory. (But first item always gets deleted in last
				   turn). So, neighbors of deleted item can be merged, so
				   we can save ih_size */
				ih_size = IH_SIZE;

				/* we might check that left neighbor exists and is of the
				   same directory */
				RFALSE(le_ih_k_offset(ih) == DOT_OFFSET,
				       "vs-8130: first directory item can not be removed until directory is not empty");
			}

	}

	/* the three nodes fit into L[0] + R[0] (minus the saved heads) */
	if (MAX_CHILD_SIZE(S0) + vn->vn_size <= rfree + lfree + ih_size) {
		set_parameters(tb, 0, -1, -1, -1, NULL, -1, -1);
		PROC_INFO_INC(tb->tb_sb, leaves_removable);
		return 1;
	}
	return 0;

}
/* when we do not split item, lnum and rnum are numbers of entire items */
/*
 * SET_PAR_SHIFT_LEFT: record balancing parameters for shifting S[h]
 * into the left neighbor.  At internal levels (h != 0) half of the
 * combined surplus moves; at the leaf level lpar items are shifted,
 * splitting the boundary item only when lset == LEFT_SHIFT_FLOW.
 */
#define SET_PAR_SHIFT_LEFT \
if (h)\
{\
   int to_l;\
   \
   to_l = (MAX_NR_KEY(Sh)+1 - lpar + vn->vn_nr_item + 1) / 2 -\
	      (MAX_NR_KEY(Sh) + 1 - lpar);\
\
	      set_parameters (tb, h, to_l, 0, lnver, NULL, -1, -1);\
}\
else \
{\
   if (lset==LEFT_SHIFT_FLOW)\
     set_parameters (tb, h, lpar, 0, lnver, snum012+lset,\
		     tb->lbytes, -1);\
   else\
     set_parameters (tb, h, lpar - (tb->lbytes!=-1), 0, lnver, snum012+lset,\
		     -1, -1);\
}

/*
 * SET_PAR_SHIFT_RIGHT: mirror of SET_PAR_SHIFT_LEFT for the right
 * neighbor; rpar/rset/rnver play the roles of lpar/lset/lnver.
 */
#define SET_PAR_SHIFT_RIGHT \
if (h)\
{\
   int to_r;\
   \
   to_r = (MAX_NR_KEY(Sh)+1 - rpar + vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 - rpar);\
   \
   set_parameters (tb, h, 0, to_r, rnver, NULL, -1, -1);\
}\
else \
{\
   if (rset==RIGHT_SHIFT_FLOW)\
     set_parameters (tb, h, 0, rpar, rnver, snum012+rset,\
		  -1, tb->rbytes);\
   else\
     set_parameters (tb, h, 0, rpar - (tb->rbytes!=-1), rnver, snum012+rset,\
		  -1, -1);\
}
/*
 * Release the path and every neighbor/parent buffer cached in the
 * tree_balance structure, clearing the pointers as we go.
 */
static void free_buffers_in_tb(struct tree_balance *tb)
{
	int i;

	pathrelse(tb->tb_path);

	for (i = 0; i < MAX_HEIGHT; i++) {
		brelse(tb->L[i]);
		tb->L[i] = NULL;

		brelse(tb->R[i]);
		tb->R[i] = NULL;

		brelse(tb->FL[i]);
		tb->FL[i] = NULL;

		brelse(tb->FR[i]);
		tb->FR[i] = NULL;

		brelse(tb->CFL[i]);
		tb->CFL[i] = NULL;

		brelse(tb->CFR[i]);
		tb->CFR[i] = NULL;
	}
}
/* Get new buffers for storing new nodes that are created while balancing.
* Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
* CARRY_ON - schedule didn't occur while the function worked;
* NO_DISK_SPACE - no disk space.
*/
/* The function is NOT SCHEDULE-SAFE! */
static int get_empty_nodes(struct tree_balance *tb, int h)
{
	struct buffer_head *new_bh,
	    *Sh = PATH_H_PBUFFER(tb->tb_path, h);
	b_blocknr_t *blocknr, blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
	int counter, number_of_freeblk, amount_needed,	/* number of needed empty blocks */
	 retval = CARRY_ON;
	struct super_block *sb = tb->tb_sb;

	/* number_of_freeblk is the number of empty blocks which have been
	   acquired for use by the balancing algorithm minus the number of
	   empty blocks used in the previous levels of the analysis,
	   number_of_freeblk = tb->cur_blknum can be non-zero if a schedule occurs
	   after empty blocks are acquired, and the balancing analysis is
	   then restarted, amount_needed is the number needed by this level
	   (h) of the balancing analysis.

	   Note that for systems with many processes writing, it would be
	   more layout optimal to calculate the total number needed by all
	   levels and then to run reiserfs_new_blocks to get all of them at once.  */

	/* Initiate number_of_freeblk to the amount acquired prior to the restart of
	   the analysis or 0 if not restarted, then subtract the amount needed
	   by all of the levels of the tree below h. */
	/* blknum includes S[h], so we subtract 1 in this calculation */
	for (counter = 0, number_of_freeblk = tb->cur_blknum;
	     counter < h; counter++)
		number_of_freeblk -=
		    (tb->blknum[counter]) ? (tb->blknum[counter] -
					     1) : 0;

	/* Allocate missing empty blocks. */
	/* if Sh == 0  then we are getting a new root */
	amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1;
	/*  Amount_needed = the amount that we need more than the amount that we have. */
	if (amount_needed > number_of_freeblk)
		amount_needed -= number_of_freeblk;
	else			/* If we have enough already then there is nothing to do. */
		return CARRY_ON;

	/* No need to check quota - is not allocated for blocks used for formatted nodes */
	if (reiserfs_new_form_blocknrs(tb, blocknrs,
				       amount_needed) == NO_DISK_SPACE)
		return NO_DISK_SPACE;

	/* for each blocknumber we just got, get a buffer and stick it on FEB */
	for (blocknr = blocknrs, counter = 0;
	     counter < amount_needed; blocknr++, counter++) {

		RFALSE(!*blocknr,
		       "PAP-8135: reiserfs_new_blocknrs failed when got new blocks");

		/* NOTE(review): sb_getblk() can return NULL under memory
		 * pressure; the result is not checked here -- confirm this
		 * is acceptable before relying on new_bh. */
		new_bh = sb_getblk(sb, *blocknr);
		RFALSE(buffer_dirty(new_bh) ||
		       buffer_journaled(new_bh) ||
		       buffer_journal_dirty(new_bh),
		       "PAP-8140: journaled or dirty buffer %b for the new block",
		       new_bh);

		/* Put empty buffers into the array. */
		RFALSE(tb->FEB[tb->cur_blknum],
		       "PAP-8141: busy slot for new buffer");
		set_buffer_journal_new(new_bh);
		tb->FEB[tb->cur_blknum++] = new_bh;
	}

	/* a schedule may have let another writer change the tree under us */
	if (retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb))
		retval = REPEAT_SEARCH;

	return retval;
}
/*
 * Return the free space of the left neighbor of the node at level h.
 * The free space of a child is recorded in its parent, so it is read
 * from the parent node (FL[h]) of the left neighbor.
 */
static int get_lfree(struct tree_balance *tb, int h)
{
	struct buffer_head *father = PATH_H_PPARENT(tb->tb_path, h);
	struct buffer_head *left = tb->FL[h];
	int position;

	if (father == NULL || left == NULL)
		return 0;

	if (father == left) {
		/* Shared father: the left neighbor sits just before us. */
		position = PATH_H_B_ITEM_ORDER(tb->tb_path, h) - 1;
	} else {
		/* Different fathers: the neighbor is FL[h]'s last child. */
		position = B_NR_ITEMS(left);
		father = left;
	}

	return MAX_CHILD_SIZE(father) - dc_size(B_N_CHILD(father, position));
}
/*
 * Return the free space of the right neighbor of the node at level h.
 * The free space of a child is recorded in its parent, so it is read
 * from the parent node (FR[h]) of the right neighbor.
 */
static int get_rfree(struct tree_balance *tb, int h)
{
	struct buffer_head *father = PATH_H_PPARENT(tb->tb_path, h);
	struct buffer_head *right = tb->FR[h];
	int position;

	if (father == NULL || right == NULL)
		return 0;

	if (father == right) {
		/* Shared father: the right neighbor sits just after us. */
		position = PATH_H_B_ITEM_ORDER(tb->tb_path, h) + 1;
	} else {
		/* Different fathers: the neighbor is FR[h]'s first child. */
		position = 0;
		father = right;
	}

	return MAX_CHILD_SIZE(father) - dc_size(B_N_CHILD(father, position));
}
/*
 * Tell whether the left neighbor of the node at level h is already present
 * in the buffer cache.  Returns 1 when it is, 0 otherwise.
 */
static int is_left_neighbor_in_cache(struct tree_balance *tb, int h)
{
	struct super_block *sb = tb->tb_sb;
	struct buffer_head *father, *left;
	b_blocknr_t left_neighbor_blocknr;
	int left_neighbor_position;

	/* No father of the left neighbor: nothing to look up. */
	if (!tb->FL[h])
		return 0;

	/* Father of the node to be balanced. */
	father = PATH_H_PBUFFER(tb->tb_path, h + 1);
	RFALSE(!father ||
	       !B_IS_IN_TREE(father) ||
	       !B_IS_IN_TREE(tb->FL[h]) ||
	       !buffer_uptodate(father) ||
	       !buffer_uptodate(tb->FL[h]),
	       "vs-8165: F[h] (%b) or FL[h] (%b) is invalid",
	       father, tb->FL[h]);

	/* Position of the pointer to the left neighbor inside its father. */
	if (father == tb->FL[h])
		left_neighbor_position = tb->lkey[h];
	else
		left_neighbor_position = B_NR_ITEMS(tb->FL[h]);

	/* Block number of the left neighbor. */
	left_neighbor_blocknr =
	    B_N_CHILD_NUM(tb->FL[h], left_neighbor_position);

	/* Probe the cache; drop the extra reference if found. */
	left = sb_find_get_block(sb, left_neighbor_blocknr);
	if (left) {
		RFALSE(buffer_uptodate(left) && !B_IS_IN_TREE(left),
		       "vs-8170: left neighbor (%b %z) is not in the tree",
		       left, left);
		put_bh(left);
		return 1;
	}

	return 0;
}
#define LEFT_PARENTS 'l'
#define RIGHT_PARENTS 'r'
/*
 * Decrement @key in place.  The operation depends on the key's item type,
 * so it is delegated to the decrement_key method of the matching item_ops
 * entry.
 */
static void decrement_key(struct cpu_key *key)
{
	/* call item specific function for this key */
	item_ops[cpu_key_k_type(key)]->decrement_key(key);
}
/*
 * Calculate the far left/right parent of the left/right neighbor of the
 * current node, i.e. the left/right (FL[h]/FR[h]) neighbor of the parent
 * F[h].  Also calculate the left/right common parent of the current node
 * and L[h]/R[h] and the left/right delimiting key position.
 * Returns: REPEAT_SEARCH - path in the tree is not correct, or schedule
 *                          occurred while the function worked;
 *          IO_ERROR      - I/O failure while searching for the neighbor's
 *                          father (the neighbor path is already released);
 *          CARRY_ON      - schedule didn't occur while the function worked.
 */
static int get_far_parent(struct tree_balance *tb,
			  int h,
			  struct buffer_head **pfather,
			  struct buffer_head **pcom_father, char c_lr_par)
{
	struct buffer_head *parent;
	INITIALIZE_PATH(s_path_to_neighbor_father);
	struct treepath *path = tb->tb_path;
	struct cpu_key s_lr_father_key;
	int counter,
	    position = INT_MAX,
	    first_last_position = 0,
	    path_offset = PATH_H_PATH_OFFSET(path, h);

	/*
	 * Starting from F[h], go upwards in the tree, and look for the common
	 * ancestor of F[h] and its l/r neighbor that should be obtained.
	 */
	counter = path_offset;
	RFALSE(counter < FIRST_PATH_ELEMENT_OFFSET,
	       "PAP-8180: invalid path length");
	for (; counter > FIRST_PATH_ELEMENT_OFFSET; counter--) {
		/*
		 * Check whether the parent of the current buffer in the path
		 * is really its parent in the tree.
		 */
		if (!B_IS_IN_TREE
		    (parent = PATH_OFFSET_PBUFFER(path, counter - 1)))
			return REPEAT_SEARCH;
		/* Check whether the position in the parent is correct. */
		if ((position =
		     PATH_OFFSET_POSITION(path,
					  counter - 1)) >
		    B_NR_ITEMS(parent))
			return REPEAT_SEARCH;
		/*
		 * Check whether the parent at the path really points to the
		 * child.
		 */
		if (B_N_CHILD_NUM(parent, position) !=
		    PATH_OFFSET_PBUFFER(path, counter)->b_blocknr)
			return REPEAT_SEARCH;
		/*
		 * Found the common ancestor if the position in the parent is
		 * not equal to the first (left) / last (right) one.
		 */
		if (c_lr_par == RIGHT_PARENTS)
			first_last_position = B_NR_ITEMS(parent);
		if (position != first_last_position) {
			/* Hold a reference on the common father. */
			*pcom_father = parent;
			get_bh(*pcom_father);
			/*(*pcom_father = parent)->b_count++; */
			break;
		}
	}

	/* If we reached the root of the tree, there is no common father. */
	if (counter == FIRST_PATH_ELEMENT_OFFSET) {
		/*
		 * Check whether the first buffer in the path is the root of
		 * the tree; if not, the root moved and the path is stale.
		 */
		if (PATH_OFFSET_PBUFFER
		    (tb->tb_path,
		     FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
		    SB_ROOT_BLOCK(tb->tb_sb)) {
			*pfather = *pcom_father = NULL;
			return CARRY_ON;
		}
		return REPEAT_SEARCH;
	}

	RFALSE(B_LEVEL(*pcom_father) <= DISK_LEAF_NODE_LEVEL,
	       "PAP-8185: (%b %z) level too small",
	       *pcom_father, *pcom_father);

	/* Check whether the common parent is locked. */
	if (buffer_locked(*pcom_father)) {
		/* Release the write lock while the buffer is busy. */
		reiserfs_write_unlock(tb->tb_sb);
		__wait_on_buffer(*pcom_father);
		reiserfs_write_lock(tb->tb_sb);
		if (FILESYSTEM_CHANGED_TB(tb)) {
			brelse(*pcom_father);
			return REPEAT_SEARCH;
		}
	}

	/*
	 * So we got the common parent of the current node and its left/right
	 * neighbor.  Now get the parent of the left/right neighbor itself.
	 * Form the key for it; as a side effect record the delimiting key
	 * position in lkey[h-1]/rkey[h-1].
	 */
	le_key2cpu_key(&s_lr_father_key,
		       B_N_PDELIM_KEY(*pcom_father,
				      (c_lr_par ==
				       LEFT_PARENTS) ? (tb->lkey[h - 1] =
							position -
							1) : (tb->rkey[h -
								       1] =
							      position)));

	if (c_lr_par == LEFT_PARENTS)
		decrement_key(&s_lr_father_key);

	if (search_by_key
	    (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father,
	     h + 1) == IO_ERROR)
		/* path is released */
		return IO_ERROR;

	if (FILESYSTEM_CHANGED_TB(tb)) {
		pathrelse(&s_path_to_neighbor_father);
		brelse(*pcom_father);
		return REPEAT_SEARCH;
	}

	*pfather = PATH_PLAST_BUFFER(&s_path_to_neighbor_father);

	RFALSE(B_LEVEL(*pfather) != h + 1,
	       "PAP-8190: (%b %z) level too small", *pfather, *pfather);
	RFALSE(s_path_to_neighbor_father.path_length <
	       FIRST_PATH_ELEMENT_OFFSET, "PAP-8192: path length is too small");

	/* Keep *pfather referenced: drop it from the path before releasing. */
	s_path_to_neighbor_father.path_length--;
	pathrelse(&s_path_to_neighbor_father);
	return CARRY_ON;
}
/*
 * Get the parents of the neighbors of the node in the path
 * (S[path_offset]) and the common parents of S[path_offset] and
 * L[path_offset]/R[path_offset]: F[path_offset], FL[path_offset],
 * FR[path_offset], CFL[path_offset], CFR[path_offset].
 * Also calculate the left and right delimiting key positions:
 * lkey[path_offset] and rkey[path_offset].
 * Returns: CARRY_ON - schedule didn't occur while the function worked;
 *          otherwise the REPEAT_SEARCH/IO_ERROR code propagated from
 *          get_far_parent().
 */
static int get_parents(struct tree_balance *tb, int h)
{
	struct treepath *path = tb->tb_path;
	int position,
	    ret,
	    path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);
	struct buffer_head *curf, *curcf;

	/* Current node is the root of the tree or will be root of the tree */
	if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
		/*
		 * The root can not have parents.  Release nodes which
		 * previously were obtained as parents of the current node's
		 * neighbors.
		 */
		brelse(tb->FL[h]);
		brelse(tb->CFL[h]);
		brelse(tb->FR[h]);
		brelse(tb->CFR[h]);
		tb->FL[h] = NULL;
		tb->CFL[h] = NULL;
		tb->FR[h] = NULL;
		tb->CFR[h] = NULL;
		return CARRY_ON;
	}

	/* Get parent FL[path_offset] of L[path_offset]. */
	position = PATH_OFFSET_POSITION(path, path_offset - 1);
	if (position) {
		/*
		 * Current node is not the first child of its parent, so the
		 * parent serves as both FL[h] and CFL[h]; take one buffer
		 * reference for each role (curf == curcf here).
		 */
		curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
		curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
		get_bh(curf);
		get_bh(curf);
		tb->lkey[h] = position - 1;
	} else {
		/*
		 * Calculate current parent of L[path_offset], which is the
		 * left neighbor of the current node, and the current common
		 * parent of L[path_offset] and the current node.  Note that
		 * CFL[path_offset] != FL[path_offset] and
		 * CFL[path_offset] != F[path_offset].
		 * Also calculates lkey[path_offset].
		 */
		if ((ret = get_far_parent(tb, h + 1, &curf,
					  &curcf,
					  LEFT_PARENTS)) != CARRY_ON)
			return ret;
	}

	brelse(tb->FL[h]);
	tb->FL[h] = curf;	/* New initialization of FL[h]. */
	brelse(tb->CFL[h]);
	tb->CFL[h] = curcf;	/* New initialization of CFL[h]. */

	RFALSE((curf && !B_IS_IN_TREE(curf)) ||
	       (curcf && !B_IS_IN_TREE(curcf)),
	       "PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf);

	/* Get parent FR[h] of R[h]. */
	if (position == B_NR_ITEMS(PATH_H_PBUFFER(path, h + 1))) {
		/*
		 * Current node is the last child of F[h], so FR[h] != F[h].
		 * Calculate the current parent of R[h], which is the right
		 * neighbor of F[h], and the current common parent of R[h]
		 * and the current node.  Note that CFR[h] != FR[path_offset]
		 * and CFR[h] != F[h].
		 */
		if ((ret =
		     get_far_parent(tb, h + 1, &curf, &curcf,
				    RIGHT_PARENTS)) != CARRY_ON)
			return ret;
	} else {
		/*
		 * Current node is not the last child of its parent F[h];
		 * F[h] doubles as FR[h] and CFR[h] (two references again).
		 */
		curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
		curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
		get_bh(curf);
		get_bh(curf);
		tb->rkey[h] = position;
	}

	brelse(tb->FR[h]);
	/* New initialization of FR[path_offset]. */
	tb->FR[h] = curf;

	brelse(tb->CFR[h]);
	/* New initialization of CFR[path_offset]. */
	tb->CFR[h] = curcf;

	RFALSE((curf && !B_IS_IN_TREE(curf)) ||
	       (curcf && !B_IS_IN_TREE(curcf)),
	       "PAP-8205: FR (%b) or CFR (%b) is invalid", curf, curcf);

	return CARRY_ON;
}
/*
 * It is possible to remove a node as a result of shifting its contents to
 * the neighbors, even when we are inserting or pasting an item.
 *
 * If the combined free space of L, S and R is too small to absorb S[h]'s
 * contents (accounting for the header savings of items that could merge at
 * the leaf level, and the extra delimiting key needed on internal levels),
 * the node cannot be removed; in that case, if the new data also fits into
 * S[h] without shifting, balancing parameters are set and
 * NO_BALANCING_NEEDED is returned.  Otherwise !NO_BALANCING_NEEDED is
 * returned and the caller must continue the analysis.
 */
static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
				      struct tree_balance *tb, int h)
{
	struct buffer_head *Sh = PATH_H_PBUFFER(tb->tb_path, h);
	int levbytes = tb->insert_size[h];
	struct item_head *ih;
	struct reiserfs_key *r_key = NULL;

	/* First item of S[h] and the right delimiting key (if CFR exists). */
	ih = B_N_PITEM_HEAD(Sh, 0);
	if (tb->CFR[h])
		r_key = B_N_PDELIM_KEY(tb->CFR[h], tb->rkey[h]);

	if (lfree + rfree + sfree < MAX_CHILD_SIZE(Sh) + levbytes
	    /* shifting may merge items which might save space */
	    -
	    ((!h
	      && op_is_left_mergeable(&(ih->ih_key), Sh->b_size)) ? IH_SIZE : 0)
	    -
	    ((!h && r_key
	      && op_is_left_mergeable(r_key, Sh->b_size)) ? IH_SIZE : 0)
	    + ((h) ? KEY_SIZE : 0)) {
		/* node can not be removed */
		if (sfree >= levbytes) {
			/* new item fits into node S[h] without any shifting */
			if (!h)
				tb->s0num =
				    B_NR_ITEMS(Sh) +
				    ((mode == M_INSERT) ? 1 : 0);
			set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
			return NO_BALANCING_NEEDED;
		}
	}
	PROC_INFO_INC(tb->tb_sb, can_node_be_removed[h]);
	return !NO_BALANCING_NEEDED;
}
/*
 * Check whether current node S[h] is balanced when increasing its size by
 * Inserting or Pasting ("ip").
 * Calculate parameters for balancing for current level h.
 * Parameters:
 *	tb	tree_balance structure;
 *	h	current level of the node;
 *	inum	item number in S[h];
 *	mode	i - insert, p - paste;
 * Returns:	NO_BALANCING_NEEDED when S[h] can absorb the change itself,
 *		CARRY_ON when balancing parameters were set and higher
 *		levels must be checked, or NO_DISK_SPACE/REPEAT_SEARCH
 *		propagated from get_empty_nodes()/get_parents().
 */
static int ip_check_balance(struct tree_balance *tb, int h)
{
	struct virtual_node *vn = tb->tb_vn;
	/*
	 * levbytes: number of bytes that must be inserted into (value is
	 * negative if bytes are deleted) the buffer which contains the node
	 * being balanced.  The mnemonic is that the attempted change in
	 * node space used at this level is levbytes bytes.
	 */
	int levbytes,
	    ret;
	int lfree, sfree, rfree;	/* free space in L, S and R */
	/*
	 * nver is short for number of vertixes; lnver is the number if we
	 * shift to the left, rnver if we shift to the right, and lrnver if
	 * we shift in both directions.  The goal is to minimize first the
	 * number of vertixes, second the number of vertixes whose contents
	 * are changed by shifting, and third the number of uncached
	 * vertixes whose contents are changed by shifting and must be read
	 * from disk.
	 */
	int nver, lnver, rnver, lrnver;
	/*
	 * s0num, s1num, s2num (plus s1bytes, s2bytes) for each of the 8
	 * get_num_ver() calls below, 5 slots per case:
	 *   0,1 - do not shift, and do not shift but bottle
	 *   2   - shift only whole items to left
	 *   3   - shift to left and bottle as much as possible
	 *   4,5 - shift to right (whole items, and as much as possible)
	 *   6,7 - shift in both directions (whole items, and as much as
	 *         possible)
	 * At leaf level (h == 0), S0 = S[0] is the node being balanced and
	 * sInum is the number of items that will remain in node SI after
	 * balancing; S1 and S2 are new nodes that might be created.
	 */
	short snum012[40] = { 0, };
	/* Sh is the node whose balance is currently being checked */
	struct buffer_head *Sh;

	Sh = PATH_H_PBUFFER(tb->tb_path, h);
	levbytes = tb->insert_size[h];

	/* Calculate balance parameters for creating new root. */
	if (!Sh) {
		if (!h)
			reiserfs_panic(tb->tb_sb, "vs-8210",
				       "S[0] can not be 0");
		switch (ret = get_empty_nodes(tb, h)) {
		case CARRY_ON:
			set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
			/* no balancing for higher levels needed */
			return NO_BALANCING_NEEDED;

		case NO_DISK_SPACE:
		case REPEAT_SEARCH:
			return ret;
		default:
			reiserfs_panic(tb->tb_sb, "vs-8215", "incorrect "
				       "return value of get_empty_nodes");
		}
	}

	/* get parents of S[h] neighbors. */
	if ((ret = get_parents(tb, h)) != CARRY_ON)
		return ret;

	sfree = B_FREE_SPACE(Sh);

	/* get free space of neighbors */
	rfree = get_rfree(tb, h);
	lfree = get_lfree(tb, h);

	if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) ==
	    NO_BALANCING_NEEDED)
		/* and new item fits into node S[h] without any shifting */
		return NO_BALANCING_NEEDED;

	create_virtual_node(tb, h);

	/*
	 * determine maximal number of items we can shift to the left
	 * neighbor (in tb structure) and the maximal number of bytes that
	 * can flow to the left neighbor from the left most liquid item that
	 * cannot be shifted from S[0] entirely (returned value)
	 */
	check_left(tb, h, lfree);

	/*
	 * determine maximal number of items we can shift to the right
	 * neighbor (in tb structure) and the maximal number of bytes that
	 * can flow to the right neighbor from the right most liquid item
	 * that cannot be shifted from S[0] entirely (returned value)
	 */
	check_right(tb, h, rfree);

	/*
	 * all contents of internal node S[h] can be moved into its
	 * neighbors; S[h] will be removed after balancing
	 */
	if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) {
		int to_r;

		/*
		 * Since we are working on internal nodes, and our internal
		 * nodes have fixed size entries, we can balance by the
		 * number of items rather than the space they consume.  Here
		 * we set the left node equal to the right node, allowing a
		 * difference of less than or equal to 1 child pointer.
		 */
		to_r =
		    ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
		     vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 -
						tb->rnum[h]);
		set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
			       -1, -1);
		return CARRY_ON;
	}

	/*
	 * this checks the balance condition that no two neighboring nodes
	 * can fit in one node
	 */
	RFALSE(h &&
	       (tb->lnum[h] >= vn->vn_nr_item + 1 ||
		tb->rnum[h] >= vn->vn_nr_item + 1),
	       "vs-8220: tree is not balanced on internal level");
	RFALSE(!h && ((tb->lnum[h] >= vn->vn_nr_item && (tb->lbytes == -1)) ||
		      (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))),
	       "vs-8225: tree is not balanced on leaf level");

	/*
	 * all contents of S[0] can be moved into its neighbors;
	 * S[0] will be removed after balancing.
	 */
	if (!h && is_leaf_removable(tb))
		return CARRY_ON;

	/*
	 * why do we perform this check here rather than earlier??
	 * Answer: we can win 1 node in some cases above.  Moreover we
	 * checked it above, when we checked that S[0] is not removable
	 * in principle
	 */
	if (sfree >= levbytes) {
		/* new item fits into node S[h] without any shifting */
		if (!h)
			tb->s0num = vn->vn_nr_item;
		set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
		return NO_BALANCING_NEEDED;
	}

	{
		int lpar, rpar, nset, lset, rset, lrset;

		/*
		 * regular overflowing of the node
		 */

		/*
		 * get_num_ver works in 2 modes (FLOW & NO_FLOW);
		 * lpar, rpar - number of items we can shift to left/right
		 * neighbor (including splitting item);
		 * nset, lset, rset, lrset - show whether flowing items give
		 * better packing
		 */
#define FLOW 1
#define NO_FLOW 0		/* do not any splitting */

		/* we choose one of the following */
#define NOTHING_SHIFT_NO_FLOW	0
#define NOTHING_SHIFT_FLOW	5
#define LEFT_SHIFT_NO_FLOW	10
#define LEFT_SHIFT_FLOW		15
#define RIGHT_SHIFT_NO_FLOW	20
#define RIGHT_SHIFT_FLOW	25
#define LR_SHIFT_NO_FLOW	30
#define LR_SHIFT_FLOW		35

		lpar = tb->lnum[h];
		rpar = tb->rnum[h];

		/*
		 * calculate number of blocks S[h] must be split into when
		 * nothing is shifted to the neighbors, as well as number of
		 * items in each part of the split node (s012 numbers), and
		 * number of bytes (s1bytes) of the shared drop which flow to
		 * S1 if any
		 */
		nset = NOTHING_SHIFT_NO_FLOW;
		nver = get_num_ver(vn->vn_mode, tb, h,
				   0, -1, h ? vn->vn_nr_item : 0, -1,
				   snum012, NO_FLOW);

		if (!h) {
			int nver1;

			/*
			 * note, that in this case we try to bottle between
			 * S[0] and S1 (S1 - the first new node)
			 */
			nver1 = get_num_ver(vn->vn_mode, tb, h,
					    0, -1, 0, -1,
					    snum012 + NOTHING_SHIFT_FLOW, FLOW);
			if (nver > nver1)
				nset = NOTHING_SHIFT_FLOW, nver = nver1;
		}

		/*
		 * calculate number of blocks S[h] must be split into when
		 * l_shift_num first items and l_shift_bytes of the right
		 * most liquid item to be shifted are shifted to the left
		 * neighbor, as well as number of items in each part of the
		 * splitted node (s012 numbers), and number of bytes
		 * (s1bytes) of the shared drop which flow to S1 if any
		 */
		lset = LEFT_SHIFT_NO_FLOW;
		lnver = get_num_ver(vn->vn_mode, tb, h,
				    lpar - ((h || tb->lbytes == -1) ? 0 : 1),
				    -1, h ? vn->vn_nr_item : 0, -1,
				    snum012 + LEFT_SHIFT_NO_FLOW, NO_FLOW);
		if (!h) {
			int lnver1;

			lnver1 = get_num_ver(vn->vn_mode, tb, h,
					     lpar -
					     ((tb->lbytes != -1) ? 1 : 0),
					     tb->lbytes, 0, -1,
					     snum012 + LEFT_SHIFT_FLOW, FLOW);
			if (lnver > lnver1)
				lset = LEFT_SHIFT_FLOW, lnver = lnver1;
		}

		/*
		 * calculate number of blocks S[h] must be split into when
		 * r_shift_num first items and r_shift_bytes of the left most
		 * liquid item to be shifted are shifted to the right
		 * neighbor, as well as number of items in each part of the
		 * splitted node (s012 numbers), and number of bytes
		 * (s1bytes) of the shared drop which flow to S1 if any
		 */
		rset = RIGHT_SHIFT_NO_FLOW;
		rnver = get_num_ver(vn->vn_mode, tb, h,
				    0, -1,
				    h ? (vn->vn_nr_item - rpar) : (rpar -
								   ((tb->
								     rbytes !=
								     -1) ? 1 :
								    0)), -1,
				    snum012 + RIGHT_SHIFT_NO_FLOW, NO_FLOW);
		if (!h) {
			int rnver1;

			rnver1 = get_num_ver(vn->vn_mode, tb, h,
					     0, -1,
					     (rpar -
					      ((tb->rbytes != -1) ? 1 : 0)),
					     tb->rbytes,
					     snum012 + RIGHT_SHIFT_FLOW, FLOW);
			if (rnver > rnver1)
				rset = RIGHT_SHIFT_FLOW, rnver = rnver1;
		}

		/*
		 * calculate number of blocks S[h] must be split into when
		 * items are shifted in both directions, as well as number of
		 * items in each part of the splitted node (s012 numbers),
		 * and number of bytes (s1bytes) of the shared drop which
		 * flow to S1 if any
		 */
		lrset = LR_SHIFT_NO_FLOW;
		lrnver = get_num_ver(vn->vn_mode, tb, h,
				     lpar - ((h || tb->lbytes == -1) ? 0 : 1),
				     -1,
				     h ? (vn->vn_nr_item - rpar) : (rpar -
								    ((tb->
								      rbytes !=
								      -1) ? 1 :
								     0)), -1,
				     snum012 + LR_SHIFT_NO_FLOW, NO_FLOW);
		if (!h) {
			int lrnver1;

			lrnver1 = get_num_ver(vn->vn_mode, tb, h,
					      lpar -
					      ((tb->lbytes != -1) ? 1 : 0),
					      tb->lbytes,
					      (rpar -
					       ((tb->rbytes != -1) ? 1 : 0)),
					      tb->rbytes,
					      snum012 + LR_SHIFT_FLOW, FLOW);
			if (lrnver > lrnver1)
				lrset = LR_SHIFT_FLOW, lrnver = lrnver1;
		}

		/*
		 * Our general shifting strategy is:
		 * 1) to minimize the number of new nodes;
		 * 2) to minimize the number of neighbors involved in
		 *    shifting;
		 * 3) to minimize the number of disk reads;
		 */

		/* we can win TWO or ONE nodes by shifting in both directions */
		if (lrnver < lnver && lrnver < rnver) {
			RFALSE(h &&
			       (tb->lnum[h] != 1 ||
				tb->rnum[h] != 1 ||
				lrnver != 1 || rnver != 2 || lnver != 2
				|| h != 1), "vs-8230: bad h");
			if (lrset == LR_SHIFT_FLOW)
				set_parameters(tb, h, tb->lnum[h], tb->rnum[h],
					       lrnver, snum012 + lrset,
					       tb->lbytes, tb->rbytes);
			else
				set_parameters(tb, h,
					       tb->lnum[h] -
					       ((tb->lbytes == -1) ? 0 : 1),
					       tb->rnum[h] -
					       ((tb->rbytes == -1) ? 0 : 1),
					       lrnver, snum012 + lrset, -1, -1);
			return CARRY_ON;
		}

		/*
		 * if shifting doesn't lead to better packing then don't
		 * shift
		 */
		if (nver == lrnver) {
			set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1,
				       -1);
			return CARRY_ON;
		}

		/*
		 * now we know that for better packing shifting in only one
		 * direction either to the left or to the right is required
		 */

		/*
		 * if shifting to the left is better than shifting to the
		 * right
		 */
		if (lnver < rnver) {
			SET_PAR_SHIFT_LEFT;
			return CARRY_ON;
		}

		/*
		 * if shifting to the right is better than shifting to the
		 * left
		 */
		if (lnver > rnver) {
			SET_PAR_SHIFT_RIGHT;
			return CARRY_ON;
		}

		/*
		 * now shifting in either direction gives the same number of
		 * nodes and we can make use of the cached neighbors
		 */
		if (is_left_neighbor_in_cache(tb, h)) {
			SET_PAR_SHIFT_LEFT;
			return CARRY_ON;
		}

		/*
		 * shift to the right independently of whether the right
		 * neighbor is in cache or not
		 */
		SET_PAR_SHIFT_RIGHT;
		return CARRY_ON;
	}
}
/* Check whether current node S[h] is balanced when Decreasing its size by
* Deleting or Cutting for INTERNAL node of S+tree.
* Calculate parameters for balancing for current level h.
* Parameters:
* tb tree_balance structure;
* h current level of the node;
* inum item number in S[h];
* mode i - insert, p - paste;
* Returns: 1 - schedule occurred;
* 0 - balancing for higher levels needed;
* -1 - no balancing for higher levels needed;
* -2 - no disk space.
*
* Note: Items of internal nodes have fixed size, so the balance condition for
* the internal part of S+tree is as for the B-trees.
*/
static int dc_check_balance_internal(struct tree_balance *tb, int h)
{
struct virtual_node *vn = tb->tb_vn;
/* Sh is the node whose balance is currently being checked,
and Fh is its father. */
struct buffer_head *Sh, *Fh;
int maxsize, ret;
int lfree, rfree /* free space in L and R */ ;
Sh = PATH_H_PBUFFER(tb->tb_path, h);
Fh = PATH_H_PPARENT(tb->tb_path, h);
maxsize = MAX_CHILD_SIZE(Sh);
/* using tb->insert_size[h], which is negative in this case, create_virtual_node calculates: */
/* new_nr_item = number of items node would have if operation is */
/* performed without balancing (new_nr_item); */
create_virtual_node(tb, h);
if (!Fh) { /* S[h] is the root. */
if (vn->vn_nr_item > 0) {
set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */
}
/* new_nr_item == 0.
* Current root will be deleted resulting in
* decrementing the tree height. */
set_parameters(tb, h, 0, 0, 0, NULL, -1, -1);
return CARRY_ON;
}
if ((ret = get_parents(tb, h)) != CARRY_ON)
return ret;
/* get free space of neighbors */
rfree = get_rfree(tb, h);
lfree = get_lfree(tb, h);
/* determine maximal number of items we can fit into neighbors */
check_left(tb, h, lfree);
check_right(tb, h, rfree);
if (vn->vn_nr_item >= MIN_NR_KEY(Sh)) { /* Balance condition for the internal node is valid.
* In this case we balance only if it leads to better packing. */
if (vn->vn_nr_item == MIN_NR_KEY(Sh)) { /* Here we join S[h] with one of its neighbors,
* which is impossible with greater values of new_nr_item. */
if (tb->lnum[h] >= vn->vn_nr_item + 1) {
/* All contents of S[h] can be moved to L[h]. */
int n;
int order_L;
order_L =
((n =
PATH_H_B_ITEM_ORDER(tb->tb_path,
h)) ==
0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
n = dc_size(B_N_CHILD(tb->FL[h], order_L)) /
(DC_SIZE + KEY_SIZE);
set_parameters(tb, h, -n - 1, 0, 0, NULL, -1,
-1);
return CARRY_ON;
}
if (tb->rnum[h] >= vn->vn_nr_item + 1) {
/* All contents of S[h] can be moved to R[h]. */
int n;
int order_R;
order_R =
((n =
PATH_H_B_ITEM_ORDER(tb->tb_path,
h)) ==
B_NR_ITEMS(Fh)) ? 0 : n + 1;
n = dc_size(B_N_CHILD(tb->FR[h], order_R)) /
(DC_SIZE + KEY_SIZE);
set_parameters(tb, h, 0, -n - 1, 0, NULL, -1,
-1);
return CARRY_ON;
}
}
if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
/* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
int to_r;
to_r =
((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] -
tb->rnum[h] + vn->vn_nr_item + 1) / 2 -
(MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r,
0, NULL, -1, -1);
return CARRY_ON;
}
/* Balancing does not lead to better packing. */
set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
return NO_BALANCING_NEEDED;
}
/* Current node contain insufficient number of items. Balancing is required. */
/* Check whether we can merge S[h] with left neighbor. */
if (tb->lnum[h] >= vn->vn_nr_item + 1)
if (is_left_neighbor_in_cache(tb, h)
|| tb->rnum[h] < vn->vn_nr_item + 1 || !tb->FR[h]) {
int n;
int order_L;
order_L =
((n =
PATH_H_B_ITEM_ORDER(tb->tb_path,
h)) ==
0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / (DC_SIZE +
KEY_SIZE);
set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, -1);
return CARRY_ON;
}
/* Check whether we can merge S[h] with right neighbor. */
if (tb->rnum[h] >= vn->vn_nr_item + 1) {
int n;
int order_R;
order_R =
((n =
PATH_H_B_ITEM_ORDER(tb->tb_path,
h)) == B_NR_ITEMS(Fh)) ? 0 : (n + 1);
n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / (DC_SIZE +
KEY_SIZE);
set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, -1);
return CARRY_ON;
}
/* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
int to_r;
to_r =
((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 -
tb->rnum[h]);
set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
-1, -1);
return CARRY_ON;
}
/* For internal nodes try to borrow item from a neighbor */
RFALSE(!tb->FL[h] && !tb->FR[h], "vs-8235: trying to borrow for root");
/* Borrow one or two items from caching neighbor */
if (is_left_neighbor_in_cache(tb, h) || !tb->FR[h]) {
int from_l;
from_l =
(MAX_NR_KEY(Sh) + 1 - tb->lnum[h] + vn->vn_nr_item +
1) / 2 - (vn->vn_nr_item + 1);
set_parameters(tb, h, -from_l, 0, 1, NULL, -1, -1);
return CARRY_ON;
}
set_parameters(tb, h, 0,
-((MAX_NR_KEY(Sh) + 1 - tb->rnum[h] + vn->vn_nr_item +
1) / 2 - (vn->vn_nr_item + 1)), 1, NULL, -1, -1);
return CARRY_ON;
}
/*
 * Check whether the leaf node S[0] is balanced when Decreasing its size by
 * Deleting or Truncating.
 * Calculate parameters for balancing for the current level h.
 * Parameters:
 *	tb	tree_balance structure;
 *	h	current level of the node (0 here; see dc_check_balance);
 *	mode	d - delete, c - cut.
 * Returns:	NO_BALANCING_NEEDED when S[0] stays valid as it is,
 *		CARRY_ON when balancing parameters were set, or a
 *		REPEAT_SEARCH/IO_ERROR code propagated from get_parents().
 */
static int dc_check_balance_leaf(struct tree_balance *tb, int h)
{
	struct virtual_node *vn = tb->tb_vn;
	/*
	 * Number of bytes that must be deleted from (value is negative if
	 * bytes are deleted) the buffer which contains the node being
	 * balanced.  The mnemonic is that the attempted change in node
	 * space used at this level is levbytes bytes.
	 */
	int levbytes;
	/* the maximal item size */
	int maxsize, ret;
	/*
	 * S0 is the node whose balance is currently being checked,
	 * and F0 is its father.
	 */
	struct buffer_head *S0, *F0;
	int lfree, rfree;	/* free space in L and R */

	S0 = PATH_H_PBUFFER(tb->tb_path, 0);
	F0 = PATH_H_PPARENT(tb->tb_path, 0);

	levbytes = tb->insert_size[h];

	maxsize = MAX_CHILD_SIZE(S0);	/* maximal possible size of an item */

	if (!F0) {		/* S[0] is the root now. */
		RFALSE(-levbytes >= maxsize - B_FREE_SPACE(S0),
		       "vs-8240: attempt to create empty buffer tree");

		set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
		return NO_BALANCING_NEEDED;
	}

	if ((ret = get_parents(tb, h)) != CARRY_ON)
		return ret;

	/* get free space of neighbors */
	rfree = get_rfree(tb, h);
	lfree = get_lfree(tb, h);

	create_virtual_node(tb, h);

	/* if 3 leaves can be merged to one, set parameters and return */
	if (are_leaves_removable(tb, lfree, rfree))
		return CARRY_ON;

	/*
	 * determine maximal number of items we can shift to the left/right
	 * neighbor and the maximal number of bytes that can flow to the
	 * left/right neighbor from the left/right most liquid item that
	 * cannot be shifted from S[0] entirely
	 */
	check_left(tb, h, lfree);
	check_right(tb, h, rfree);

	/* check whether we can merge S with left neighbor. */
	if (tb->lnum[0] >= vn->vn_nr_item && tb->lbytes == -1)
		if (is_left_neighbor_in_cache(tb, h) || ((tb->rnum[0] - ((tb->rbytes == -1) ? 0 : 1)) < vn->vn_nr_item) ||	/* S can not be merged with R */
		    !tb->FR[h]) {

			RFALSE(!tb->FL[h],
			       "vs-8245: dc_check_balance_leaf: FL[h] must exist");

			/* set parameter to merge S[0] with its left neighbor */
			set_parameters(tb, h, -1, 0, 0, NULL, -1, -1);
			return CARRY_ON;
		}

	/* check whether we can merge S[0] with right neighbor. */
	if (tb->rnum[0] >= vn->vn_nr_item && tb->rbytes == -1) {
		set_parameters(tb, h, 0, -1, 0, NULL, -1, -1);
		return CARRY_ON;
	}

	/*
	 * All contents of S[0] can be moved to the neighbors (L[0] & R[0]).
	 * Set parameters and return.
	 */
	if (is_leaf_removable(tb))
		return CARRY_ON;

	/* Balancing is not required. */
	tb->s0num = vn->vn_nr_item;
	set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
	return NO_BALANCING_NEEDED;
}
/*
 * Check whether the node S[h] is balanced when Decreasing its size by
 * Deleting or Cutting, and compute the balancing parameters for level h.
 * Internal nodes and leaves obey different balance conditions, so the
 * check is dispatched on the level: h == 0 is the leaf case.
 */
static int dc_check_balance(struct tree_balance *tb, int h)
{
	RFALSE(!(PATH_H_PBUFFER(tb->tb_path, h)),
	       "vs-8250: S is not initialized");

	return h ? dc_check_balance_internal(tb, h)
		 : dc_check_balance_leaf(tb, h);
}
/*
 * Check whether the current node S[h] is balanced and compute the
 * balancing parameters for level h.
 *
 * tb is a large structure that must be read about in the header file at
 * the same time as this procedure if the reader is to successfully
 * understand it.
 *
 * Parameters:
 *	mode		i - insert, p - paste, d - delete, c - cut;
 *	h		current level of the node;
 *	inum		item number in S[h];
 *	pos_in_item	position inside the affected item;
 *	ins_ih/data	item head and body for insert mode.
 * The verdict (schedule occurred / balance higher levels / done / no disk
 * space) is produced by ip_check_balance() or dc_check_balance().
 */
static int check_balance(int mode,
			 struct tree_balance *tb,
			 int h,
			 int inum,
			 int pos_in_item,
			 struct item_head *ins_ih, const void *data)
{
	struct virtual_node *vn;

	/*
	 * Lay the virtual node out at the start of the preallocated buffer;
	 * its variable-sized payload grows right behind the struct itself.
	 */
	vn = tb->tb_vn = (struct virtual_node *)(tb->vn_buf);
	vn->vn_free_ptr = (char *)(vn + 1);
	vn->vn_mode = mode;
	vn->vn_affected_item_num = inum;
	vn->vn_pos_in_item = pos_in_item;
	vn->vn_ins_ih = ins_ih;
	vn->vn_data = data;

	RFALSE(mode == M_INSERT && !vn->vn_ins_ih,
	       "vs-8255: ins_ih can not be 0 in insert mode");

	/* A shrinking node takes the delete/cut path ... */
	if (tb->insert_size[h] <= 0)
		return dc_check_balance(tb, h);

	/* ... a growing one the insert/paste path. */
	return ip_check_balance(tb, h);
}
/*
 * Check whether the parent cached in the path is really the parent of the
 * current node in the tree; if it is locked, wait for it.
 * Returns: CARRY_ON     - the cached parent is valid and unlocked;
 *          REPEAT_SEARCH - the tree changed and the path must be rebuilt.
 */
static int get_direct_parent(struct tree_balance *tb, int h)
{
	struct buffer_head *bh;
	struct treepath *path = tb->tb_path;
	int position,
	    path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);

	/* We are in the root or in the new root. */
	if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {

		RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET - 1,
		       "PAP-8260: invalid offset in the path");

		if (PATH_OFFSET_PBUFFER(path, FIRST_PATH_ELEMENT_OFFSET)->
		    b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) {
			/* Root is not changed. */
			PATH_OFFSET_PBUFFER(path, path_offset - 1) = NULL;
			PATH_OFFSET_POSITION(path, path_offset - 1) = 0;
			return CARRY_ON;
		}
		/* Root is changed and we must recalculate the path. */
		return REPEAT_SEARCH;
	}

	/* Parent in the path is not in the tree. */
	if (!B_IS_IN_TREE
	    (bh = PATH_OFFSET_PBUFFER(path, path_offset - 1)))
		return REPEAT_SEARCH;

	/* Position in the parent is out of range. */
	if ((position =
	     PATH_OFFSET_POSITION(path,
				  path_offset - 1)) > B_NR_ITEMS(bh))
		return REPEAT_SEARCH;

	/* Parent in the path is not parent of the current node in the tree. */
	if (B_N_CHILD_NUM(bh, position) !=
	    PATH_OFFSET_PBUFFER(path, path_offset)->b_blocknr)
		return REPEAT_SEARCH;

	if (buffer_locked(bh)) {
		/* Drop the write lock while sleeping on the busy buffer. */
		reiserfs_write_unlock(tb->tb_sb);
		__wait_on_buffer(bh);
		reiserfs_write_lock(tb->tb_sb);
		if (FILESYSTEM_CHANGED_TB(tb))
			return REPEAT_SEARCH;
	}

	/*
	 * Parent in the path is unlocked and really parent of the current
	 * node.
	 */
	return CARRY_ON;
}
/* Using lnum[h] and rnum[h] we should determine what neighbors
 * of S[h] we
 * need in order to balance S[h], and get them if necessary.
 * The buffers are read through the stored parents (FL[h] / FR[h]) and
 * cached in tb->L[h] / tb->R[h], dropping any previously-held neighbor.
 * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
 *          CARRY_ON - schedule didn't occur while the function worked;
 *          IO_ERROR - the neighbor block could not be read.
 */
static int get_neighbors(struct tree_balance *tb, int h)
{
    int child_position,
        path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h + 1);
    unsigned long son_number;
    struct super_block *sb = tb->tb_sb;
    struct buffer_head *bh;
    PROC_INFO_INC(sb, get_neighbors[h]);
    if (tb->lnum[h]) {
        /* We need left neighbor to balance S[h]. */
        PROC_INFO_INC(sb, need_l_neighbor[h]);
        bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
        RFALSE(bh == tb->FL[h] &&
               !PATH_OFFSET_POSITION(tb->tb_path, path_offset),
               "PAP-8270: invalid position in the parent");
        /* The neighbor is the child just left of S[h]'s slot in FL[h];
         * if FL[h] is not S[h]'s own parent, it is FL[h]'s last child. */
        child_position =
            (bh ==
             tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
                                                   FL[h]);
        son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
        /* sb_bread() may sleep: drop the write lock around the read and
         * revalidate the generation afterwards. */
        reiserfs_write_unlock(sb);
        bh = sb_bread(sb, son_number);
        reiserfs_write_lock(sb);
        if (!bh)
            return IO_ERROR;
        if (FILESYSTEM_CHANGED_TB(tb)) {
            brelse(bh);
            PROC_INFO_INC(sb, get_neighbors_restart[h]);
            return REPEAT_SEARCH;
        }
        RFALSE(!B_IS_IN_TREE(tb->FL[h]) ||
               child_position > B_NR_ITEMS(tb->FL[h]) ||
               B_N_CHILD_NUM(tb->FL[h], child_position) !=
               bh->b_blocknr, "PAP-8275: invalid parent");
        RFALSE(!B_IS_IN_TREE(bh), "PAP-8280: invalid child");
        RFALSE(!h &&
               B_FREE_SPACE(bh) !=
               MAX_CHILD_SIZE(bh) -
               dc_size(B_N_CHILD(tb->FL[0], child_position)),
               "PAP-8290: invalid child size of left neighbor");
        /* drop any previously cached left neighbor and keep this one */
        brelse(tb->L[h]);
        tb->L[h] = bh;
    }
    /* We need right neighbor to balance S[path_offset]. */
    if (tb->rnum[h]) {
        PROC_INFO_INC(sb, need_r_neighbor[h]);
        bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
        RFALSE(bh == tb->FR[h] &&
               PATH_OFFSET_POSITION(tb->tb_path,
                                    path_offset) >=
               B_NR_ITEMS(bh),
               "PAP-8295: invalid position in the parent");
        /* right neighbor: next child in FR[h], or its first child when
         * FR[h] is not S[h]'s own parent */
        child_position =
            (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
        son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
        /* again drop the write lock across the blocking read */
        reiserfs_write_unlock(sb);
        bh = sb_bread(sb, son_number);
        reiserfs_write_lock(sb);
        if (!bh)
            return IO_ERROR;
        if (FILESYSTEM_CHANGED_TB(tb)) {
            brelse(bh);
            PROC_INFO_INC(sb, get_neighbors_restart[h]);
            return REPEAT_SEARCH;
        }
        brelse(tb->R[h]);
        tb->R[h] = bh;
        RFALSE(!h
               && B_FREE_SPACE(bh) !=
               MAX_CHILD_SIZE(bh) -
               dc_size(B_N_CHILD(tb->FR[0], child_position)),
               "PAP-8300: invalid child size of right neighbor (%d != %d - %d)",
               B_FREE_SPACE(bh), MAX_CHILD_SIZE(bh),
               dc_size(B_N_CHILD(tb->FR[0], child_position)));
    }
    return CARRY_ON;
}
/* Worst-case size in bytes of the virtual node needed to describe a leaf
 * of this filesystem's blocksize: room for either the maximum possible
 * number of items, or one directory item holding the maximum possible
 * number of entries -- whichever is larger. */
static int get_virtual_node_size(struct super_block *sb, struct buffer_head *bh)
{
    int max_num_of_items;
    int max_num_of_entries;
    unsigned long blocksize = sb->s_blocksize;
/* shortest possible directory entry name */
#define MIN_NAME_LEN 1
    max_num_of_items = (blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN);
    max_num_of_entries = (blocksize - BLKH_SIZE - IH_SIZE) /
        (DEH_SIZE + MIN_NAME_LEN);
    return sizeof(struct virtual_node) +
        max(max_num_of_items * sizeof(struct virtual_item),
            sizeof(struct virtual_item) + sizeof(struct direntry_uarea) +
            (max_num_of_entries - 1) * sizeof(__u16));
}
/* maybe we should fail balancing we are going to perform when kmalloc
   fails several times. But now it will loop until kmalloc gets
   required memory */
/* Ensure tb->vn_buf is large enough for the virtual node of the current
 * leaf.  Returns CARRY_ON when the existing buffer suffices or the new
 * one was obtained atomically; returns REPEAT_SEARCH when we had to block
 * (so the caller must redo the search). */
static int get_mem_for_virtual_node(struct tree_balance *tb)
{
    int check_fs = 0;
    int size;
    char *buf;
    size = get_virtual_node_size(tb->tb_sb, PATH_PLAST_BUFFER(tb->tb_path));
    if (size > tb->vn_buf_size) {
        /* we have to allocate more memory for virtual node */
        if (tb->vn_buf) {
            /* free memory allocated before */
            kfree(tb->vn_buf);
            /* this is not needed if kfree is atomic */
            check_fs = 1;
        }
        /* virtual node requires now more memory */
        tb->vn_buf_size = size;
        /* get memory for virtual item; try atomically first so we do not
         * have to give up the search state */
        buf = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);
        if (!buf) {
            /* getting memory with GFP_KERNEL priority may involve
               balancing now (due to indirect_to_direct conversion on
               dcache shrinking). So, release path and collected
               resources here */
            free_buffers_in_tb(tb);
            buf = kmalloc(size, GFP_NOFS);
            if (!buf) {
                /* even the blocking allocation failed; record an empty
                 * buffer so the retry starts from scratch */
                tb->vn_buf_size = 0;
            }
            tb->vn_buf = buf;
            schedule();
            return REPEAT_SEARCH;
        }
        tb->vn_buf = buf;
    }
    if (check_fs && FILESYSTEM_CHANGED_TB(tb))
        return REPEAT_SEARCH;
    return CARRY_ON;
}
#ifdef CONFIG_REISERFS_CHECK
/* Debug-only: panic if a buffer involved in balancing is in any
 * inconsistent state (bad refcount, stale, detached from the tree,
 * wrong device/blocksize, or an out-of-range block number).
 * descr/level identify the buffer in the panic message. */
static void tb_buffer_sanity_check(struct super_block *sb,
                                   struct buffer_head *bh,
                                   const char *descr, int level)
{
    if (bh) {
        if (atomic_read(&(bh->b_count)) <= 0)
            reiserfs_panic(sb, "jmacd-1", "negative or zero "
                           "reference counter for buffer %s[%d] "
                           "(%b)", descr, level, bh);
        if (!buffer_uptodate(bh))
            reiserfs_panic(sb, "jmacd-2", "buffer is not up "
                           "to date %s[%d] (%b)",
                           descr, level, bh);
        if (!B_IS_IN_TREE(bh))
            reiserfs_panic(sb, "jmacd-3", "buffer is not "
                           "in tree %s[%d] (%b)",
                           descr, level, bh);
        if (bh->b_bdev != sb->s_bdev)
            reiserfs_panic(sb, "jmacd-4", "buffer has wrong "
                           "device %s[%d] (%b)",
                           descr, level, bh);
        if (bh->b_size != sb->s_blocksize)
            reiserfs_panic(sb, "jmacd-5", "buffer has wrong "
                           "blocksize %s[%d] (%b)",
                           descr, level, bh);
        if (bh->b_blocknr > SB_BLOCK_COUNT(sb))
            reiserfs_panic(sb, "jmacd-6", "buffer block "
                           "number too high %s[%d] (%b)",
                           descr, level, bh);
    }
}
#else
/* no-op stub when CONFIG_REISERFS_CHECK is disabled */
static void tb_buffer_sanity_check(struct super_block *sb,
                                   struct buffer_head *bh,
                                   const char *descr, int level)
{;
}
#endif
/* Get bh ready for journaling without waiting (last arg 0 = don't block).
 * A zero return means the buffer could not be prepared yet and the
 * caller must wait on it. */
static int clear_all_dirty_bits(struct super_block *s, struct buffer_head *bh)
{
    return reiserfs_prepare_for_journal(s, bh, 0);
}
/* Walk every buffer gathered for the balance (path buffers, neighbors
 * L/R, parents FL/FR, common parents CFL/CFR, and the FEB list of fresh
 * empty nodes) and prepare each one for journaling, sleeping on any that
 * is still locked and retrying from the top until all are free.
 * Returns CARRY_ON when every buffer is prepared, or REPEAT_SEARCH when
 * the filesystem changed while we slept. */
static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
{
    struct buffer_head *locked;
#ifdef CONFIG_REISERFS_CHECK
    int repeat_counter = 0;
#endif
    int i;
    do {
        locked = NULL;
        for (i = tb->tb_path->path_length;
             !locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i--) {
            if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) {
                /* if I understand correctly, we can only be sure the last buffer
                ** in the path is in the tree --clm
                */
#ifdef CONFIG_REISERFS_CHECK
                if (PATH_PLAST_BUFFER(tb->tb_path) ==
                    PATH_OFFSET_PBUFFER(tb->tb_path, i))
                    tb_buffer_sanity_check(tb->tb_sb,
                                           PATH_OFFSET_PBUFFER
                                           (tb->tb_path,
                                            i), "S",
                                           tb->tb_path->
                                           path_length - i);
#endif
                if (!clear_all_dirty_bits(tb->tb_sb,
                                          PATH_OFFSET_PBUFFER
                                          (tb->tb_path,
                                           i))) {
                    locked =
                        PATH_OFFSET_PBUFFER(tb->tb_path,
                                            i);
                }
            }
        }
        /* per-level neighbor and parent buffers; stop at the first level
         * with no insert_size since nothing above it is used */
        for (i = 0; !locked && i < MAX_HEIGHT && tb->insert_size[i];
             i++) {
            if (tb->lnum[i]) {
                if (tb->L[i]) {
                    tb_buffer_sanity_check(tb->tb_sb,
                                           tb->L[i],
                                           "L", i);
                    if (!clear_all_dirty_bits
                        (tb->tb_sb, tb->L[i]))
                        locked = tb->L[i];
                }
                if (!locked && tb->FL[i]) {
                    tb_buffer_sanity_check(tb->tb_sb,
                                           tb->FL[i],
                                           "FL", i);
                    if (!clear_all_dirty_bits
                        (tb->tb_sb, tb->FL[i]))
                        locked = tb->FL[i];
                }
                if (!locked && tb->CFL[i]) {
                    tb_buffer_sanity_check(tb->tb_sb,
                                           tb->CFL[i],
                                           "CFL", i);
                    if (!clear_all_dirty_bits
                        (tb->tb_sb, tb->CFL[i]))
                        locked = tb->CFL[i];
                }
            }
            if (!locked && (tb->rnum[i])) {
                if (tb->R[i]) {
                    tb_buffer_sanity_check(tb->tb_sb,
                                           tb->R[i],
                                           "R", i);
                    if (!clear_all_dirty_bits
                        (tb->tb_sb, tb->R[i]))
                        locked = tb->R[i];
                }
                if (!locked && tb->FR[i]) {
                    tb_buffer_sanity_check(tb->tb_sb,
                                           tb->FR[i],
                                           "FR", i);
                    if (!clear_all_dirty_bits
                        (tb->tb_sb, tb->FR[i]))
                        locked = tb->FR[i];
                }
                if (!locked && tb->CFR[i]) {
                    tb_buffer_sanity_check(tb->tb_sb,
                                           tb->CFR[i],
                                           "CFR", i);
                    if (!clear_all_dirty_bits
                        (tb->tb_sb, tb->CFR[i]))
                        locked = tb->CFR[i];
                }
            }
        }
        /* as far as I can tell, this is not required.  The FEB list seems
        ** to be full of newly allocated nodes, which will never be locked,
        ** dirty, or anything else.
        ** To be safe, I'm putting in the checks and waits in.  For the moment,
        ** they are needed to keep the code in journal.c from complaining
        ** about the buffer.  That code is inside CONFIG_REISERFS_CHECK as well.
        ** --clm
        */
        for (i = 0; !locked && i < MAX_FEB_SIZE; i++) {
            if (tb->FEB[i]) {
                if (!clear_all_dirty_bits
                    (tb->tb_sb, tb->FEB[i]))
                    locked = tb->FEB[i];
            }
        }
        if (locked) {
#ifdef CONFIG_REISERFS_CHECK
            repeat_counter++;
            if ((repeat_counter % 10000) == 0) {
                reiserfs_warning(tb->tb_sb, "reiserfs-8200",
                                 "too many iterations waiting "
                                 "for buffer to unlock "
                                 "(%b)", locked);
                /* Don't loop forever.  Try to recover from possible error. */
                return (FILESYSTEM_CHANGED_TB(tb)) ?
                    REPEAT_SEARCH : CARRY_ON;
            }
#endif
            /* drop the write lock while sleeping on the locked buffer */
            reiserfs_write_unlock(tb->tb_sb);
            __wait_on_buffer(locked);
            reiserfs_write_lock(tb->tb_sb);
            if (FILESYSTEM_CHANGED_TB(tb))
                return REPEAT_SEARCH;
        }
    } while (locked);
    return CARRY_ON;
}
/* Prepare for balancing, that is
 *	get all necessary parents, and neighbors;
 *	analyze what and where should be moved;
 *	get sufficient number of new nodes;
 * Balancing will start only after all resources will be collected at a time.
 *
 * When ported to SMP kernels, only at the last moment after all needed nodes
 * are collected in cache, will the resources be locked using the usual
 * textbook ordered lock acquisition algorithms.  Note that ensuring that
 * this code neither write locks what it does not need to write lock nor locks out of order
 * will be a pain in the butt that could have been avoided.  Grumble grumble. -Hans
 *
 * fix is meant in the sense of render unchanging
 *
 * Latency might be improved by first gathering a list of what buffers are needed
 * and then getting as many of them in parallel as possible? -Hans
 *
 * Parameters:
 *	op_mode	i - insert, d - delete, c - cut (truncate), p - paste (append)
 *	tb	tree_balance structure;
 *	inum	item number in S[h];
 *	pos_in_item - comment this if you can
 *	ins_ih	item head of item being inserted
 *	data	inserted item or data to be pasted
 * Returns:	1 - schedule occurred while the function worked;
 *	        0 - schedule didn't occur while the function worked;
 *             -1 - if no_disk_space
 */
int fix_nodes(int op_mode, struct tree_balance *tb,
              struct item_head *ins_ih, const void *data)
{
    int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path);
    int pos_in_item;
    /* we set wait_tb_buffers_run when we have to restore any dirty bits cleared
    ** during wait_tb_buffers_run
    */
    int wait_tb_buffers_run = 0;
    struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
    ++REISERFS_SB(tb->tb_sb)->s_fix_nodes;
    pos_in_item = tb->tb_path->pos_in_item;
    /* remember the fs generation: any change while we sleep forces a
     * REPEAT_SEARCH */
    tb->fs_gen = get_generation(tb->tb_sb);
    /* we prepare and log the super here so it will already be in the
    ** transaction when do_balance needs to change it.
    ** This way do_balance won't have to schedule when trying to prepare
    ** the super for logging
    */
    reiserfs_prepare_for_journal(tb->tb_sb,
                                 SB_BUFFER_WITH_SB(tb->tb_sb), 1);
    journal_mark_dirty(tb->transaction_handle, tb->tb_sb,
                       SB_BUFFER_WITH_SB(tb->tb_sb));
    if (FILESYSTEM_CHANGED_TB(tb))
        return REPEAT_SEARCH;
    /* if it possible in indirect_to_direct conversion */
    if (buffer_locked(tbS0)) {
        /* drop the write lock while sleeping on S[0] */
        reiserfs_write_unlock(tb->tb_sb);
        __wait_on_buffer(tbS0);
        reiserfs_write_lock(tb->tb_sb);
        if (FILESYSTEM_CHANGED_TB(tb))
            return REPEAT_SEARCH;
    }
#ifdef CONFIG_REISERFS_CHECK
    if (REISERFS_SB(tb->tb_sb)->cur_tb) {
        print_cur_tb("fix_nodes");
        reiserfs_panic(tb->tb_sb, "PAP-8305",
                       "there is pending do_balance");
    }
    if (!buffer_uptodate(tbS0) || !B_IS_IN_TREE(tbS0))
        reiserfs_panic(tb->tb_sb, "PAP-8320", "S[0] (%b %z) is "
                       "not uptodate at the beginning of fix_nodes "
                       "or not in tree (mode %c)",
                       tbS0, tbS0, op_mode);
    /* Check parameters. */
    switch (op_mode) {
    case M_INSERT:
        if (item_num <= 0 || item_num > B_NR_ITEMS(tbS0))
            reiserfs_panic(tb->tb_sb, "PAP-8330", "Incorrect "
                           "item number %d (in S0 - %d) in case "
                           "of insert", item_num,
                           B_NR_ITEMS(tbS0));
        break;
    case M_PASTE:
    case M_DELETE:
    case M_CUT:
        if (item_num < 0 || item_num >= B_NR_ITEMS(tbS0)) {
            print_block(tbS0, 0, -1, -1);
            reiserfs_panic(tb->tb_sb, "PAP-8335", "Incorrect "
                           "item number(%d); mode = %c "
                           "insert_size = %d",
                           item_num, op_mode,
                           tb->insert_size[0]);
        }
        break;
    default:
        reiserfs_panic(tb->tb_sb, "PAP-8340", "Incorrect mode "
                       "of operation");
    }
#endif
    if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH)
        // FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat
        return REPEAT_SEARCH;
    /* Starting from the leaf level; for all levels h of the tree. */
    for (h = 0; h < MAX_HEIGHT && tb->insert_size[h]; h++) {
        ret = get_direct_parent(tb, h);
        if (ret != CARRY_ON)
            goto repeat;
        ret = check_balance(op_mode, tb, h, item_num,
                            pos_in_item, ins_ih, data);
        if (ret != CARRY_ON) {
            if (ret == NO_BALANCING_NEEDED) {
                /* No balancing for higher levels needed. */
                ret = get_neighbors(tb, h);
                if (ret != CARRY_ON)
                    goto repeat;
                if (h != MAX_HEIGHT - 1)
                    tb->insert_size[h + 1] = 0;
                /* ok, analysis and resource gathering are complete */
                break;
            }
            goto repeat;
        }
        ret = get_neighbors(tb, h);
        if (ret != CARRY_ON)
            goto repeat;
        /* No disk space, or schedule occurred and analysis may be
         * invalid and needs to be redone. */
        ret = get_empty_nodes(tb, h);
        if (ret != CARRY_ON)
            goto repeat;
        if (!PATH_H_PBUFFER(tb->tb_path, h)) {
            /* We have a positive insert size but no nodes exist on this
               level, this means that we are creating a new root. */
            RFALSE(tb->blknum[h] != 1,
                   "PAP-8350: creating new empty root");
            if (h < MAX_HEIGHT - 1)
                tb->insert_size[h + 1] = 0;
        } else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) {
            if (tb->blknum[h] > 1) {
                /* The tree needs to be grown, so this node S[h]
                   which is the root node is split into two nodes,
                   and a new node (S[h+1]) will be created to
                   become the root node. */
                RFALSE(h == MAX_HEIGHT - 1,
                       "PAP-8355: attempt to create too high of a tree");
                tb->insert_size[h + 1] =
                    (DC_SIZE +
                     KEY_SIZE) * (tb->blknum[h] - 1) +
                    DC_SIZE;
            } else if (h < MAX_HEIGHT - 1)
                tb->insert_size[h + 1] = 0;
        } else
            tb->insert_size[h + 1] =
                (DC_SIZE + KEY_SIZE) * (tb->blknum[h] - 1);
    }
    ret = wait_tb_buffers_until_unlocked(tb);
    if (ret == CARRY_ON) {
        if (FILESYSTEM_CHANGED_TB(tb)) {
            wait_tb_buffers_run = 1;
            ret = REPEAT_SEARCH;
            goto repeat;
        } else {
            return CARRY_ON;
        }
    } else {
        wait_tb_buffers_run = 1;
        goto repeat;
    }
  repeat:
    // fix_nodes was unable to perform its calculation due to
    // filesystem got changed under us, lack of free disk space or i/o
    // failure. If the first is the case - the search will be
    // repeated. For now - free all resources acquired so far except
    // for the new allocated nodes
    {
        int i;
        /* Release path buffers. */
        if (wait_tb_buffers_run) {
            pathrelse_and_restore(tb->tb_sb, tb->tb_path);
        } else {
            pathrelse(tb->tb_path);
        }
        /* brelse all resources collected for balancing */
        for (i = 0; i < MAX_HEIGHT; i++) {
            if (wait_tb_buffers_run) {
                /* undo the journal preparation done by
                 * wait_tb_buffers_until_unlocked() before releasing */
                reiserfs_restore_prepared_buffer(tb->tb_sb,
                                                 tb->L[i]);
                reiserfs_restore_prepared_buffer(tb->tb_sb,
                                                 tb->R[i]);
                reiserfs_restore_prepared_buffer(tb->tb_sb,
                                                 tb->FL[i]);
                reiserfs_restore_prepared_buffer(tb->tb_sb,
                                                 tb->FR[i]);
                reiserfs_restore_prepared_buffer(tb->tb_sb,
                                                 tb->
                                                 CFL[i]);
                reiserfs_restore_prepared_buffer(tb->tb_sb,
                                                 tb->
                                                 CFR[i]);
            }
            brelse(tb->L[i]);
            brelse(tb->R[i]);
            brelse(tb->FL[i]);
            brelse(tb->FR[i]);
            brelse(tb->CFL[i]);
            brelse(tb->CFR[i]);
            tb->L[i] = NULL;
            tb->R[i] = NULL;
            tb->FL[i] = NULL;
            tb->FR[i] = NULL;
            tb->CFL[i] = NULL;
            tb->CFR[i] = NULL;
        }
        if (wait_tb_buffers_run) {
            for (i = 0; i < MAX_FEB_SIZE; i++) {
                if (tb->FEB[i])
                    reiserfs_restore_prepared_buffer
                        (tb->tb_sb, tb->FEB[i]);
            }
        }
        return ret;
    }
}
/* Anatoly will probably forgive me renaming tb to tb. I just
   wanted to make lines shorter */
/* Release everything fix_nodes() gathered: restore journal-prepared
 * buffers, drop all buffer references, free unused preallocated blocks,
 * and free the virtual-node scratch buffer. */
void unfix_nodes(struct tree_balance *tb)
{
    int i;
    /* Release path buffers. */
    pathrelse_and_restore(tb->tb_sb, tb->tb_path);
    /* brelse all resources collected for balancing; restore each buffer's
     * journal state before dropping the reference */
    for (i = 0; i < MAX_HEIGHT; i++) {
        reiserfs_restore_prepared_buffer(tb->tb_sb, tb->L[i]);
        reiserfs_restore_prepared_buffer(tb->tb_sb, tb->R[i]);
        reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FL[i]);
        reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FR[i]);
        reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFL[i]);
        reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFR[i]);
        brelse(tb->L[i]);
        brelse(tb->R[i]);
        brelse(tb->FL[i]);
        brelse(tb->FR[i]);
        brelse(tb->CFL[i]);
        brelse(tb->CFR[i]);
    }
    /* deal with list of allocated (used and unused) nodes */
    for (i = 0; i < MAX_FEB_SIZE; i++) {
        if (tb->FEB[i]) {
            b_blocknr_t blocknr = tb->FEB[i]->b_blocknr;
            /* de-allocated block which was not used by balancing and
               bforget about buffer for it */
            brelse(tb->FEB[i]);
            reiserfs_free_block(tb->transaction_handle, NULL,
                                blocknr, 0);
        }
        if (tb->used[i]) {
            /* release used as new nodes including a new root */
            brelse(tb->used[i]);
        }
    }
    kfree(tb->vn_buf);
}
| gpl-2.0 |
rogersb11/android_kernel_samsung_d2 | drivers/watchdog/intel_scu_watchdog.c | 7297 | 15505 | /*
* Intel_SCU 0.2: An Intel SCU IOH Based Watchdog Device
* for Intel part #(s):
* - AF82MP20 PCH
*
* Copyright (C) 2009-2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General
* Public License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the Free
* Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
* The full GNU General Public License is included in this
* distribution in the file called COPYING.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/sfi.h>
#include <asm/irq.h>
#include <linux/atomic.h>
#include <asm/intel_scu_ipc.h>
#include <asm/apb_timer.h>
#include <asm/mrst.h>
#include "intel_scu_watchdog.h"
/* Bounds number of times we will retry loading time count */
/* This retry is a work around for a silicon bug. */
#define MAX_RETRY 16
#define IPC_SET_WATCHDOG_TIMER 0xF8
static int timer_margin = DEFAULT_SOFT_TO_HARD_MARGIN;
module_param(timer_margin, int, 0);
MODULE_PARM_DESC(timer_margin,
"Watchdog timer margin"
"Time between interrupt and resetting the system"
"The range is from 1 to 160"
"This is the time for all keep alives to arrive");
static int timer_set = DEFAULT_TIME;
module_param(timer_set, int, 0);
MODULE_PARM_DESC(timer_set,
"Default Watchdog timer setting"
"Complete cycle time"
"The range is from 1 to 170"
"This is the time for all keep alives to arrive");
/* After watchdog device is closed, check force_boot. If:
* force_boot == 0, then force boot on next watchdog interrupt after close,
* force_boot == 1, then force boot immediately when device is closed.
*/
static int force_boot;
module_param(force_boot, int, 0);
MODULE_PARM_DESC(force_boot,
"A value of 1 means that the driver will reboot"
"the system immediately if the /dev/watchdog device is closed"
"A value of 0 means that when /dev/watchdog device is closed"
"the watchdog timer will be refreshed for one more interval"
"of length: timer_set. At the end of this interval, the"
"watchdog timer will reset the system."
);
/* there is only one device in the system now; this can be made into
* an array in the future if we have more than one device */
static struct intel_scu_watchdog_dev watchdog_device;
/* Forces restart, if force_boot is set; otherwise just warns that the
 * hardware watchdog will reset the system when it expires. */
static void watchdog_fire(void)
{
    if (force_boot) {
        pr_crit("Initiating system reboot\n");
        emergency_restart();
        /* emergency_restart() is expected not to return */
        pr_crit("Reboot didn't ?????\n");
    } else {    /* kernel style: cuddle else with the closing brace */
        pr_crit("Immediate Reboot Disabled\n");
        pr_crit("System will reset when watchdog timer times out!\n");
    }
}
/* Validate a requested soft-to-hard margin (seconds).
 * Accepts values from MIN_TIME_CYCLE up to MAX_TIME - timer_set.
 * Returns 0 when in range, -EINVAL otherwise. */
static int check_timer_margin(int new_margin)
{
    int lower = MIN_TIME_CYCLE;
    int upper = MAX_TIME - timer_set;

    if (new_margin >= lower && new_margin <= upper)
        return 0;

    pr_debug("value of new_margin %d is out of the range %d to %d\n",
             new_margin, lower, upper);
    return -EINVAL;
}
/*
 * IPC operations
 */

/* Send the soft and hard expiry thresholds (timer ticks) to the SCU
 * firmware via the SET_WATCHDOG_TIMER IPC command.
 * Returns 0 on success or the intel_scu_ipc_command() error code. */
static int watchdog_set_ipc(int soft_threshold, int threshold)
{
    u32 *ipc_wbuf;
    u8 cbuf[16] = { '\0' };
    int ipc_ret = 0;

    /* was (u32 *)&cbuf: same address, but a pointer-to-array cast;
     * let the array decay to u8* and cast that instead */
    ipc_wbuf = (u32 *)cbuf;
    ipc_wbuf[0] = soft_threshold;
    ipc_wbuf[1] = threshold;

    ipc_ret = intel_scu_ipc_command(
            IPC_SET_WATCHDOG_TIMER,
            0,
            ipc_wbuf,
            2,
            NULL,
            0);

    if (ipc_ret != 0)
        pr_err("Error setting SCU watchdog timer: %x\n", ipc_ret);

    return ipc_ret;
}   /* dropped the stray ';' that followed the function body */
/*
 * Intel_SCU operations
 */

/* timer interrupt handler
 *
 * Fires when the soft threshold expires without a keepalive.  Reloads
 * the hard threshold so the system resets if no keepalive arrives in
 * time.  Returns IRQ_NONE when the status register says the interrupt
 * is not ours (shared line). */
static irqreturn_t watchdog_timer_interrupt(int irq, void *dev_id)
{
    int int_status;
    int_status = ioread32(watchdog_device.timer_interrupt_status_addr);

    pr_debug("irq, int_status: %x\n", int_status);

    /* NOTE(review): treats a non-zero status as "not our interrupt";
     * presumably 0 means pending on this hardware -- confirm against
     * the timer datasheet */
    if (int_status != 0)
        return IRQ_NONE;

    /* has the timer been started? If not, then this is spurious */
    if (watchdog_device.timer_started == 0) {
        pr_debug("spurious interrupt received\n");
        return IRQ_HANDLED;
    }

    /* temporarily disable the timer */
    iowrite32(0x00000002, watchdog_device.timer_control_addr);

    /* set the timer to the threshold */
    iowrite32(watchdog_device.threshold,
              watchdog_device.timer_load_count_addr);

    /* allow the timer to run */
    iowrite32(0x00000003, watchdog_device.timer_control_addr);

    return IRQ_HANDLED;
}
/* Keepalive ping: acknowledge any pending timer interrupt and restart
 * the countdown from the soft threshold.  The register write order
 * (clear, disable, load, enable) is required by the hardware. */
static int intel_scu_keepalive(void)
{
    /* read eoi register - clears interrupt */
    ioread32(watchdog_device.timer_clear_interrupt_addr);

    /* temporarily disable the timer */
    iowrite32(0x00000002, watchdog_device.timer_control_addr);

    /* set the timer to the soft_threshold */
    iowrite32(watchdog_device.soft_threshold,
              watchdog_device.timer_load_count_addr);

    /* allow the timer to run */
    iowrite32(0x00000003, watchdog_device.timer_control_addr);

    return 0;
}
/* Stop the watchdog timer by clearing its control register.
 * Always returns 0. */
static int intel_scu_stop(void)
{
    iowrite32(0, watchdog_device.timer_control_addr);
    return 0;
}
/* Program a new heartbeat interval of t seconds and start the timer.
 *
 * Computes the soft threshold (keepalive deadline) and hard threshold
 * (margin between soft expiry and reset) in timer ticks, sends both to
 * the SCU, then loads and starts the local timer.  A retry loop works
 * around early silicon that sometimes latched the load count wrong.
 * Returns 0 on success, an IPC error code, or -ENODEV if the count
 * never sticks. */
static int intel_scu_set_heartbeat(u32 t)
{
    int ipc_ret;
    int retry_count;
    u32 soft_value;
    u32 hw_pre_value;
    u32 hw_value;

    watchdog_device.timer_set = t;
    watchdog_device.threshold =
        timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
    watchdog_device.soft_threshold =
        (watchdog_device.timer_set - timer_margin)
        * watchdog_device.timer_tbl_ptr->freq_hz;

    pr_debug("set_heartbeat: timer freq is %d\n",
             watchdog_device.timer_tbl_ptr->freq_hz);
    pr_debug("set_heartbeat: timer_set is %x (hex)\n",
             watchdog_device.timer_set);
    pr_debug("set_hearbeat: timer_margin is %x (hex)\n", timer_margin);
    pr_debug("set_heartbeat: threshold is %x (hex)\n",
             watchdog_device.threshold);
    pr_debug("set_heartbeat: soft_threshold is %x (hex)\n",
             watchdog_device.soft_threshold);

    /* Adjust thresholds by FREQ_ADJUSTMENT factor, to make the */
    /* watchdog timing come out right. */
    watchdog_device.threshold =
        watchdog_device.threshold / FREQ_ADJUSTMENT;
    watchdog_device.soft_threshold =
        watchdog_device.soft_threshold / FREQ_ADJUSTMENT;

    /* temporarily disable the timer */
    iowrite32(0x00000002, watchdog_device.timer_control_addr);

    /* send the threshold and soft_threshold via IPC to the processor */
    ipc_ret = watchdog_set_ipc(watchdog_device.soft_threshold,
                               watchdog_device.threshold);

    if (ipc_ret != 0) {
        /* Make sure the watchdog timer is stopped */
        intel_scu_stop();
        return ipc_ret;
    }

    /* Soft Threshold set loop. Early versions of silicon did */
    /* not always set this count correctly.  This loop checks */
    /* the value and retries if it was not set correctly.     */
    retry_count = 0;
    soft_value = watchdog_device.soft_threshold & 0xFFFF0000;
    do {
        /* Make sure timer is stopped */
        intel_scu_stop();

        if (MAX_RETRY < retry_count++) {
            /* Unable to set timer value */
            pr_err("Unable to set timer\n");
            return -ENODEV;
        }

        /* set the timer to the soft threshold */
        iowrite32(watchdog_device.soft_threshold,
                  watchdog_device.timer_load_count_addr);

        /* read count value before starting timer */
        hw_pre_value = ioread32(watchdog_device.timer_load_count_addr);
        hw_pre_value = hw_pre_value & 0xFFFF0000;

        /* Start the timer */
        iowrite32(0x00000003, watchdog_device.timer_control_addr);

        /* read the value the time loaded into its count reg */
        hw_value = ioread32(watchdog_device.timer_load_count_addr);
        hw_value = hw_value & 0xFFFF0000;
        /* only the upper 16 bits are compared; low bits tick away */
    } while (soft_value != hw_value);

    watchdog_device.timer_started = 1;

    return 0;
}
/*
 * /dev/watchdog handling
 */

/* Open /dev/watchdog.  Only one opener is allowed at a time, and once
 * the device has been closed after starting the timer it may never be
 * reopened (driver_closed stays set; the bit set by test_and_set_bit is
 * deliberately left set in that case). */
static int intel_scu_open(struct inode *inode, struct file *file)
{

    /* Set flag to indicate that watchdog device is open */
    if (test_and_set_bit(0, &watchdog_device.driver_open))
        return -EBUSY;

    /* Check for reopen of driver. Reopens are not allowed */
    if (watchdog_device.driver_closed)
        return -EPERM;

    return nonseekable_open(inode, file);
}
/* Close /dev/watchdog.
 *
 * If the timer was never started this is a plain close.  Otherwise the
 * close is unexpected: the device is marked permanently closed, the
 * timer gets one last refresh, and watchdog_fire() either reboots now
 * (force_boot) or lets the hardware reset at the next expiry. */
static int intel_scu_release(struct inode *inode, struct file *file)
{
    /*
     * This watchdog should not be closed, after the timer
     * is started with the WDIPC_SETTIMEOUT ioctl
     * If force_boot is set watchdog_fire() will cause an
     * immediate reset. If force_boot is not set, the watchdog
     * timer is refreshed for one more interval. At the end
     * of that interval, the watchdog timer will reset the system.
     */

    if (!test_and_clear_bit(0, &watchdog_device.driver_open)) {
        pr_debug("intel_scu_release, without open\n");
        return -ENOTTY;
    }

    if (!watchdog_device.timer_started) {
        /* Just close, since timer has not been started */
        pr_debug("closed, without starting timer\n");
        return 0;
    }

    pr_crit("Unexpected close of /dev/watchdog!\n");

    /* Since the timer was started, prevent future reopens */
    watchdog_device.driver_closed = 1;

    /* Refresh the timer for one more interval */
    intel_scu_keepalive();

    /* Reboot system (if force_boot is set) */
    watchdog_fire();

    /* We should only reach this point if force_boot is not set */
    return 0;
}
/* Any write to /dev/watchdog: the first write arms the timer with the
 * default interval; subsequent writes act as keepalive pings.
 * Always reports the whole buffer as consumed. */
static ssize_t intel_scu_write(struct file *file,
                   char const *data,
                   size_t len,
                   loff_t *ppos)
{
    if (!watchdog_device.timer_started) {
        /* Start watchdog with timer value set by init */
        intel_scu_set_heartbeat(watchdog_device.timer_set);
        return len;
    }

    /* Watchdog already started, keep it alive */
    intel_scu_keepalive();
    return len;
}
/* ioctl handler for the standard watchdog WDIOC_* commands.
 * Supports GETSUPPORT/GETSTATUS/GETBOOTSTATUS/KEEPALIVE/SETTIMEOUT/
 * GETTIMEOUT; everything else gets -ENOTTY. */
static long intel_scu_ioctl(struct file *file,
                unsigned int cmd,
                unsigned long arg)
{
    void __user *argp = (void __user *)arg;
    u32 __user *p = argp;
    u32 new_margin;

    static const struct watchdog_info ident = {
        .options = WDIOF_SETTIMEOUT
               | WDIOF_KEEPALIVEPING,
        .firmware_version = 0,  /* @todo Get from SCU via
                       ipc_get_scu_fw_version()? */
        .identity = "Intel_SCU IOH Watchdog"  /* len < 32 */
    };

    switch (cmd) {
    case WDIOC_GETSUPPORT:
        return copy_to_user(argp,
                    &ident,
                    sizeof(ident)) ? -EFAULT : 0;
    case WDIOC_GETSTATUS:
    case WDIOC_GETBOOTSTATUS:
        return put_user(0, p);
    case WDIOC_KEEPALIVE:
        intel_scu_keepalive();

        return 0;
    case WDIOC_SETTIMEOUT:
        if (get_user(new_margin, p))
            return -EFAULT;

        if (check_timer_margin(new_margin))
            return -EINVAL;

        if (intel_scu_set_heartbeat(new_margin))
            return -EINVAL;
        return 0;
    case WDIOC_GETTIMEOUT:
        /* NOTE(review): this returns soft_threshold, which is stored
         * in hardware timer ticks after FREQ_ADJUSTMENT, while the
         * watchdog API expects seconds -- verify; timer_set looks
         * like the intended value */
        return put_user(watchdog_device.soft_threshold, p);

    default:
        return -ENOTTY;
    }
}
/*
 * Notifier for system down
 */

/* Reboot notifier: stop the watchdog on shutdown or halt so it cannot
 * fire during the transition.  Always returns NOTIFY_DONE. */
static int intel_scu_notify_sys(struct notifier_block *this,
                unsigned long code,
                void *another_unused)
{
    switch (code) {
    case SYS_DOWN:
    case SYS_HALT:
        /* Turn off the watchdog timer. */
        intel_scu_stop();
        break;
    }
    return NOTIFY_DONE;
}
/*
 * Kernel Interfaces
 */

/* file operations for the /dev/watchdog misc device */
static const struct file_operations intel_scu_fops = {
    .owner          = THIS_MODULE,
    .llseek         = no_llseek,
    .write          = intel_scu_write,
    .unlocked_ioctl = intel_scu_ioctl,
    .open           = intel_scu_open,
    .release        = intel_scu_release,
};
/* Module init: validate the boot parameters, locate the SFI timer the
 * watchdog uses, map its registers, register the reboot notifier, the
 * /dev/watchdog misc device and the timer interrupt, and leave the
 * timer disabled.  Errors unwind in reverse order via the gotos. */
static int __init intel_scu_watchdog_init(void)
{
    int ret;
    u32 __iomem *tmp_addr;

    /*
     * We don't really need to check this as the SFI timer get will fail
     * but if we do so we can exit with a clearer reason and no noise.
     *
     * If it isn't an intel MID device then it doesn't have this watchdog
     */
    if (!mrst_identify_cpu())
        return -ENODEV;

    /* Check boot parameters to verify that their initial values */
    /* are in range. */
    /* Check value of timer_set boot parameter */
    if ((timer_set < MIN_TIME_CYCLE) ||
        (timer_set > MAX_TIME - MIN_TIME_CYCLE)) {
        pr_err("value of timer_set %x (hex) is out of range from %x to %x (hex)\n",
               timer_set, MIN_TIME_CYCLE, MAX_TIME - MIN_TIME_CYCLE);
        return -EINVAL;
    }

    /* Check value of timer_margin boot parameter */
    if (check_timer_margin(timer_margin))
        return -EINVAL;

    /* the watchdog borrows the last SFI MTMR timer */
    watchdog_device.timer_tbl_ptr = sfi_get_mtmr(sfi_mtimer_num-1);

    if (watchdog_device.timer_tbl_ptr == NULL) {
        pr_debug("timer is not available\n");
        return -ENODEV;
    }
    /* make sure the timer exists */
    if (watchdog_device.timer_tbl_ptr->phys_addr == 0) {
        pr_debug("timer %d does not have valid physical memory\n",
                 sfi_mtimer_num);
        return -ENODEV;
    }

    if (watchdog_device.timer_tbl_ptr->irq == 0) {
        pr_debug("timer %d invalid irq\n", sfi_mtimer_num);
        return -ENODEV;
    }

    /* map the five consecutive 32-bit timer registers */
    tmp_addr = ioremap_nocache(watchdog_device.timer_tbl_ptr->phys_addr,
                               20);

    if (tmp_addr == NULL) {
        pr_debug("timer unable to ioremap\n");
        return -ENOMEM;
    }

    watchdog_device.timer_load_count_addr = tmp_addr++;
    watchdog_device.timer_current_value_addr = tmp_addr++;
    watchdog_device.timer_control_addr = tmp_addr++;
    watchdog_device.timer_clear_interrupt_addr = tmp_addr++;
    watchdog_device.timer_interrupt_status_addr = tmp_addr++;

    /* Set the default time values in device structure */

    watchdog_device.timer_set = timer_set;
    watchdog_device.threshold =
        timer_margin * watchdog_device.timer_tbl_ptr->freq_hz;
    watchdog_device.soft_threshold =
        (watchdog_device.timer_set - timer_margin)
        * watchdog_device.timer_tbl_ptr->freq_hz;

    watchdog_device.intel_scu_notifier.notifier_call =
        intel_scu_notify_sys;

    ret = register_reboot_notifier(&watchdog_device.intel_scu_notifier);
    if (ret) {
        pr_err("cannot register notifier %d)\n", ret);
        goto register_reboot_error;
    }

    watchdog_device.miscdev.minor = WATCHDOG_MINOR;
    watchdog_device.miscdev.name = "watchdog";
    watchdog_device.miscdev.fops = &intel_scu_fops;

    ret = misc_register(&watchdog_device.miscdev);
    if (ret) {
        pr_err("cannot register miscdev %d err =%d\n",
               WATCHDOG_MINOR, ret);
        goto misc_register_error;
    }

    /* dev_id is the address of timer_load_count_addr: the same cookie
     * must be used by any matching free_irq() */
    ret = request_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
                      watchdog_timer_interrupt,
                      IRQF_SHARED, "watchdog",
                      &watchdog_device.timer_load_count_addr);
    if (ret) {
        pr_err("error requesting irq %d\n", ret);
        goto request_irq_error;
    }
    /* Make sure timer is disabled before returning */
    intel_scu_stop();
    return 0;

/* error cleanup */

request_irq_error:
    misc_deregister(&watchdog_device.miscdev);
misc_register_error:
    unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
register_reboot_error:
    intel_scu_stop();
    iounmap(watchdog_device.timer_load_count_addr);
    return ret;
}
/* Module exit: tear down in reverse order of init.
 * Fix: the interrupt requested in init was never released, leaking the
 * irq and leaving the handler registered while the registers below are
 * unmapped.  Free it first, with the same dev_id cookie request_irq()
 * used. */
static void __exit intel_scu_watchdog_exit(void)
{
    free_irq((unsigned int)watchdog_device.timer_tbl_ptr->irq,
             &watchdog_device.timer_load_count_addr);
    misc_deregister(&watchdog_device.miscdev);
    unregister_reboot_notifier(&watchdog_device.intel_scu_notifier);
    /* disable the timer */
    iowrite32(0x00000002, watchdog_device.timer_control_addr);
    iounmap(watchdog_device.timer_load_count_addr);
}
late_initcall(intel_scu_watchdog_init);
module_exit(intel_scu_watchdog_exit);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel SCU Watchdog Device Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
MODULE_VERSION(WDT_VER);
| gpl-2.0 |
fnoji/android_kernel_htc_impj | drivers/misc/ibmasm/r_heartbeat.c | 9601 | 2611 |
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) IBM Corporation, 2004
*
* Author: Max Asböck <amax@us.ibm.com>
*
*/
#include <linux/sched.h>
#include "ibmasm.h"
#include "dot_command.h"
/*
* Reverse Heartbeat, i.e. heartbeats sent from the driver to the
* service processor.
* These heartbeats are initiated by user level programs.
*/
/* the reverse heartbeat dot command (4.3.6), packed so its layout
 * matches the wire format expected by the service processor */
#pragma pack(1)
static struct {
    struct dot_command_header header;
    unsigned char command[3];
} rhb_dot_cmd = {
    .header = {
        .type = sp_read,
        .command_size = 3,
        .data_size = 0,
        .status = 0
    },
    .command = { 4, 3, 6 }  /* dot command id: 4.3.6 */
};
#pragma pack()
/*
 * Initialise a reverse-heartbeat descriptor: set up its wait queue and
 * clear the stop flag. @sp is unused here but kept for API symmetry.
 */
void ibmasm_init_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb)
{
	init_waitqueue_head(&rhb->wait);
	rhb->stopped = 0;
}
/**
 * ibmasm_start_reverse_heartbeat - run the reverse heartbeat loop
 * @sp: service processor to ping
 * @rhb: heartbeat state (wait queue + stop flag)
 *
 * Loop forever, sending a reverse heartbeat dot command to the service
 * processor, then sleeping. The loop comes to an end if the service
 * processor fails to respond 3 times (not necessarily consecutively --
 * the failure count is never reset) or we were interrupted.
 *
 * Returns 1 after three command failures, -EINTR when stopped or a
 * signal is pending, or -ENOMEM if the command could not be allocated.
 */
int ibmasm_start_reverse_heartbeat(struct service_processor *sp, struct reverse_heartbeat *rhb)
{
	struct command *cmd;
	int times_failed = 0;
	int result = 1;

	cmd = ibmasm_new_command(sp, sizeof rhb_dot_cmd);
	if (!cmd)
		return -ENOMEM;

	while (times_failed < 3) {
		/*
		 * Re-copy the command template every round.
		 * NOTE(review): presumably the response overwrites
		 * cmd->buffer, hence the re-copy -- confirm.
		 */
		memcpy(cmd->buffer, (void *)&rhb_dot_cmd, sizeof rhb_dot_cmd);
		cmd->status = IBMASM_CMD_PENDING;
		ibmasm_exec_command(sp, cmd);
		ibmasm_wait_for_response(cmd, IBMASM_CMD_TIMEOUT_NORMAL);
		if (cmd->status != IBMASM_CMD_COMPLETE)
			times_failed++;
		/* sleep until the next beat, or until someone stops us */
		wait_event_interruptible_timeout(rhb->wait,
			rhb->stopped,
			REVERSE_HEARTBEAT_TIMEOUT * HZ);
		if (signal_pending(current) || rhb->stopped) {
			result = -EINTR;
			break;
		}
	}
	command_put(cmd);
	/* rearm the flag so the descriptor can be reused */
	rhb->stopped = 0;
	return result;
}
/*
 * Request termination of a running reverse-heartbeat loop and wake the
 * sleeper so it notices rhb->stopped immediately.
 */
void ibmasm_stop_reverse_heartbeat(struct reverse_heartbeat *rhb)
{
	rhb->stopped = 1;
	wake_up_interruptible(&rhb->wait);
}
| gpl-2.0 |
Anteater-GitHub/edison-linux | net/netfilter/xt_length.c | 13697 | 1866 | /* Kernel module to match packet length. */
/* (C) 1999-2001 James Morris <jmorris@intercode.com.au>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <net/ip.h>
#include <linux/netfilter/xt_length.h>
#include <linux/netfilter/x_tables.h>
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_DESCRIPTION("Xtables: Packet length (Layer3,4,5) match");
MODULE_LICENSE("GPL");
/* keep the legacy iptables/ip6tables match names loadable */
MODULE_ALIAS("ipt_length");
MODULE_ALIAS("ip6t_length");
/*
 * IPv4 match: compare the total packet length (IP header's tot_len)
 * against the [min, max] window, optionally inverted.
 */
static bool
length_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_length_info *info = par->matchinfo;
	const u_int16_t total = ntohs(ip_hdr(skb)->tot_len);
	bool in_range = (total >= info->min) && (total <= info->max);

	return in_range ^ info->invert;
}
/*
 * IPv6 match: payload_len excludes the fixed header, so add it back
 * before comparing against the [min, max] window (with optional invert).
 */
static bool
length_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_length_info *info = par->matchinfo;
	const u_int16_t total = ntohs(ipv6_hdr(skb)->payload_len) +
				sizeof(struct ipv6hdr);
	bool in_range = (total >= info->min) && (total <= info->max);

	return in_range ^ info->invert;
}
/* One registration per address family; both share xt_length_info. */
static struct xt_match length_mt_reg[] __read_mostly = {
	{
		.name = "length",
		.family = NFPROTO_IPV4,
		.match = length_mt,
		.matchsize = sizeof(struct xt_length_info),
		.me = THIS_MODULE,
	},
	{
		.name = "length",
		.family = NFPROTO_IPV6,
		.match = length_mt6,
		.matchsize = sizeof(struct xt_length_info),
		.me = THIS_MODULE,
	},
};
/* Register/unregister both matches with the xtables core. */
static int __init length_mt_init(void)
{
	return xt_register_matches(length_mt_reg, ARRAY_SIZE(length_mt_reg));
}

static void __exit length_mt_exit(void)
{
	xt_unregister_matches(length_mt_reg, ARRAY_SIZE(length_mt_reg));
}

module_init(length_mt_init);
module_exit(length_mt_exit);
| gpl-2.0 |
sooorajjj/android_kernel_cyanogen_msm8916 | fs/isofs/joliet.c | 14465 | 1357 | /*
* linux/fs/isofs/joliet.c
*
* (C) 1996 Gordon Chaffee
*
* Joliet: Microsoft's Unicode extensions to iso9660
*/
#include <linux/types.h>
#include <linux/nls.h>
#include "isofs.h"
/*
 * Convert a big-endian UTF-16 string to the mount's NLS charset.
 * Stops at the first NUL code point or after @len units; characters
 * the NLS table cannot represent become '?'. The output is
 * NUL-terminated and its length (excluding the terminator) returned.
 */
static int
uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls)
{
	unsigned char *out = ascii;
	__be16 *in = uni;

	for (;;) {
		__be16 c = get_unaligned(in);
		int n;

		if (!c || !len)
			break;
		n = nls->uni2char(be16_to_cpu(c), out, NLS_MAX_CHARSET_SIZE);
		if (n > 0)
			out += n;
		else
			*out++ = '?';
		in++;
		len--;
	}
	*out = 0;
	return (out - ascii);
}
/*
 * Decode a Joliet (UTF-16BE) directory entry name into @outname.
 *
 * Uses UTF-8 output when the mount selected utf8, otherwise converts
 * through the mount's NLS table. Strips a trailing ISO version suffix
 * ";1" and any trailing periods (Windows compatibility). Returns the
 * resulting name length.
 *
 * NOTE(review): len is unsigned char, so a converted name longer than
 * 255 bytes would wrap -- confirm callers bound the input. The utf8
 * path is bounded by PAGE_SIZE; the NLS path relies on the caller's
 * buffer being large enough.
 */
int
get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
{
	unsigned char utf8;
	struct nls_table *nls;
	unsigned char len = 0;

	utf8 = ISOFS_SB(inode->i_sb)->s_utf8;
	nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
	if (utf8) {
		/* name_len is in bytes; UTF-16 units are 2 bytes each */
		len = utf16s_to_utf8s((const wchar_t *) de->name,
				de->name_len[0] >> 1, UTF16_BIG_ENDIAN,
				outname, PAGE_SIZE);
	} else {
		len = uni16_to_x8(outname, (__be16 *) de->name,
				de->name_len[0] >> 1, nls);
	}
	/* drop the ISO9660 ";1" version suffix, if present */
	if ((len > 2) && (outname[len-2] == ';') && (outname[len-1] == '1'))
		len -= 2;
	/*
	 * Windows doesn't like periods at the end of a name,
	 * so neither do we
	 */
	while (len >= 2 && (outname[len-1] == '.'))
		len--;
	return len;
}
| gpl-2.0 |
MyAOSP/kernel_moto_wingray | drivers/md/multipath.c | 130 | 14226 | /*
* multipath.c : Multiple Devices driver for Linux
*
* Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
*
* Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
*
* MULTIPATH management functions.
*
* derived from raid1.c.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* You should have received a copy of the GNU General Public License
* (for example /usr/src/linux/COPYING); if not, write to the Free
* Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/blkdev.h>
#include <linux/raid/md_u.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "md.h"
#include "multipath.h"
#define MAX_WORK_PER_DISK 128
#define NR_RESERVED_BUFS 32
/*
 * Pick a path for I/O: the first in-sync device wins. On success the
 * chosen rdev's pending count is bumped (dropped by the completion
 * path) and the path index returned; -1 means no path is operational.
 */
static int multipath_map (multipath_conf_t *conf)
{
	int path;
	int nr_disks = conf->raid_disks;

	/*
	 * Later we do read balancing on the read side
	 * now we use the first available disk.
	 */
	rcu_read_lock();
	for (path = 0; path < nr_disks; path++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[path].rdev);

		if (rdev == NULL || !test_bit(In_sync, &rdev->flags))
			continue;
		atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();
		return path;
	}
	rcu_read_unlock();

	printk(KERN_ERR "multipath_map(): no more operational IO paths?\n");
	return (-1);
}
/*
 * Queue a failed multipath bh on the retry list (under the device lock)
 * and kick the per-array thread (multipathd) to reissue it.
 */
static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{
	unsigned long flags;
	mddev_t *mddev = mp_bh->mddev;
	multipath_conf_t *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&mp_bh->retry_list, &conf->retry_list);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(mddev->thread);
}
/*
 * multipath_end_bh_io() is called when we have finished servicing a multipathed
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void multipath_end_bh_io (struct multipath_bh *mp_bh, int err)
{
	struct bio *bio = mp_bh->master_bio;
	multipath_conf_t *conf = mp_bh->mddev->private;

	/* complete the original bio, then recycle the tracking structure */
	bio_endio(bio, err);
	mempool_free(mp_bh, conf->pool);
}
/*
 * Completion callback for a path-level bio. On success, complete the
 * master bio; on a real error (not readahead) fail the path and queue
 * the request for retry on another path. Always drops the pending
 * reference taken by multipath_map().
 */
static void multipath_end_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct multipath_bh *mp_bh = bio->bi_private;
	multipath_conf_t *conf = mp_bh->mddev->private;
	mdk_rdev_t *rdev = conf->multipaths[mp_bh->path].rdev;

	if (uptodate)
		multipath_end_bh_io(mp_bh, 0);
	else if (!(bio->bi_rw & REQ_RAHEAD)) {
		/*
		 * oops, IO error:
		 */
		char b[BDEVNAME_SIZE];
		md_error (mp_bh->mddev, rdev);
		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
		       bdevname(rdev->bdev,b),
		       (unsigned long long)bio->bi_sector);
		multipath_reschedule_retry(mp_bh);
	} else
		/* readahead failure: pass the error straight through */
		multipath_end_bh_io(mp_bh, error);
	rdev_dec_pending(rdev, conf->mddev);
}
/*
 * md entry point for incoming bios. Flushes are handed to the md core;
 * everything else is cloned into a multipath_bh and reissued against
 * the first operational path. Returns 0 (bio consumed).
 */
static int multipath_make_request(mddev_t *mddev, struct bio * bio)
{
	multipath_conf_t *conf = mddev->private;
	struct multipath_bh * mp_bh;
	struct multipath_info *multipath;

	if (unlikely(bio->bi_rw & REQ_FLUSH)) {
		md_flush_request(mddev, bio);
		return 0;
	}

	mp_bh = mempool_alloc(conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		/* no working path left: fail the bio immediately */
		bio_endio(bio, -EIO);
		mempool_free(mp_bh, conf->pool);
		return 0;
	}
	multipath = conf->multipaths + mp_bh->path;

	/* clone the bio and retarget it at the chosen path's device */
	mp_bh->bio = *bio;
	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	generic_make_request(&mp_bh->bio);
	return 0;
}
/*
 * /proc/mdstat line: "[total/working] [U_U...]" where 'U' marks an
 * in-sync path and '_' a missing or out-of-sync one.
 */
static void multipath_status (struct seq_file *seq, mddev_t *mddev)
{
	multipath_conf_t *conf = mddev->private;
	int path;

	seq_printf (seq, " [%d/%d] [", conf->raid_disks,
		    conf->working_disks);
	for (path = 0; path < conf->raid_disks; path++) {
		const char *mark = "_";

		if (conf->multipaths[path].rdev &&
		    test_bit(In_sync, &conf->multipaths[path].rdev->flags))
			mark = "U";
		seq_printf (seq, "%s", mark);
	}
	seq_printf (seq, "]");
}
/*
 * Congestion callback: the array is congested if the md core says so,
 * or if the first usable (non-faulty) path's queue is congested --
 * mirroring the path choice made by multipath_map().
 */
static int multipath_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	multipath_conf_t *conf = mddev->private;
	int i, ret = 0;

	if (mddev_congested(mddev, bits))
		return 1;

	rcu_read_lock();
	for (i = 0; i < mddev->raid_disks ; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
			/* Just like multipath_map, we just check the
			 * first available device
			 */
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
/*
 * Error handler: fail @rdev unless it is the last working path, in
 * which case it is deliberately left active (there is nothing better).
 * Careful, this can execute in IRQ contexts as well!
 */
static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
{
	multipath_conf_t *conf = mddev->private;

	if (conf->working_disks <= 1) {
		/*
		 * Uh oh, we can do nothing if this is our last path, but
		 * first check if this is a queued request for a device
		 * which has just failed.
		 */
		printk(KERN_ALERT
			"multipath: only one IO path left and IO error.\n");
		/* leave it active... it's all we have */
	} else {
		/*
		 * Mark disk as unusable
		 */
		if (!test_bit(Faulty, &rdev->flags)) {
			char b[BDEVNAME_SIZE];
			clear_bit(In_sync, &rdev->flags);
			set_bit(Faulty, &rdev->flags);
			/* superblock update needed: path count changed */
			set_bit(MD_CHANGE_DEVS, &mddev->flags);
			conf->working_disks--;
			mddev->degraded++;
			printk(KERN_ALERT "multipath: IO failure on %s,"
				" disabling IO path.\n"
				"multipath: Operation continuing"
				" on %d IO paths.\n",
				bdevname (rdev->bdev,b),
				conf->working_disks);
		}
	}
}
/*
 * Debug dump of the multipath configuration: working/total counts plus
 * one line per attached path with its operational state and device name.
 */
static void print_multipath_conf (multipath_conf_t *conf)
{
	int idx;
	struct multipath_info *disk;

	printk("MULTIPATH conf printout:\n");
	if (conf == NULL) {
		printk("(conf==NULL)\n");
		return;
	}
	printk(" --- wd:%d rd:%d\n", conf->working_disks,
	       conf->raid_disks);

	for (idx = 0; idx < conf->raid_disks; idx++) {
		char b[BDEVNAME_SIZE];

		disk = conf->multipaths + idx;
		if (disk->rdev == NULL)
			continue;
		printk(" disk%d, o:%d, dev:%s\n",
		       idx, !test_bit(Faulty, &disk->rdev->flags),
		       bdevname(disk->rdev->bdev, b));
	}
}
/*
 * Hot-add @rdev as a new path: find a free slot (or the slot the rdev
 * asks for via raid_disk), stack queue limits, mark the path in-sync
 * and publish it via RCU. Returns 0 on success, -EEXIST if no free
 * slot was found in the requested range.
 */
static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
	multipath_conf_t *conf = mddev->private;
	struct request_queue *q;
	int err = -EEXIST;
	int path;
	struct multipath_info *p;
	int first = 0;
	int last = mddev->raid_disks - 1;

	/* a preassigned slot restricts the search to exactly that slot */
	if (rdev->raid_disk >= 0)
		first = last = rdev->raid_disk;

	print_multipath_conf(conf);

	for (path = first; path <= last; path++)
		if ((p=conf->multipaths+path)->rdev == NULL) {
			q = rdev->bdev->bd_disk->queue;
			disk_stack_limits(mddev->gendisk, rdev->bdev,
					  rdev->data_offset << 9);

			/* as we don't honour merge_bvec_fn, we must never risk
			 * violating it, so limit ->max_segments to one, lying
			 * within a single page.
			 * (Note: it is very unlikely that a device with
			 * merge_bvec_fn will be involved in multipath.)
			 */
			if (q->merge_bvec_fn) {
				blk_queue_max_segments(mddev->queue, 1);
				blk_queue_segment_boundary(mddev->queue,
							   PAGE_CACHE_SIZE - 1);
			}

			conf->working_disks++;
			mddev->degraded--;
			rdev->raid_disk = path;
			set_bit(In_sync, &rdev->flags);
			rcu_assign_pointer(p->rdev, rdev);
			err = 0;
			md_integrity_add_rdev(rdev, mddev);
			break;
		}

	print_multipath_conf(conf);

	return err;
}
/*
 * Hot-remove path @number. Refuses (-EBUSY) while the path is still
 * in-sync or has I/O pending. The NULL store + synchronize_rcu() +
 * re-check sequence closes the race with readers that grabbed the rdev
 * pointer (multipath_map) just before removal.
 */
static int multipath_remove_disk(mddev_t *mddev, int number)
{
	multipath_conf_t *conf = mddev->private;
	int err = 0;
	mdk_rdev_t *rdev;
	struct multipath_info *p = conf->multipaths + number;

	print_multipath_conf(conf);

	rdev = p->rdev;
	if (rdev) {
		if (test_bit(In_sync, &rdev->flags) ||
		    atomic_read(&rdev->nr_pending)) {
			printk(KERN_ERR "hot-remove-disk, slot %d is identified"
			       " but is still operational!\n", number);
			err = -EBUSY;
			goto abort;
		}
		p->rdev = NULL;
		synchronize_rcu();
		if (atomic_read(&rdev->nr_pending)) {
			/* lost the race, try later */
			err = -EBUSY;
			p->rdev = rdev;
			goto abort;
		}
		err = md_integrity_register(mddev);
	}
abort:

	print_multipath_conf(conf);
	return err;
}
/*
 * This is a kernel thread which:
 *
 * 1. Retries failed read operations on working multipaths.
 * 2. Updates the raid superblock when problems are encountered.
 * 3. Performs writes following reads for array synchronising.
 */
/*
 * Per-array md thread body: drain conf->retry_list, reissuing each
 * failed request on another working path, or failing it with -EIO if
 * no path remains. device_lock protects only the list manipulation.
 */
static void multipathd (mddev_t *mddev)
{
	struct multipath_bh *mp_bh;
	struct bio *bio;
	unsigned long flags;
	multipath_conf_t *conf = mddev->private;
	struct list_head *head = &conf->retry_list;

	md_check_recovery(mddev);
	for (;;) {
		char b[BDEVNAME_SIZE];
		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head))
			break;	/* exits with the lock held; released after the loop */
		mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
		list_del(head->prev);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		bio = &mp_bh->bio;
		bio->bi_sector = mp_bh->master_bio->bi_sector;

		if ((mp_bh->path = multipath_map (conf))<0) {
			printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
				" error for block %llu\n",
				bdevname(bio->bi_bdev,b),
				(unsigned long long)bio->bi_sector);
			multipath_end_bh_io(mp_bh, -EIO);
		} else {
			printk(KERN_ERR "multipath: %s: redirecting sector %llu"
				" to another IO path\n",
				bdevname(bio->bi_bdev,b),
				(unsigned long long)bio->bi_sector);
			/* re-clone from the master bio and retarget at the new path */
			*bio = *(mp_bh->master_bio);
			bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
			bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
			bio->bi_end_io = multipath_end_request;
			bio->bi_private = mp_bh;
			generic_make_request(bio);
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
/*
 * Report the array size. Multipath never reshapes, so any request with
 * non-zero sectors/raid_disks is a caller bug (warned once).
 */
static sector_t multipath_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	WARN_ONCE(sectors || raid_disks,
		  "%s does not support generic reshape\n", __func__);

	return mddev->dev_sectors;
}
/*
 * Array start-up: allocate the private conf, adopt the already-verified
 * member devices as paths, create the retry mempool and the multipathd
 * thread, and publish the array size. Returns 0 on success, -EINVAL
 * for a bitmap'd array, -EIO on any other failure (resources freed).
 */
static int multipath_run (mddev_t *mddev)
{
	multipath_conf_t *conf;
	int disk_idx;
	struct multipath_info *disk;
	mdk_rdev_t *rdev;

	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	if (mddev->level != LEVEL_MULTIPATH) {
		printk("multipath: %s: raid level not set to multipath IO (%d)\n",
		       mdname(mddev), mddev->level);
		goto out;
	}
	/*
	 * copy the already verified devices into our private MULTIPATH
	 * bookkeeping area. [whatever we allocate in multipath_run(),
	 * should be freed in multipath_stop()]
	 */

	conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
	mddev->private = conf;
	if (!conf) {
		printk(KERN_ERR
			"multipath: couldn't allocate memory for %s\n",
			mdname(mddev));
		goto out;
	}

	conf->multipaths = kzalloc(sizeof(struct multipath_info)*mddev->raid_disks,
				   GFP_KERNEL);
	if (!conf->multipaths) {
		printk(KERN_ERR
			"multipath: couldn't allocate memory for %s\n",
			mdname(mddev));
		goto out_free_conf;
	}

	conf->working_disks = 0;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		disk_idx = rdev->raid_disk;
		/* skip spares / devices without an assigned slot */
		if (disk_idx < 0 ||
		    disk_idx >= mddev->raid_disks)
			continue;

		disk = conf->multipaths + disk_idx;
		disk->rdev = rdev;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);

		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, not that we ever expect a device with
		 * a merge_bvec_fn to be involved in multipath */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
			blk_queue_max_segments(mddev->queue, 1);
			blk_queue_segment_boundary(mddev->queue,
						   PAGE_CACHE_SIZE - 1);
		}

		if (!test_bit(Faulty, &rdev->flags))
			conf->working_disks++;
	}

	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	if (!conf->working_disks) {
		printk(KERN_ERR "multipath: no operational IO paths for %s\n",
			mdname(mddev));
		goto out_free_conf;
	}
	mddev->degraded = conf->raid_disks - conf->working_disks;

	/* reserved bhs so retries can proceed even under memory pressure */
	conf->pool = mempool_create_kmalloc_pool(NR_RESERVED_BUFS,
						 sizeof(struct multipath_bh));
	if (conf->pool == NULL) {
		printk(KERN_ERR
			"multipath: couldn't allocate memory for %s\n",
			mdname(mddev));
		goto out_free_conf;
	}

	{
		mddev->thread = md_register_thread(multipathd, mddev, NULL);
		if (!mddev->thread) {
			printk(KERN_ERR "multipath: couldn't allocate thread"
				" for %s\n", mdname(mddev));
			goto out_free_conf;
		}
	}

	printk(KERN_INFO
		"multipath: array %s active with %d out of %d IO paths\n",
		mdname(mddev), conf->working_disks, mddev->raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));

	mddev->queue->backing_dev_info.congested_fn = multipath_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	if (md_integrity_register(mddev))
		goto out_free_conf;

	return 0;

out_free_conf:
	if (conf->pool)
		mempool_destroy(conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}
/*
 * Array shutdown: stop multipathd, quiesce the queue (its unplug fn
 * references conf), then free everything multipath_run() allocated.
 */
static int multipath_stop (mddev_t *mddev)
{
	multipath_conf_t *conf = mddev->private;

	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
	mempool_destroy(conf->pool);
	kfree(conf->multipaths);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}
/* md personality hooks for LEVEL_MULTIPATH (no resync/reshape support). */
static struct mdk_personality multipath_personality =
{
	.name		= "multipath",
	.level		= LEVEL_MULTIPATH,
	.owner		= THIS_MODULE,
	.make_request	= multipath_make_request,
	.run		= multipath_run,
	.stop		= multipath_stop,
	.status		= multipath_status,
	.error_handler	= multipath_error,
	.hot_add_disk	= multipath_add_disk,
	.hot_remove_disk= multipath_remove_disk,
	.size		= multipath_size,
};
/* Register/unregister the personality with the md core. */
static int __init multipath_init (void)
{
	return register_md_personality (&multipath_personality);
}

static void __exit multipath_exit (void)
{
	unregister_md_personality (&multipath_personality);
}

module_init(multipath_init);
module_exit(multipath_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("simple multi-path personality for MD");
MODULE_ALIAS("md-personality-7"); /* MULTIPATH */
MODULE_ALIAS("md-multipath");
MODULE_ALIAS("md-level--4");
| gpl-2.0 |
virtuous/kernel-vivow-gingerbread-v2 | drivers/serial/msm_serial_hs.c | 130 | 43842 | /* drivers/serial/msm_serial_hs.c
*
* MSM 7k/8k High speed uart driver
*
* Copyright (c) 2007-2008 QUALCOMM Incorporated.
* Copyright (c) 2008 QUALCOMM USA, INC.
* Copyright (c) 2008 Google Inc.
* Modified: Nick Pelly <npelly@google.com>
*
* All source code in this file is licensed under the following license
* except where indicated.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
/*
* MSM 7k/8k High speed uart driver
*
* Has optional support for uart power management independent of linux
* suspend/resume:
*
* RX wakeup.
* UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
* UART RX pin). This should only be used if there is not a wakeup
* GPIO on the UART CTS, and the first RX byte is known (for example, with the
* Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
* always be lost. RTS will be asserted even while the UART is off in this mode
* of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
*/
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/wait.h>
#include <linux/wakelock.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <linux/irq.h>
#include <asm/system.h>
#include <mach/hardware.h>
#include <mach/dma.h>
#include <mach/msm_serial_hs.h>
#include "msm_serial_hs_hwreg.h"
/* Why an RX DMA flush happened; values >= FLUSH_DATA_INVALID mean the
 * flushed data must be discarded rather than pushed to the tty. */
enum flush_reason {
	FLUSH_NONE,
	FLUSH_DATA_READY,
	FLUSH_DATA_INVALID,  /* values after this indicate invalid data */
	FLUSH_IGNORE = FLUSH_DATA_INVALID,
	FLUSH_STOP,
	FLUSH_SHUTDOWN,
};

/* Clock state machine for the port's power management. */
enum msm_hs_clk_states_e {
	MSM_HS_CLK_PORT_OFF,     /* port not in use */
	MSM_HS_CLK_OFF,          /* clock disabled */
	MSM_HS_CLK_REQUEST_OFF,  /* disable after TX and RX flushed */
	MSM_HS_CLK_ON,           /* clock enabled */
};

/* Track the forced RXSTALE flush during clock off sequence.
 * These states are only valid during MSM_HS_CLK_REQUEST_OFF */
enum msm_hs_clk_req_off_state_e {
	CLK_REQ_OFF_START,
	CLK_REQ_OFF_RXSTALE_ISSUED,
	CLK_REQ_OFF_FLUSH_ISSUED,
	CLK_REQ_OFF_RXSTALE_FLUSHED,
};
/* Per-port TX DMA state: DM command descriptors plus their mapped
 * (bus) addresses and the size of the transfer in flight. */
struct msm_hs_tx {
	unsigned int tx_ready_int_en;  /* ok to dma more tx */
	unsigned int dma_in_flight;    /* tx dma in progress */
	struct msm_dmov_cmd xfer;
	dmov_box *command_ptr;
	u32 *command_ptr_ptr;
	dma_addr_t mapped_cmd_ptr;
	dma_addr_t mapped_cmd_ptr_ptr;
	int tx_count;                  /* bytes queued in the current transfer */
	dma_addr_t dma_base;           /* bus address of the circ xmit buffer */
};

/* Per-port RX DMA state: DMA-pool backed receive buffer, DM command
 * descriptors, and bookkeeping for flush/wakeup handling. */
struct msm_hs_rx {
	enum flush_reason flush;
	struct msm_dmov_cmd xfer;
	dma_addr_t cmdptr_dmaaddr;
	dmov_box *command_ptr;
	u32 *command_ptr_ptr;
	dma_addr_t mapped_cmd_ptr;
	wait_queue_head_t wait;
	dma_addr_t rbuffer;            /* bus address of buffer */
	unsigned char *buffer;         /* CPU address of the same buffer */
	struct dma_pool *pool;
	struct wake_lock wake_lock;
	struct work_struct tty_work;   /* pushes received bytes to the tty */
};

/* optional RX GPIO IRQ low power wakeup */
struct msm_hs_rx_wakeup {
	int irq;                /* < 0 indicates low power wakeup disabled */
	unsigned char ignore;   /* bool */

	/* bool: inject char into rx tty on wakeup */
	unsigned char inject_rx;
	char rx_to_inject;      /* the char to inject (first RX byte is lost) */
};

/* Driver-private port structure wrapping the generic uart_port. */
struct msm_hs_port {
	struct uart_port uport;
	unsigned long imr_reg;  /* shadow value of UARTDM_IMR */
	struct clk *clk;
	struct msm_hs_tx tx;
	struct msm_hs_rx rx;

	int dma_tx_channel;
	int dma_rx_channel;
	int dma_tx_crci;
	int dma_rx_crci;

	struct hrtimer clk_off_timer;  /* to poll TXEMT before clock off */
	ktime_t clk_off_delay;
	enum msm_hs_clk_states_e clk_state;
	enum msm_hs_clk_req_off_state_e clk_req_off_state;

	struct msm_hs_rx_wakeup rx_wakeup;

	/* optional callback to exit low power mode */
	void (*exit_lpm_cb)(struct uart_port *);

	struct wake_lock dma_wake_lock;  /* held while any DMA active */
};
#define MSM_UARTDM_BURST_SIZE 16   /* DM burst size (in bytes) */

#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
#define UARTDM_RX_BUF_SIZE 512

#define UARTDM_NR 2   /* number of high-speed UART ports supported */

/* Static port table plus driver/ops singletons defined later in the file. */
static struct msm_hs_port q_uart_port[UARTDM_NR];
static struct platform_driver msm_serial_hs_platform_driver;
static struct uart_driver msm_hs_driver;
static struct uart_ops msm_hs_ops;
static struct workqueue_struct *msm_hs_workqueue;
/* Recover the driver-private port structure from a generic uart_port. */
#define UARTDM_TO_MSM(uart_port) \
	container_of((uart_port), struct msm_hs_port, uport)

/* True when an RX-wakeup GPIO IRQ was configured for this port. */
static inline
unsigned int use_low_power_rx_wakeup(struct msm_hs_port *msm_uport)
{
	return (msm_uport->rx_wakeup.irq >= 0);
}

/* 32-bit UARTDM register accessors; offset is relative to membase. */
static inline unsigned int msm_hs_read(struct uart_port *uport,
				       unsigned int offset)
{
	return ioread32(uport->membase + offset);
}

static inline void msm_hs_write(struct uart_port *uport, unsigned int offset,
				unsigned int value)
{
	iowrite32(value, uport->membase + offset);
}
/* Port resource claim/release stubs.
 * NOTE(review): presumably the memory region is claimed at probe time,
 * so there is nothing to do here -- confirm against the probe path. */
static void msm_hs_release_port(struct uart_port *port)
{
}

static int msm_hs_request_port(struct uart_port *port)
{
	return 0;
}
/*
 * Platform driver remove: undo everything probe set up -- unmap the DMA
 * command descriptors, free the RX dma pool, drop wake locks, detach the
 * uart port, release the clock, and free the TX/RX command memory.
 */
static int __devexit msm_hs_remove(struct platform_device *pdev)
{

	struct msm_hs_port *msm_uport;
	struct device *dev;

	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
		printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
		return -EINVAL;
	}

	msm_uport = &q_uart_port[pdev->id];
	dev = msm_uport->uport.dev;

	dma_unmap_single(dev, msm_uport->rx.mapped_cmd_ptr, sizeof(dmov_box),
			 DMA_TO_DEVICE);
	dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
		      msm_uport->rx.rbuffer);
	dma_pool_destroy(msm_uport->rx.pool);

	dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32 *),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32 *),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box),
			 DMA_TO_DEVICE);

	wake_lock_destroy(&msm_uport->rx.wake_lock);
	wake_lock_destroy(&msm_uport->dma_wake_lock);

	uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
	clk_put(msm_uport->clk);

	/* Free the tx resources */
	kfree(msm_uport->tx.command_ptr);
	kfree(msm_uport->tx.command_ptr_ptr);

	/* Free the rx resources */
	kfree(msm_uport->rx.command_ptr);
	kfree(msm_uport->rx.command_ptr_ptr);

	iounmap(msm_uport->uport.membase);

	return 0;
}
/*
 * Turn on the UART clock and program its rate, holding the DMA wake
 * lock for as long as the clock is on. Must be called with the port
 * lock held. Returns 0 on success or a negative errno.
 *
 * Fix: the original error paths returned with the DMA wake lock still
 * held (and, on clk_set_rate failure, the clock still enabled), leaking
 * both until the next successful call. Unwind properly on error.
 */
static int msm_hs_init_clk_locked(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	wake_lock(&msm_uport->dma_wake_lock);
	ret = clk_enable(msm_uport->clk);
	if (ret) {
		printk(KERN_ERR "Error could not turn on UART clk\n");
		goto err_wake_unlock;
	}

	/* Set up the MREG/NREG/DREG/MNDREG */
	ret = clk_set_rate(msm_uport->clk, uport->uartclk);
	if (ret) {
		printk(KERN_WARNING "Error setting clock rate on UART\n");
		goto err_clk_disable;
	}

	msm_uport->clk_state = MSM_HS_CLK_ON;
	return 0;

err_clk_disable:
	clk_disable(msm_uport->clk);
err_wake_unlock:
	wake_unlock(&msm_uport->dma_wake_lock);
	return ret;
}
/* Enable and Disable clocks (Used for power management) */
static void msm_hs_pm(struct uart_port *uport, unsigned int state,
		      unsigned int oldstate)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* ports with their own wakeup mechanism manage clocks themselves */
	if (use_low_power_rx_wakeup(msm_uport) || msm_uport->exit_lpm_cb)
		return;  /* ignore linux PM states, use msm_hs_request_clock API */

	switch (state) {
	case 0:	/* full power */
		clk_enable(msm_uport->clk);
		break;
	case 3:	/* powered down */
		clk_disable(msm_uport->clk);
		break;
	default:
		printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state);
	}
}
/*
 * programs the UARTDM_CSR register with correct bit rates
 *
 * Interrupts should be disabled before we are called, as
 * we modify Set Baud rate
 * Set receive stale interrupt level, dependent on Bit Rate
 * Goal is to have around 8 ms before indicating stale.
 * roundup (((Bit Rate * .008) / 10) + 1)
 */
/*
 * Program the baud rate. Each case writes the CSR divisor selector
 * (high nibble = RX, low nibble = TX) and picks a stale timeout sized
 * to roughly 8 ms at that rate. Rates above 460800 switch the UART
 * clock itself to 16x the bit rate; unknown rates fall back to 9600.
 * Caller must hold the port lock with the clock enabled.
 */
static void msm_hs_set_bps_locked(struct uart_port *uport,
				  unsigned int bps)
{
	unsigned long rxstale;
	unsigned long data;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	switch (bps) {
	case 300:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x00);
		rxstale = 1;
		break;
	case 600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x11);
		rxstale = 1;
		break;
	case 1200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x22);
		rxstale = 1;
		break;
	case 2400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x33);
		rxstale = 1;
		break;
	case 4800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x44);
		rxstale = 1;
		break;
	case 9600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x55);
		rxstale = 2;
		break;
	case 14400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x66);
		rxstale = 3;
		break;
	case 19200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x77);
		rxstale = 4;
		break;
	case 28800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x88);
		rxstale = 6;
		break;
	case 38400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
		rxstale = 8;
		break;
	case 57600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
		rxstale = 16;
		break;
	case 76800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
		rxstale = 16;
		break;
	case 115200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
		rxstale = 31;
		break;
	case 230400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
		rxstale = 31;
		break;
	case 460800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		rxstale = 31;
		break;
	/* high rates: divisor 0xff, UART clock retuned to 16x below */
	case 4000000:
	case 3686400:
	case 3200000:
	case 3500000:
	case 3000000:
	case 2500000:
	case 1500000:
	case 1152000:
	case 1000000:
	case 921600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		rxstale = 31;
		break;
	default:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		/* default to 9600 */
		bps = 9600;
		rxstale = 2;
		break;
	}
	if (bps > 460800)
		uport->uartclk = bps * 16;
	else
		uport->uartclk = 7372800;  /* standard TCXO-derived rate */
	if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
		printk(KERN_WARNING "Error setting clock rate on UART\n");
		return;
	}

	/* stale timeout is split across the LSB and MSB fields of IPR */
	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);

	msm_hs_write(uport, UARTDM_IPR_ADDR, data);
}
/*
 * termios : new ktermios
 * oldtermios: old ktermios previous setting
 *
 * Configure the serial port: baud rate, parity, character size, stop
 * bits, and hardware flow control, then reset both FIFOs and flush any
 * RX DMA in flight (its data is now stale). Runs under the port lock
 * with the clock temporarily enabled.
 */
static void msm_hs_set_termios(struct uart_port *uport,
			       struct ktermios *termios,
			       struct ktermios *oldtermios)
{
	unsigned int bps;
	unsigned long data;
	unsigned long flags;
	unsigned int c_cflag = termios->c_cflag;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	spin_lock_irqsave(&uport->lock, flags);
	clk_enable(msm_uport->clk);

	/* 300 is the minimum baud supported by the driver  */
	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);

	/* Temporary remapping  200 BAUD to 3.2 mbps */
	if (bps == 200)
		bps = 3200000;

	msm_hs_set_bps_locked(uport, bps);

	data = msm_hs_read(uport, UARTDM_MR2_ADDR);
	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
	/* set parity */
	if (PARENB == (c_cflag & PARENB)) {
		if (PARODD == (c_cflag & PARODD))
			data |= ODD_PARITY;
		else if (CMSPAR == (c_cflag & CMSPAR))
			data |= SPACE_PARITY;
		else
			data |= EVEN_PARITY;
	}

	/* Set bits per char */
	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;

	switch (c_cflag & CSIZE) {
	case CS5:
		data |= FIVE_BPC;
		break;
	case CS6:
		data |= SIX_BPC;
		break;
	case CS7:
		data |= SEVEN_BPC;
		break;
	default:
		data |= EIGHT_BPC;
		break;
	}
	/* stop bits */
	if (c_cflag & CSTOPB) {
		data |= STOP_BIT_TWO;
	} else {
		/* otherwise 1 stop bit */
		data |= STOP_BIT_ONE;
	}
	data |= UARTDM_MR2_ERROR_MODE_BMSK;
	/* write parity/bits per char/stop bit configuration */
	msm_hs_write(uport, UARTDM_MR2_ADDR, data);

	/* Configure HW flow control */
	data = msm_hs_read(uport, UARTDM_MR1_ADDR);

	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);

	if (c_cflag & CRTSCTS) {
		data |= UARTDM_MR1_CTS_CTL_BMSK;
		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
	}

	msm_hs_write(uport, UARTDM_MR1_ADDR, data);

	uport->ignore_status_mask = termios->c_iflag & INPCK;
	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
	uport->read_status_mask = (termios->c_cflag & CREAD);

	/* mask off all interrupts while we reprogram and flush */
	msm_hs_write(uport, UARTDM_IMR_ADDR, 0);

	/* Set Transmit software time out */
	uart_update_timeout(uport, c_cflag, bps);

	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);

	/* discard any RX DMA captured under the old settings */
	if (msm_uport->rx.flush == FLUSH_NONE) {
		wake_lock(&msm_uport->rx.wake_lock);
		msm_uport->rx.flush = FLUSH_IGNORE;
		msm_dmov_flush(msm_uport->dma_rx_channel);
	}

	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);

	clk_disable(msm_uport->clk);
	spin_unlock_irqrestore(&uport->lock, flags);
}
/*
 * Standard uart_ops hook: report TIOCSER_TEMT when the transmit shift
 * register has fully drained (UARTDM status TXEMT bit), 0 otherwise.
 * The clock is briefly enabled around the register read.
 */
static unsigned int msm_hs_tx_empty(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	unsigned int status;
	unsigned int result;

	clk_enable(msm_uport->clk);
	status = msm_hs_read(uport, UARTDM_SR_ADDR);
	result = (status & UARTDM_SR_TXEMT_BMSK) ? TIOCSER_TEMT : 0;
	clk_disable(msm_uport->clk);

	return result;
}
/*
 * Standard API, Stop transmitter.
 * Any character in the transmit shift register is sent as
 * well as the current data mover transfer.
 *
 * Only clears the "ok to DMA more" flag; the in-flight DMA and shift
 * register contents are allowed to complete.
 */
static void msm_hs_stop_tx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	msm_uport->tx.tx_ready_int_en = 0;
}
/*
 * Standard API, Stop receiver as soon as possible.
 *
 * Function immediately terminates the operation of the
 * channel receiver and any incoming characters are lost. None
 * of the receiver status bits are affected by this command and
 * characters that are already in the receive FIFO are retained.
 */
static void msm_hs_stop_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	unsigned int data;

	clk_enable(msm_uport->clk);

	/* disable dlink */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	data &= ~UARTDM_RX_DM_EN_BMSK;
	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);

	/* Disable the receiver */
	if (msm_uport->rx.flush == FLUSH_NONE) {
		wake_lock(&msm_uport->rx.wake_lock);
		msm_dmov_flush(msm_uport->dma_rx_channel);
	}
	/* don't override a shutdown-initiated flush */
	if (msm_uport->rx.flush != FLUSH_SHUTDOWN)
		msm_uport->rx.flush = FLUSH_STOP;

	clk_disable(msm_uport->clk);
}
/*
 * Transmit the next chunk of data.
 *
 * Takes up to one buffer-sized, non-wrapping run from the circular
 * xmit buffer, builds the data-mover box command for it (rows of
 * 16-byte bursts), and enqueues the DMA. The TX_READY interrupt is
 * masked until the DMA completion callback re-enables it.
 */
static void msm_hs_submit_tx_locked(struct uart_port *uport)
{
	int left;
	int tx_count;
	dma_addr_t src_addr;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;

	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
		msm_hs_stop_tx_locked(uport);
		return;
	}

	tx->dma_in_flight = 1;

	/* bound the chunk by the buffer size and the wrap point */
	tx_count = uart_circ_chars_pending(tx_buf);

	if (UARTDM_TX_BUF_SIZE < tx_count)
		tx_count = UARTDM_TX_BUF_SIZE;

	left = UART_XMIT_SIZE - tx_buf->tail;

	if (tx_count > left)
		tx_count = left;

	src_addr = tx->dma_base + tx_buf->tail;
	dma_sync_single_for_device(uport->dev, src_addr, tx_count,
				   DMA_TO_DEVICE);

	/* rows of MSM_UARTDM_BURST_SIZE (16) bytes, rounded up */
	tx->command_ptr->num_rows = (((tx_count + 15) >> 4) << 16) |
				    ((tx_count + 15) >> 4);

	tx->command_ptr->src_row_addr = src_addr;

	dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr,
				   sizeof(dmov_box), DMA_TO_DEVICE);

	*tx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(tx->mapped_cmd_ptr);

	dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
				   sizeof(u32 *), DMA_TO_DEVICE);

	/* Save tx_count to use in Callback */
	tx->tx_count = tx_count;
	msm_hs_write(uport, UARTDM_NCF_TX_ADDR, tx_count);

	/* Disable the tx_ready interrupt */
	msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK;
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	msm_dmov_enqueue_cmd(msm_uport->dma_tx_channel, &tx->xfer);
}
/* Start to receive the next chunk of data */
/* Arm the next receive DMA transfer.  Caller holds uport->lock. */
static void msm_hs_start_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Re-arm stale detection and program a full rx DMA buffer. */
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
	msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE);
	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE);

	/* RXLEV interrupt signals the first incoming characters. */
	msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK;
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);

	msm_uport->rx.flush = FLUSH_NONE;
	msm_dmov_enqueue_cmd(msm_uport->dma_rx_channel, &msm_uport->rx.xfer);

	/* might have finished RX and be ready to clock off */
	hrtimer_start(&msm_uport->clk_off_timer, msm_uport->clk_off_delay,
		      HRTIMER_MODE_REL);
}
/* Enable the transmitter Interrupt */
/*
 * Standard API, start transmitter: kick a transfer by enabling the
 * tx-ready interrupt path and submitting the first chunk.
 */
static void msm_hs_start_tx_locked(struct uart_port *uport)
{
	struct msm_hs_port *hs = UARTDM_TO_MSM(uport);

	clk_enable(hs->clk);

	/* Give the board a chance to leave its low-power mode first. */
	if (hs->exit_lpm_cb)
		hs->exit_lpm_cb(uport);

	/* Only submit when tx was idle; an in-flight transfer chains the
	 * next chunk itself from the ISR. */
	if (!hs->tx.tx_ready_int_en) {
		hs->tx.tx_ready_int_en = 1;
		msm_hs_submit_tx_locked(uport);
	}

	clk_disable(hs->clk);
}
/*
* This routine is called when we are done with a DMA transfer
*
* This routine is registered with Data mover when we set
* up a Data Mover transfer. It is called from Data mover ISR
* when the DMA transfer is done.
*/
/*
 * Data mover tx completion callback (runs from the dmov ISR).
 * Re-enables the TX_READY interrupt so msm_hs_isr() finishes the
 * bookkeeping and submits the next chunk.
 */
static void msm_hs_dmov_tx_callback(struct msm_dmov_cmd *cmd_ptr,
				    unsigned int result,
				    struct msm_dmov_errdata *err)
{
	unsigned long flags;
	struct msm_hs_port *msm_uport;

	/* 0x80000002 is the expected completion status word. */
	WARN_ON(result != 0x80000002);	/* DMA did not finish properly */

	msm_uport = container_of(cmd_ptr, struct msm_hs_port, tx.xfer);

	spin_lock_irqsave(&msm_uport->uport.lock, flags);
	clk_enable(msm_uport->clk);

	msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK;
	msm_hs_write(&msm_uport->uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);

	clk_disable(msm_uport->clk);
	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
}
/*
* This routine is called when we are done with a DMA transfer or the
* a flush has been sent to the data mover driver.
*
* This routine is registered with Data mover when we set up a Data Mover
* transfer. It is called from Data mover ISR when the DMA transfer is done.
*/
/*
 * Data mover rx completion callback: runs when a receive transfer is
 * done or a flush was issued to the data mover.  Pushes the received
 * characters (and any error markers) into the tty flip buffer, drives
 * the rx flush state machine and re-arms the next transfer.
 */
static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
				    unsigned int result,
				    struct msm_dmov_errdata *err)
{
	int retval;
	int rx_count;
	unsigned long status;
	unsigned int error_f = 0;
	unsigned long flags;
	unsigned int flush;
	struct tty_struct *tty;
	struct uart_port *uport;
	struct msm_hs_port *msm_uport;

	msm_uport = container_of(cmd_ptr, struct msm_hs_port, rx.xfer);
	uport = &msm_uport->uport;

	spin_lock_irqsave(&uport->lock, flags);
	clk_enable(msm_uport->clk);

	tty = uport->state->port.tty;

	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);

	status = msm_hs_read(uport, UARTDM_SR_ADDR);

	/* overflow is not connect to data in a FIFO */
	if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
		     (uport->read_status_mask & CREAD))) {
		tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		uport->icount.buf_overrun++;
		error_f = 1;
	}

	if (!(uport->ignore_status_mask & INPCK))
		status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);

	if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
		/* Can not tell difference between parity & frame error */
		uport->icount.parity++;
		error_f = 1;
		/*
		 * Bug fix: the original test was inverted - it queued a
		 * TTY_PARITY marker only when IGNPAR asked for such
		 * errors to be IGNORED.  Report the error character
		 * unless the user requested it be ignored.
		 */
		if (!(uport->ignore_status_mask & IGNPAR))
			tty_insert_flip_char(tty, 0, TTY_PARITY);
	}

	if (error_f)
		msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);

	/* The forced RXSTALE flush requested by the clock-off path is done. */
	if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED)
		msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED;

	flush = msm_uport->rx.flush;
	if (flush == FLUSH_IGNORE)
		/* stale data: restart rx without pushing it to the tty */
		msm_hs_start_rx_locked(uport);
	if (flush == FLUSH_STOP) {
		msm_uport->rx.flush = FLUSH_SHUTDOWN;
		/*
		 * Bug fix: msm_hs_shutdown() sleeps on rx.wait until the
		 * flush state reaches FLUSH_SHUTDOWN, but nothing ever
		 * woke that queue - notify it here.
		 */
		wake_up(&msm_uport->rx.wait);
	}
	if (flush >= FLUSH_DATA_INVALID)
		goto out;

	/* Valid data: hand it to the tty layer. */
	rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);

	if (0 != (uport->read_status_mask & CREAD)) {
		retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
						rx_count);
		BUG_ON(retval != rx_count);
	}

	msm_hs_start_rx_locked(uport);

out:
	clk_disable(msm_uport->clk);
	/* release wakelock in 500ms, not immediately, because higher layers
	 * don't always take wakelocks when they should */
	wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
	spin_unlock_irqrestore(&uport->lock, flags);

	/* flip-buffer push is deferred to the driver's own workqueue */
	if (flush < FLUSH_DATA_INVALID)
		queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work);
}
/* Deferred work: push buffered rx characters up to the line discipline. */
static void msm_hs_tty_flip_buffer_work(struct work_struct *work)
{
	struct msm_hs_port *hs = container_of(work, struct msm_hs_port,
					      rx.tty_work);

	tty_flip_buffer_push(hs->uport.state->port.tty);
}
/*
* Standard API, Current states of modem control inputs
*
* Since CTS can be handled entirely by HARDWARE we always
* indicate clear to send and count on the TX FIFO to block when
* it fills up.
*
* - TIOCM_DCD
* - TIOCM_CTS
* - TIOCM_DSR
* - TIOCM_RI
* (Unsupported) DCD and DSR will return them high. RI will return low.
*/
static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
{
	/* CTS is handled entirely in hardware, so always report
	 * clear-to-send; DCD/DSR are reported asserted, RI deasserted. */
	return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR;
}
/*
* True enables UART auto RFR, which indicates we are ready for data if the RX
* buffer is not full. False disables auto RFR, and deasserts RFR to indicate
* we are not ready for data. Must be called with UART clock on.
*/
/*
 * auto_rfr != 0: enable UART auto RFR (ready while rx buffer not full).
 * auto_rfr == 0: disable auto RFR and deassert RFR to stop the sender.
 * Must be called with the UART clock on.
 */
static void set_rfr_locked(struct uart_port *uport, int auto_rfr)
{
	unsigned int mr1 = msm_hs_read(uport, UARTDM_MR1_ADDR);

	if (!auto_rfr) {
		/* disable auto ready-for-receiving */
		mr1 &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
		msm_hs_write(uport, UARTDM_MR1_ADDR, mr1);
		/* RFR is active low, set high */
		msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH);
	} else {
		/* enable auto ready-for-receiving */
		mr1 |= UARTDM_MR1_RX_RDY_CTL_BMSK;
		msm_hs_write(uport, UARTDM_MR1_ADDR, mr1);
	}
}
/*
* Standard API, used to set or clear RFR
*/
/* Standard API: RTS from the caller maps onto automatic RFR control. */
static void msm_hs_set_mctrl_locked(struct uart_port *uport,
				    unsigned int mctrl)
{
	struct msm_hs_port *hs = UARTDM_TO_MSM(uport);

	clk_enable(hs->clk);
	set_rfr_locked(uport, (mctrl & TIOCM_RTS) ? 1 : 0);
	clk_disable(hs->clk);
}
/* Standard API, Enable modem status (CTS) interrupt */
/* Standard API: enable modem status (CTS) change interrupts. */
static void msm_hs_enable_ms_locked(struct uart_port *uport)
{
	struct msm_hs_port *hs = UARTDM_TO_MSM(uport);

	clk_enable(hs->clk);

	/* Unmask delta-CTS so modem-status changes raise an interrupt. */
	hs->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
	msm_hs_write(uport, UARTDM_IMR_ADDR, hs->imr_reg);

	clk_disable(hs->clk);
}
/*
* Standard API, Break Signal
*
* Control the transmission of a break signal. ctl eq 0 => break
* signal terminate ctl ne 0 => start break signal
*/
/* Standard API: ctl != 0 starts a break signal, ctl == 0 stops it. */
static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
{
	struct msm_hs_port *hs = UARTDM_TO_MSM(uport);
	unsigned int cmd = ctl ? START_BREAK : STOP_BREAK;

	clk_enable(hs->clk);
	msm_hs_write(uport, UARTDM_CR_ADDR, cmd);
	clk_disable(hs->clk);
}
/* Standard API: autoconfigure the port type and claim its resources. */
static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&uport->lock, flags);
	if (cfg_flags & UART_CONFIG_TYPE) {
		uport->type = PORT_MSM;
		msm_hs_request_port(uport);
	}
	spin_unlock_irqrestore(&uport->lock, flags);
}
/* Handle CTS changes (Called from interrupt handler) */
/*
 * Handle CTS changes (Called from interrupt handler).
 * NOTE: this function acquires uport->lock itself, so callers must NOT
 * already hold it (see the delta-CTS dispatch in msm_hs_isr()).
 */
static void msm_hs_handle_delta_cts(struct uart_port *uport)
{
	unsigned long flags;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	spin_lock_irqsave(&uport->lock, flags);
	clk_enable(msm_uport->clk);

	/* clear interrupt */
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
	uport->icount.cts++;

	clk_disable(msm_uport->clk);
	spin_unlock_irqrestore(&uport->lock, flags);

	/* clear the IOCTL TIOCMIWAIT if called */
	wake_up_interruptible(&uport->state->port.delta_msr_wait);
}
/* check if the TX path is flushed, and if so clock off
* returns 0 did not clock off, need to retry (still sending final byte)
* -1 did not clock off, do not retry
* 1 if we clocked off
*/
/*
 * Try to complete a pending clock-off request.  Caller holds uport->lock.
 * Returns 0 (retry later), -1 (abandon, not in request-off state or
 * traffic pending) or 1 (clock actually turned off).
 */
static int msm_hs_check_clock_off_locked(struct uart_port *uport)
{
	unsigned long sr_status;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct circ_buf *tx_buf = &uport->state->xmit;

	/* Cancel if tx tty buffer is not empty, dma is in flight,
	 * or tx fifo is not empty, or rx fifo is not empty */
	if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF ||
	    !uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight ||
	    (msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) ||
	    !(msm_uport->imr_reg & UARTDM_ISR_RXLEV_BMSK)) {
		return -1;
	}

	/* Make sure the uart is finished with the last byte */
	sr_status = msm_hs_read(uport, UARTDM_SR_ADDR);
	if (!(sr_status & UARTDM_SR_TXEMT_BMSK))
		return 0;  /* retry */

	/* Make sure forced RXSTALE flush complete */
	switch (msm_uport->clk_req_off_state) {
	case CLK_REQ_OFF_START:
		/* Force a stale event so rx residue drains out of the
		 * DMA engine before the clock is removed. */
		msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
		msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
		return 0;  /* RXSTALE flush not complete - retry */
	case CLK_REQ_OFF_RXSTALE_ISSUED:
	case CLK_REQ_OFF_FLUSH_ISSUED:
		return 0;  /* RXSTALE flush not complete - retry */
	case CLK_REQ_OFF_RXSTALE_FLUSHED:
		break;  /* continue */
	}

	if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
		if (msm_uport->rx.flush == FLUSH_NONE)
			msm_hs_stop_rx_locked(uport);
		return 0;  /* come back later to really clock off */
	}

	/* we really want to clock off */
	clk_disable(msm_uport->clk);
	msm_uport->clk_state = MSM_HS_CLK_OFF;
	wake_unlock(&msm_uport->dma_wake_lock);

	/* Arm the rx wakeup irq; the first (already pending) interrupt
	 * after enable_irq() is ignored by the wakeup ISR. */
	if (use_low_power_rx_wakeup(msm_uport)) {
		msm_uport->rx_wakeup.ignore = 1;
		enable_irq(msm_uport->rx_wakeup.irq);
	}
	return 1;
}
static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer)
{
unsigned long flags;
int ret = HRTIMER_NORESTART;
struct msm_hs_port *msm_uport = container_of(timer, struct msm_hs_port,
clk_off_timer);
struct uart_port *uport = &msm_uport->uport;
spin_lock_irqsave(&uport->lock, flags);
if (!msm_hs_check_clock_off_locked(uport)) {
hrtimer_forward_now(timer, msm_uport->clk_off_delay);
ret = HRTIMER_RESTART;
}
spin_unlock_irqrestore(&uport->lock, flags);
return ret;
}
static irqreturn_t msm_hs_isr(int irq, void *dev)
{
unsigned long flags;
unsigned long isr_status;
struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
struct uart_port *uport = &msm_uport->uport;
struct circ_buf *tx_buf = &uport->state->xmit;
struct msm_hs_tx *tx = &msm_uport->tx;
struct msm_hs_rx *rx = &msm_uport->rx;
spin_lock_irqsave(&uport->lock, flags);
isr_status = msm_hs_read(uport, UARTDM_MISR_ADDR);
/* Uart RX starting */
if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
wake_lock(&rx->wake_lock); /* hold wakelock while rx dma */
msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
}
/* Stale rx interrupt */
if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
if (msm_uport->clk_req_off_state == CLK_REQ_OFF_RXSTALE_ISSUED)
msm_uport->clk_req_off_state =
CLK_REQ_OFF_FLUSH_ISSUED;
if (rx->flush == FLUSH_NONE) {
rx->flush = FLUSH_DATA_READY;
msm_dmov_flush(msm_uport->dma_rx_channel);
}
}
/* tx ready interrupt */
if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
/* Clear TX Ready */
msm_hs_write(uport, UARTDM_CR_ADDR, CLEAR_TX_READY);
if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
msm_hs_write(uport, UARTDM_IMR_ADDR,
msm_uport->imr_reg);
}
/* Complete DMA TX transactions and submit new transactions */
tx_buf->tail = (tx_buf->tail + tx->tx_count) & ~UART_XMIT_SIZE;
tx->dma_in_flight = 0;
uport->icount.tx += tx->tx_count;
if (tx->tx_ready_int_en)
msm_hs_submit_tx_locked(uport);
if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
uart_write_wakeup(uport);
}
if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
/* TX FIFO is empty */
msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
if (!msm_hs_check_clock_off_locked(uport))
hrtimer_start(&msm_uport->clk_off_timer,
msm_uport->clk_off_delay,
HRTIMER_MODE_REL);
}
/* Change in CTS interrupt */
if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
msm_hs_handle_delta_cts(uport);
spin_unlock_irqrestore(&uport->lock, flags);
return IRQ_HANDLED;
}
/*
 * Begin the clock-off handshake.  Caller holds uport->lock.  The actual
 * clk_disable() happens later in msm_hs_check_clock_off_locked() once
 * TX and RX have drained.
 */
void msm_hs_request_clock_off_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	if (msm_uport->clk_state == MSM_HS_CLK_ON) {
		msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF;
		msm_uport->clk_req_off_state = CLK_REQ_OFF_START;
		if (!use_low_power_rx_wakeup(msm_uport))
			/* no wakeup gpio: deassert RFR so the remote
			 * stops sending while we are clocked down */
			set_rfr_locked(uport, 0);
		/* TXLEV tells us when the tx fifo finally empties. */
		msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
		msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	}
}
EXPORT_SYMBOL(msm_hs_request_clock_off_locked);
/* request to turn off uart clock once pending TX is flushed */
void msm_hs_request_clock_off(struct uart_port *uport)
{
	unsigned long flags;

	/* Locking wrapper around msm_hs_request_clock_off_locked(). */
	spin_lock_irqsave(&uport->lock, flags);
	msm_hs_request_clock_off_locked(uport);
	spin_unlock_irqrestore(&uport->lock, flags);
}
EXPORT_SYMBOL(msm_hs_request_clock_off);
/*
 * Bring the uart clock back on (or cancel a pending clock-off request)
 * and restore the rx path.  Caller holds uport->lock.
 */
void msm_hs_request_clock_on_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	unsigned int data;

	switch (msm_uport->clk_state) {
	case MSM_HS_CLK_OFF:
		wake_lock(&msm_uport->dma_wake_lock);
		clk_enable(msm_uport->clk);
		disable_irq_nosync(msm_uport->rx_wakeup.irq);
		/* fall-through */
	case MSM_HS_CLK_REQUEST_OFF:
		if (msm_uport->rx.flush == FLUSH_STOP ||
		    msm_uport->rx.flush == FLUSH_SHUTDOWN) {
			/* rx was torn down on the way to clock-off:
			 * reset it and re-enable rx data-mover mode */
			msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
			data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
			data |= UARTDM_RX_DM_EN_BMSK;
			msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
		}
		hrtimer_try_to_cancel(&msm_uport->clk_off_timer);
		if (msm_uport->rx.flush == FLUSH_SHUTDOWN)
			msm_hs_start_rx_locked(uport);
		if (!use_low_power_rx_wakeup(msm_uport))
			set_rfr_locked(uport, 1);
		if (msm_uport->rx.flush == FLUSH_STOP)
			/* pending rx completion will be discarded */
			msm_uport->rx.flush = FLUSH_IGNORE;
		msm_uport->clk_state = MSM_HS_CLK_ON;
		break;
	case MSM_HS_CLK_ON:
		break;
	case MSM_HS_CLK_PORT_OFF:
		break;
	}
}
EXPORT_SYMBOL(msm_hs_request_clock_on_locked);
/* Locking wrapper around msm_hs_request_clock_on_locked(). */
void msm_hs_request_clock_on(struct uart_port *uport)
{
	unsigned long flags;

	spin_lock_irqsave(&uport->lock, flags);
	msm_hs_request_clock_on_locked(uport);
	spin_unlock_irqrestore(&uport->lock, flags);
}
EXPORT_SYMBOL(msm_hs_request_clock_on);
/*
 * GPIO wakeup interrupt: fires while the uart is clocked off and the
 * remote starts transmitting.  Turns the clock back on and optionally
 * injects a replacement character into the tty.
 */
static irqreturn_t msm_hs_rx_wakeup_isr(int irq, void *dev)
{
	unsigned int wakeup = 0;
	unsigned long flags;
	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
	struct uart_port *uport = &msm_uport->uport;
	struct tty_struct *tty = NULL;

	spin_lock_irqsave(&uport->lock, flags);
	if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
		/* ignore the first irq - it is a pending irq that occured
		 * before enable_irq() */
		if (msm_uport->rx_wakeup.ignore)
			msm_uport->rx_wakeup.ignore = 0;
		else
			wakeup = 1;
	}

	if (wakeup) {
		/* the uart was clocked off during an rx, wake up and
		 * optionally inject char into tty rx */
		msm_hs_request_clock_on_locked(uport);
		if (msm_uport->rx_wakeup.inject_rx) {
			/* NOTE(review): presumably the wakeup edge clipped
			 * a character, hence the board-configured
			 * replacement - confirm against platform data. */
			tty = uport->state->port.tty;
			tty_insert_flip_char(tty,
					     msm_uport->rx_wakeup.rx_to_inject,
					     TTY_NORMAL);
			queue_work(msm_hs_workqueue, &msm_uport->rx.tty_work);
		}
	}

	spin_unlock_irqrestore(&uport->lock, flags);

	return IRQ_HANDLED;
}
/* Standard API: human-readable port type string. */
static const char *msm_hs_type(struct uart_port *port)
{
	return "MSM HS UART";
}
/* Called when port is opened */
/* Called when port is opened: map buffers, program the hardware and
 * arm the first receive transfer. */
static int msm_hs_startup(struct uart_port *uport)
{
	int ret;
	int rfr_level;
	unsigned long flags;
	unsigned int data;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct circ_buf *tx_buf = &uport->state->xmit;
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct msm_hs_rx *rx = &msm_uport->rx;

	/* Auto-RFR trigger: leave one 16-byte burst of headroom below the
	 * fifo size. */
	rfr_level = uport->fifosize;
	if (rfr_level > 16)
		rfr_level -= 16;

	/* Map the serial-core xmit buffer for DMA; unmapped in shutdown. */
	tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
				      DMA_TO_DEVICE);

	/* do not let tty layer execute RX in global workqueue, use a
	 * dedicated workqueue managed by this driver */
	uport->state->port.tty->low_latency = 1;

	/* turn on uart clk */
	ret = msm_hs_init_clk_locked(uport);
	/* NOTE(review): the error returns in this function leave
	 * tx->dma_base mapped (and, further down, the uart irq
	 * requested) - leaks on the failure paths; compare with later
	 * driver revisions before relying on them. */
	if (unlikely(ret))
		return ret;

	/* Set auto RFR Level */
	data = msm_hs_read(uport, UARTDM_MR1_ADDR);
	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
	data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
	data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
	msm_hs_write(uport, UARTDM_MR1_ADDR, data);

	/* Make sure RXSTALE count is non-zero */
	data = msm_hs_read(uport, UARTDM_IPR_ADDR);
	if (!data) {
		data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
		msm_hs_write(uport, UARTDM_IPR_ADDR, data);
	}

	/* Enable Data Mover Mode */
	data = UARTDM_TX_DM_EN_BMSK | UARTDM_RX_DM_EN_BMSK;
	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);

	/* Reset both directions and all latched status, then enable. */
	/* Reset TX */
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_BREAK_INT);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
	msm_hs_write(uport, UARTDM_CR_ADDR, RFR_LOW);
	/* Turn on Uart Receiver */
	msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_EN_BMSK);

	/* Turn on Uart Transmitter */
	msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_EN_BMSK);

	/* Initialize the tx */
	tx->tx_ready_int_en = 0;
	tx->dma_in_flight = 0;

	tx->xfer.complete_func = msm_hs_dmov_tx_callback;
	tx->xfer.exec_func = NULL;

	/* Static parts of the tx box command; the per-transfer fields are
	 * filled in by msm_hs_submit_tx_locked(). */
	tx->command_ptr->cmd = CMD_LC |
		CMD_DST_CRCI(msm_uport->dma_tx_crci) | CMD_MODE_BOX;

	tx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
				       | (MSM_UARTDM_BURST_SIZE);

	tx->command_ptr->row_offset = (MSM_UARTDM_BURST_SIZE << 16);

	tx->command_ptr->dst_row_addr =
		msm_uport->uport.mapbase + UARTDM_TF_ADDR;

	/* Turn on Uart Receive */
	rx->xfer.complete_func = msm_hs_dmov_rx_callback;
	rx->xfer.exec_func = NULL;

	rx->command_ptr->cmd = CMD_LC |
		CMD_SRC_CRCI(msm_uport->dma_rx_crci) | CMD_MODE_BOX;

	rx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
				       | (MSM_UARTDM_BURST_SIZE);

	rx->command_ptr->row_offset = MSM_UARTDM_BURST_SIZE;

	rx->command_ptr->src_row_addr = uport->mapbase + UARTDM_RF_ADDR;

	msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;

	/* Enable reading the current CTS, no harm even if CTS is ignored */
	msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;

	msm_hs_write(uport, UARTDM_TFWR_ADDR, 0);  /* TXLEV on empty TX fifo */

	ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
			  "msm_hs_uart", msm_uport);
	if (unlikely(ret))
		return ret;
	if (use_low_power_rx_wakeup(msm_uport)) {
		/* move from startup **/
		if (unlikely(set_irq_wake(msm_uport->rx_wakeup.irq, 1)))
			return -ENXIO;
		ret = request_irq(msm_uport->rx_wakeup.irq,
				  msm_hs_rx_wakeup_isr,
				  IRQF_TRIGGER_FALLING,
				  "msm_hs_rx_wakeup", msm_uport);
		if (unlikely(ret))
			return ret;
		/* stays disabled until the clock is actually turned off */
		disable_irq(msm_uport->rx_wakeup.irq);
	}

	spin_lock_irqsave(&uport->lock, flags);

	msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
	msm_hs_start_rx_locked(uport);

	spin_unlock_irqrestore(&uport->lock, flags);

	return 0;
}
/* Initialize tx and rx data structures */
/*
 * Initialize tx and rx data structures: command buffers, DMA mappings,
 * wake locks and the rx buffer pool.
 *
 * Bug fix: the original returned -ENOMEM directly from every failure
 * point, leaking everything acquired earlier (kmalloc'd command
 * buffers, DMA mappings, wake locks, the rx dma pool).  Unwind with
 * goto-based cleanup instead.
 */
static int uartdm_init_port(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct msm_hs_rx *rx = &msm_uport->rx;

	/* Allocate the command pointer. Needs to be 64 bit aligned */
	tx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
	if (!tx->command_ptr)
		return -ENOMEM;

	tx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
	if (!tx->command_ptr_ptr) {
		ret = -ENOMEM;
		goto free_tx_command_ptr;
	}

	tx->mapped_cmd_ptr = dma_map_single(uport->dev, tx->command_ptr,
					    sizeof(dmov_box), DMA_TO_DEVICE);
	tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev,
						tx->command_ptr_ptr,
						sizeof(u32 *), DMA_TO_DEVICE);
	tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);

	init_waitqueue_head(&rx->wait);
	wake_lock_init(&rx->wake_lock, WAKE_LOCK_SUSPEND, "msm_serial_hs_rx");
	wake_lock_init(&msm_uport->dma_wake_lock, WAKE_LOCK_SUSPEND,
		       "msm_serial_hs_dma");

	rx->pool = dma_pool_create("rx_buffer_pool", uport->dev,
				   UARTDM_RX_BUF_SIZE, 16, 0);
	if (!rx->pool) {
		ret = -ENOMEM;
		goto exit_tx_teardown;
	}

	rx->buffer = dma_pool_alloc(rx->pool, GFP_KERNEL, &rx->rbuffer);
	if (!rx->buffer) {
		ret = -ENOMEM;
		goto destroy_pool;
	}

	/* Allocate the command pointer. Needs to be 64 bit aligned */
	rx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
	if (!rx->command_ptr) {
		ret = -ENOMEM;
		goto free_rx_buffer;
	}

	rx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
	if (!rx->command_ptr_ptr) {
		ret = -ENOMEM;
		goto free_rx_command_ptr;
	}

	/* Whole-buffer box command into the rx bounce buffer. */
	rx->command_ptr->num_rows = ((UARTDM_RX_BUF_SIZE >> 4) << 16) |
				    (UARTDM_RX_BUF_SIZE >> 4);
	rx->command_ptr->dst_row_addr = rx->rbuffer;

	rx->mapped_cmd_ptr = dma_map_single(uport->dev, rx->command_ptr,
					    sizeof(dmov_box), DMA_TO_DEVICE);

	*rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr);

	rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr,
					    sizeof(u32 *), DMA_TO_DEVICE);
	rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);

	INIT_WORK(&rx->tty_work, msm_hs_tty_flip_buffer_work);

	return 0;

free_rx_command_ptr:
	kfree(rx->command_ptr);
free_rx_buffer:
	dma_pool_free(rx->pool, rx->buffer, rx->rbuffer);
destroy_pool:
	dma_pool_destroy(rx->pool);
exit_tx_teardown:
	wake_lock_destroy(&msm_uport->dma_wake_lock);
	wake_lock_destroy(&rx->wake_lock);
	dma_unmap_single(uport->dev, tx->mapped_cmd_ptr_ptr,
			 sizeof(u32 *), DMA_TO_DEVICE);
	dma_unmap_single(uport->dev, tx->mapped_cmd_ptr,
			 sizeof(dmov_box), DMA_TO_DEVICE);
	kfree(tx->command_ptr_ptr);
free_tx_command_ptr:
	kfree(tx->command_ptr);
	return ret;
}
static int msm_hs_probe(struct platform_device *pdev)
{
int ret;
struct uart_port *uport;
struct msm_hs_port *msm_uport;
struct resource *resource;
struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
/* for debug */
printk(KERN_INFO "PRUE G\n");
if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
return -EINVAL;
}
msm_uport = &q_uart_port[pdev->id];
uport = &msm_uport->uport;
uport->dev = &pdev->dev;
resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(!resource))
return -ENXIO;
uport->mapbase = resource->start; /* virtual address */
uport->membase = ioremap(uport->mapbase, PAGE_SIZE);
if (unlikely(!uport->membase))
return -ENOMEM;
uport->irq = platform_get_irq(pdev, 0);
/*
if (unlikely(uport->irq < 0))
return -ENXIO;
*/
if (unlikely(set_irq_wake(uport->irq, 1)))
return -ENXIO;
if (pdata == NULL || pdata->rx_wakeup_irq < 0)
msm_uport->rx_wakeup.irq = -1;
else {
msm_uport->rx_wakeup.irq = pdata->rx_wakeup_irq;
msm_uport->rx_wakeup.ignore = 1;
msm_uport->rx_wakeup.inject_rx = pdata->inject_rx_on_wakeup;
msm_uport->rx_wakeup.rx_to_inject = pdata->rx_to_inject;
if (unlikely(msm_uport->rx_wakeup.irq < 0))
return -ENXIO;
/* move this to startup
* if (unlikely(set_irq_wake(msm_uport->rx_wakeup.irq, 1)))
* return -ENXIO;
*/
}
if (pdata == NULL)
msm_uport->exit_lpm_cb = NULL;
else
msm_uport->exit_lpm_cb = pdata->exit_lpm_cb;
resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
"uartdm_channels");
if (unlikely(!resource))
return -ENXIO;
msm_uport->dma_tx_channel = resource->start;
msm_uport->dma_rx_channel = resource->end;
resource = platform_get_resource_byname(pdev, IORESOURCE_DMA,
"uartdm_crci");
if (unlikely(!resource))
return -ENXIO;
msm_uport->dma_tx_crci = resource->start;
msm_uport->dma_rx_crci = resource->end;
uport->iotype = UPIO_MEM;
uport->fifosize = 64;
uport->ops = &msm_hs_ops;
uport->flags = UPF_BOOT_AUTOCONF;
uport->uartclk = 7372800;
msm_uport->imr_reg = 0x0;
msm_uport->clk = clk_get(&pdev->dev, "uartdm_clk");
if (IS_ERR(msm_uport->clk))
return PTR_ERR(msm_uport->clk);
ret = uartdm_init_port(uport);
if (unlikely(ret))
return ret;
/* configure the CR Protection to Enable */
msm_hs_write(uport, UARTDM_CR_ADDR, CR_PROTECTION_EN);
msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
msm_uport->clk_off_timer.function = msm_hs_clk_off_retry;
msm_uport->clk_off_delay = ktime_set(0, 1000000); /* 1ms */
uport->line = pdev->id;
return uart_add_one_port(&msm_hs_driver, uport);
}
/*
 * Module init: create the dedicated rx workqueue and register the uart
 * and platform drivers.
 *
 * Bug fix: the create_singlethread_workqueue() result was never
 * checked, and the workqueue was leaked when either registration
 * failed.  Check it and unwind on every error path.
 */
static int __init msm_serial_hs_init(void)
{
	int ret;
	int i;

	/* Init all UARTS as non-configured */
	for (i = 0; i < UARTDM_NR; i++)
		q_uart_port[i].uport.type = PORT_UNKNOWN;

	msm_hs_workqueue = create_singlethread_workqueue("msm_serial_hs");
	if (unlikely(!msm_hs_workqueue))
		return -ENOMEM;

	ret = uart_register_driver(&msm_hs_driver);
	if (unlikely(ret)) {
		printk(KERN_ERR "%s failed to load\n", __func__);
		goto err_destroy_wq;
	}

	ret = platform_driver_register(&msm_serial_hs_platform_driver);
	if (ret) {
		printk(KERN_ERR "%s failed to load\n", __func__);
		uart_unregister_driver(&msm_hs_driver);
		goto err_destroy_wq;
	}

	printk(KERN_INFO "msm_serial_hs module loaded\n");
	return 0;

err_destroy_wq:
	destroy_workqueue(msm_hs_workqueue);
	return ret;
}
/*
* Called by the upper layer when port is closed.
* - Disables the port
* - Unhook the ISR
*/
static void msm_hs_shutdown(struct uart_port *uport)
{
	unsigned long flags;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* rx must already be stopping or fully shut down by now. */
	BUG_ON(msm_uport->rx.flush < FLUSH_STOP);

	spin_lock_irqsave(&uport->lock, flags);
	clk_enable(msm_uport->clk);

	/* Disable the transmitter */
	msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK);
	/* Disable the receiver */
	msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_DISABLE_BMSK);

	/* disable irq wakeup when shutdown **/
	if (use_low_power_rx_wakeup(msm_uport))
		if (unlikely(set_irq_wake(msm_uport->rx_wakeup.irq, 0)))
			/* NOTE(review): returning here leaks the spinlock
			 * (held with irqs off) and the clk_enable above -
			 * almost certainly a bug; should fall through. */
			return;

	/* Free the interrupt */
	/* NOTE(review): free_irq() and wait_event() below can sleep, but
	 * we hold a spinlock with interrupts disabled here; the rx dmov
	 * callback that reaches FLUSH_SHUTDOWN also takes this same
	 * lock.  Verify lock ordering against later driver revisions. */
	free_irq(uport->irq, msm_uport);
	if (use_low_power_rx_wakeup(msm_uport))
		free_irq(msm_uport->rx_wakeup.irq, msm_uport);

	msm_uport->imr_reg = 0;
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);

	wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN);

	clk_disable(msm_uport->clk);  /* to balance local clk_enable() */
	if (msm_uport->clk_state != MSM_HS_CLK_OFF) {
		wake_unlock(&msm_uport->dma_wake_lock);
		clk_disable(msm_uport->clk);  /* to balance clk_state */
	}
	msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;

	/* Unmap the xmit buffer that startup mapped. */
	dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_unlock_irqrestore(&uport->lock, flags);

	/* If a flip-buffer push was queued, run it one last time. */
	if (cancel_work_sync(&msm_uport->rx.tty_work))
		msm_hs_tty_flip_buffer_work(&msm_uport->rx.tty_work);

	/* make sure wake_lock is released */
	wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 10);
}
/* Module exit: unregister drivers, then tear down the rx workqueue. */
static void __exit msm_serial_hs_exit(void)
{
	printk(KERN_INFO "msm_serial_hs module removed\n");
	platform_driver_unregister(&msm_serial_hs_platform_driver);
	uart_unregister_driver(&msm_hs_driver);
	destroy_workqueue(msm_hs_workqueue);
}
/* Platform driver glue; bound by name "msm_serial_hs". */
static struct platform_driver msm_serial_hs_platform_driver = {
	.probe = msm_hs_probe,
	.remove = msm_hs_remove,
	.driver = {
		.name = "msm_serial_hs",
	},
};

/* serial-core driver: devices appear as ttyHS<n>; no console support. */
static struct uart_driver msm_hs_driver = {
	.owner = THIS_MODULE,
	.driver_name = "msm_serial_hs",
	.dev_name = "ttyHS",
	.nr = UARTDM_NR,
	.cons = 0,
};

/* uart_ops vector; the *_locked entries are invoked by the serial core
 * with uport->lock already held (serial-core convention). */
static struct uart_ops msm_hs_ops = {
	.tx_empty = msm_hs_tx_empty,
	.set_mctrl = msm_hs_set_mctrl_locked,
	.get_mctrl = msm_hs_get_mctrl_locked,
	.stop_tx = msm_hs_stop_tx_locked,
	.start_tx = msm_hs_start_tx_locked,
	.stop_rx = msm_hs_stop_rx_locked,
	.enable_ms = msm_hs_enable_ms_locked,
	.break_ctl = msm_hs_break_ctl,
	.startup = msm_hs_startup,
	.shutdown = msm_hs_shutdown,
	.set_termios = msm_hs_set_termios,
	.pm = msm_hs_pm,
	.type = msm_hs_type,
	.config_port = msm_hs_config_port,
	.release_port = msm_hs_release_port,
	.request_port = msm_hs_request_port,
};
/* Module entry/exit points and metadata. */
module_init(msm_serial_hs_init);
module_exit(msm_serial_hs_exit);

MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
MODULE_VERSION("1.2");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
lirokoa/android_kernel_samsung_smdk4412 | arch/arm/mach-exynos/mach-smdk4x12.c | 130 | 108806 | /* linux/arch/arm/mach-exynos/mach-smdk4x12.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>
#include <linux/clk.h>
#include <linux/lcd.h>
#include <linux/gpio.h>
#include <linux/gpio_event.h>
#include <linux/i2c.h>
#include <linux/pwm_backlight.h>
#include <linux/input.h>
#include <linux/mmc/host.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/max8649.h>
#include <linux/regulator/fixed.h>
#include <linux/mfd/wm8994/pdata.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/max77686.h>
#include <linux/v4l2-mediabus.h>
#include <linux/memblock.h>
#include <linux/delay.h>
#include <linux/smsc911x.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <asm/mach/arch.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
#include <plat/exynos4.h>
#include <plat/cpu.h>
#include <plat/clock.h>
#include <plat/keypad.h>
#include <plat/devs.h>
#include <plat/fb.h>
#include <plat/fb-s5p.h>
#include <plat/fb-core.h>
#include <plat/regs-fb-v4.h>
#include <plat/backlight.h>
#include <plat/gpio-cfg.h>
#include <plat/regs-adc.h>
#include <plat/adc.h>
#include <plat/iic.h>
#include <plat/pd.h>
#include <plat/sdhci.h>
#include <plat/mshci.h>
#include <plat/ehci.h>
#include <plat/usbgadget.h>
#include <plat/usb-switch.h>
#include <plat/s3c64xx-spi.h>
#if defined(CONFIG_VIDEO_FIMC)
#include <plat/fimc.h>
#elif defined(CONFIG_VIDEO_SAMSUNG_S5P_FIMC)
#include <plat/fimc-core.h>
#include <media/s5p_fimc.h>
#endif
#if defined(CONFIG_VIDEO_FIMC_MIPI)
#include <plat/csis.h>
#elif defined(CONFIG_VIDEO_S5P_MIPI_CSIS)
#include <plat/mipi_csis.h>
#endif
#include <plat/tvout.h>
#include <plat/media.h>
#include <plat/regs-srom.h>
#include <plat/s5p-sysmmu.h>
#include <plat/tv-core.h>
#if defined(CONFIG_VIDEO_SAMSUNG_S5P_MFC) || defined(CONFIG_VIDEO_MFC5X)
#include <plat/s5p-mfc.h>
#endif
#include <media/s5k4ba_platform.h>
#include <media/s5k4ea_platform.h>
#include <media/exynos_flite.h>
#include <media/exynos_fimc_is.h>
#include <video/platform_lcd.h>
#include <media/m5mo_platform.h>
#include <media/m5mols.h>
#include <mach/board_rev.h>
#include <mach/map.h>
#include <mach/spi-clocks.h>
#include <mach/exynos-ion.h>
#include <mach/regs-pmu.h>
#ifdef CONFIG_EXYNOS4_DEV_DWMCI
#include <mach/dwmci.h>
#endif
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
#include <mach/secmem.h>
#endif
#include <mach/dev.h>
#include <mach/ppmu.h>
#ifdef CONFIG_EXYNOS_C2C
#include <mach/c2c.h>
#endif
#ifdef CONFIG_FB_S5P_MIPI_DSIM
#include <mach/mipi_ddi.h>
#include <mach/dsim.h>
#endif
#include <plat/fb-s5p.h>
#ifdef CONFIG_FB_S5P_EXTDSP
/* Minimal panel geometry descriptor for the EXTDSP framebuffer. */
struct s3cfb_extdsp_lcd {
	int width;	/* horizontal resolution, pixels */
	int height;	/* vertical resolution, pixels */
	int bpp;	/* bits per pixel */
};
#endif
#include <plat/fimg2d.h>
#include <mach/dev-sysmmu.h>
#ifdef CONFIG_VIDEO_SAMSUNG_S5P_FIMC
#include <plat/fimc-core.h>
#include <media/s5p_fimc.h>
#endif
#ifdef CONFIG_VIDEO_JPEG_V2X
#include <plat/jpeg.h>
#endif
#ifdef CONFIG_REGULATOR_S5M8767
#include <linux/mfd/s5m87xx/s5m-core.h>
#include <linux/mfd/s5m87xx/s5m-pmic.h>
#endif
#if defined(CONFIG_EXYNOS_SETUP_THERMAL)
#include <plat/s5p-tmu.h>
#endif
#define REG_INFORM4 (S5P_INFORM4)
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDK4X12_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
S3C2410_UCON_TXIRQMODE | \
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI | \
S3C2443_UCON_RXERR_IRQEN)
#define SMDK4X12_ULCON_DEFAULT S3C2410_LCON_CS8
#define SMDK4X12_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
S5PV210_UFCON_TXTRIG4 | \
S5PV210_UFCON_RXTRIG4)
/* All four UART ports share the same default register configuration. */
static struct s3c2410_uartcfg smdk4x12_uartcfgs[] __initdata = {
[0] = {
.hwport = 0,
.flags = 0,
.ucon = SMDK4X12_UCON_DEFAULT,
.ulcon = SMDK4X12_ULCON_DEFAULT,
.ufcon = SMDK4X12_UFCON_DEFAULT,
},
[1] = {
.hwport = 1,
.flags = 0,
.ucon = SMDK4X12_UCON_DEFAULT,
.ulcon = SMDK4X12_ULCON_DEFAULT,
.ufcon = SMDK4X12_UFCON_DEFAULT,
},
[2] = {
.hwport = 2,
.flags = 0,
.ucon = SMDK4X12_UCON_DEFAULT,
.ulcon = SMDK4X12_ULCON_DEFAULT,
.ufcon = SMDK4X12_UFCON_DEFAULT,
},
[3] = {
.hwport = 3,
.flags = 0,
.ucon = SMDK4X12_UCON_DEFAULT,
.ulcon = SMDK4X12_ULCON_DEFAULT,
.ufcon = SMDK4X12_UFCON_DEFAULT,
},
};
/* On-board SMSC911x ethernet: mapped into SROM bank 1, IRQ on EINT5 (active low). */
static struct resource smdk4x12_smsc911x_resources[] = {
[0] = {
.start = EXYNOS4_PA_SROM_BANK(1),
.end = EXYNOS4_PA_SROM_BANK(1) + SZ_64K - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_EINT(5),
.end = IRQ_EINT(5),
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
},
};
/* 16-bit bus, internal PHY; MAC below is a fixed board default. */
static struct smsc911x_platform_config smsc9215_config = {
.irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
.irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
.flags = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY,
.phy_interface = PHY_INTERFACE_MODE_MII,
.mac = {0x00, 0x80, 0x00, 0x23, 0x45, 0x67},
};
static struct platform_device smdk4x12_smsc911x = {
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(smdk4x12_smsc911x_resources),
.resource = smdk4x12_smsc911x_resources,
.dev = {
.platform_data = &smsc9215_config,
},
};
#ifdef CONFIG_EXYNOS_MEDIA_DEVICE
/* Exynos media device 0 — parent node that groups the media IP drivers. */
struct platform_device exynos_device_md0 = {
.name = "exynos-mdev",
.id = -1,
};
#endif
#define WRITEBACK_ENABLED
#if defined(CONFIG_VIDEO_FIMC) || defined(CONFIG_VIDEO_SAMSUNG_S5P_FIMC)
/*
 * External camera reset.
 * Most cameras share the I2C bus, so reset them at boot time so the
 * other I2C slave devices can operate correctly.
 * This function is also called from fimc_init_camera().
 * Tailor this for the cameras on your platform.
 */
#if defined(CONFIG_ITU_A) || defined(CONFIG_CSI_C) \
|| defined(CONFIG_S5K3H2_CSI_C) || defined(CONFIG_S5K3H7_CSI_C) \
|| defined(CONFIG_S5K4E5_CSI_C) || defined(CONFIG_S5K6A3_CSI_C)
/*
 * smdk4x12_cam0_reset - pulse the reset line of camera A (GPX1_2).
 * @dummy: unused; signature is fixed by the s3c_platform_camera
 *         .cam_power / mbus .set_power callback type.
 *
 * Returns 0 on success, or the negative errno from gpio_request().
 * Previously the function continued to drive (and free) the GPIO even
 * when the request failed; now it bails out instead of touching a pin
 * it does not own.
 */
static int smdk4x12_cam0_reset(int dummy)
{
	int err;

	/* Camera A */
	err = gpio_request(EXYNOS4_GPX1(2), "GPX1");
	if (err) {
		printk(KERN_ERR "#### failed to request GPX1_2 ####\n");
		return err;
	}

	s3c_gpio_setpull(EXYNOS4_GPX1(2), S3C_GPIO_PULL_NONE);
	gpio_direction_output(EXYNOS4_GPX1(2), 0);	/* assert reset */
	gpio_direction_output(EXYNOS4_GPX1(2), 1);	/* release reset */
	gpio_free(EXYNOS4_GPX1(2));

	return 0;
}
#endif
#if defined(CONFIG_ITU_B) || defined(CONFIG_CSI_D) \
|| defined(CONFIG_S5K3H2_CSI_D) || defined(CONFIG_S5K3H7_CSI_D) \
|| defined(CONFIG_S5K4E5_CSI_D) || defined(CONFIG_S5K6A3_CSI_D)
/*
 * smdk4x12_cam1_reset - pulse the reset line of camera B (GPX1_0).
 * @dummy: unused; signature is fixed by the camera power callback type.
 *
 * Returns 0 on success, or the negative errno from gpio_request().
 * Mirrors smdk4x12_cam0_reset(): on request failure we must not drive
 * or free a GPIO we never obtained.
 */
static int smdk4x12_cam1_reset(int dummy)
{
	int err;

	/* Camera B */
	err = gpio_request(EXYNOS4_GPX1(0), "GPX1");
	if (err) {
		printk(KERN_ERR "#### failed to request GPX1_0 ####\n");
		return err;
	}

	s3c_gpio_setpull(EXYNOS4_GPX1(0), S3C_GPIO_PULL_NONE);
	gpio_direction_output(EXYNOS4_GPX1(0), 0);	/* assert reset */
	gpio_direction_output(EXYNOS4_GPX1(0), 1);	/* release reset */
	gpio_free(EXYNOS4_GPX1(0));

	return 0;
}
#endif
#endif
#ifdef CONFIG_VIDEO_FIMC
#ifdef CONFIG_VIDEO_S5K4BA
/* S5K4BA: parallel (ITU) UXGA sensor on I2C bus 4 (port A) or 5 (port B). */
static struct s5k4ba_platform_data s5k4ba_plat = {
.default_width = 800,
.default_height = 600,
.pixelformat = V4L2_PIX_FMT_YUYV,
.freq = 24000000,
.is_mipi = 0,
};
static struct i2c_board_info s5k4ba_i2c_info = {
I2C_BOARD_INFO("S5K4BA", 0x2d),
.platform_data = &s5k4ba_plat,
};
static struct s3c_platform_camera s5k4ba = {
#ifdef CONFIG_ITU_A
.id = CAMERA_PAR_A,
.clk_name = "sclk_cam0",
.i2c_busnum = 4,
.cam_power = smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_ITU_B
.id = CAMERA_PAR_B,
.clk_name = "sclk_cam1",
.i2c_busnum = 5,
.cam_power = smdk4x12_cam1_reset,
#endif
.type = CAM_TYPE_ITU,
.fmt = ITU_601_YCBCR422_8BIT,
.order422 = CAM_ORDER422_8BIT_CBYCRY,
.info = &s5k4ba_i2c_info,
.pixelformat = V4L2_PIX_FMT_YUYV,
.srclk_name = "xusbxti",
.clk_rate = 24000000,
.line_length = 1920,
.width = 1600,
.height = 1200,
.window = {
.left = 0,
.top = 0,
.width = 1600,
.height = 1200,
},
/* Polarity */
.inv_pclk = 0,
.inv_vsync = 1,
.inv_href = 0,
.inv_hsync = 0,
.reset_camera = 1,
.initialized = 0,
};
#endif
/* 2 MIPI Cameras */
#ifdef CONFIG_VIDEO_S5K4EA
/* S5K4EA: 1080p MIPI-CSI sensor; CSI channel selected by CONFIG_CSI_C/D. */
static struct s5k4ea_platform_data s5k4ea_plat = {
.default_width = 1920,
.default_height = 1080,
.pixelformat = V4L2_PIX_FMT_UYVY,
.freq = 24000000,
.is_mipi = 1,
};
static struct i2c_board_info s5k4ea_i2c_info = {
I2C_BOARD_INFO("S5K4EA", 0x2d),
.platform_data = &s5k4ea_plat,
};
static struct s3c_platform_camera s5k4ea = {
#ifdef CONFIG_CSI_C
.id = CAMERA_CSI_C,
.clk_name = "sclk_cam0",
.i2c_busnum = 4,
.cam_power = smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_CSI_D
.id = CAMERA_CSI_D,
.clk_name = "sclk_cam1",
.i2c_busnum = 5,
.cam_power = smdk4x12_cam1_reset,
#endif
.type = CAM_TYPE_MIPI,
.fmt = MIPI_CSI_YCBCR422_8BIT,
.order422 = CAM_ORDER422_8BIT_YCBYCR,
.info = &s5k4ea_i2c_info,
.pixelformat = V4L2_PIX_FMT_UYVY,
.srclk_name = "xusbxti",
.clk_rate = 24000000,
.line_length = 1920,
.width = 1920,
.height = 1080,
.window = {
.left = 0,
.top = 0,
.width = 1920,
.height = 1080,
},
.mipi_lanes = 2,
.mipi_settle = 12,
.mipi_align = 32,
/* Polarity */
.inv_pclk = 0,
.inv_vsync = 1,
.inv_href = 0,
.inv_hsync = 0,
.initialized = 0,
};
#endif
#ifdef WRITEBACK_ENABLED
/* Pseudo "camera" that captures the FIMD output (writeback path); no real I2C device. */
static struct i2c_board_info writeback_i2c_info = {
I2C_BOARD_INFO("WriteBack", 0x0),
};
static struct s3c_platform_camera writeback = {
.id = CAMERA_WB,
.fmt = ITU_601_YCBCR422_8BIT,
.order422 = CAM_ORDER422_8BIT_CBYCRY,
.i2c_busnum = 0,
.info = &writeback_i2c_info,
.pixelformat = V4L2_PIX_FMT_YUV444,
.line_length = 800,
.width = 480,
.height = 800,
.window = {
.left = 0,
.top = 0,
.width = 480,
.height = 800,
},
.initialized = 0,
};
#endif
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS
#ifdef CONFIG_VIDEO_S5K3H2
/* S5K3H2 RAW sensor routed through FIMC-IS; sensor_index encodes CSI-C (1) vs CSI-D (101). */
static struct i2c_board_info s5k3h2_sensor_info = {
.type = "S5K3H2",
};
static struct s3c_platform_camera s5k3h2 = {
#ifdef CONFIG_S5K3H2_CSI_C
.id = CAMERA_CSI_C,
.clk_name = "sclk_cam0",
.cam_power = smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_S5K3H2_CSI_D
.id = CAMERA_CSI_D,
.clk_name = "sclk_cam1",
.cam_power = smdk4x12_cam1_reset,
#endif
.type = CAM_TYPE_MIPI,
.fmt = MIPI_CSI_RAW10,
.info = &s5k3h2_sensor_info,
.order422 = CAM_ORDER422_8BIT_YCBYCR,
.pixelformat = V4L2_PIX_FMT_UYVY,
.line_length = 1920,
.width = 1920,
.height = 1080,
.window = {
.left = 0,
.top = 0,
.width = 1920,
.height = 1080,
},
.srclk_name = "xusbxti",
.clk_rate = 24000000,
.mipi_lanes = 2,
.mipi_settle = 12,
.mipi_align = 24,
.initialized = 0,
#ifdef CONFIG_S5K3H2_CSI_C
.flite_id = FLITE_IDX_A,
#endif
#ifdef CONFIG_S5K3H2_CSI_D
.flite_id = FLITE_IDX_B,
#endif
.use_isp = true,
#ifdef CONFIG_S5K3H2_CSI_C
.sensor_index = 1,
#endif
#ifdef CONFIG_S5K3H2_CSI_D
.sensor_index = 101,
#endif
};
#endif
#ifdef CONFIG_VIDEO_S5K3H7
/* S5K3H7 RAW sensor via FIMC-IS; same template as s5k3h2, sensor_index 4/104. */
static struct i2c_board_info s5k3h7_sensor_info = {
.type = "S5K3H7",
};
static struct s3c_platform_camera s5k3h7 = {
#ifdef CONFIG_S5K3H7_CSI_C
.id = CAMERA_CSI_C,
.clk_name = "sclk_cam0",
.cam_power = smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_S5K3H7_CSI_D
.id = CAMERA_CSI_D,
.clk_name = "sclk_cam1",
.cam_power = smdk4x12_cam1_reset,
#endif
.type = CAM_TYPE_MIPI,
.fmt = MIPI_CSI_RAW10,
.info = &s5k3h7_sensor_info,
.order422 = CAM_ORDER422_8BIT_YCBYCR,
.pixelformat = V4L2_PIX_FMT_UYVY,
.line_length = 1920,
.width = 1920,
.height = 1080,
.window = {
.left = 0,
.top = 0,
.width = 1920,
.height = 1080,
},
.srclk_name = "xusbxti",
.clk_rate = 24000000,
.mipi_lanes = 2,
.mipi_settle = 12,
.mipi_align = 24,
.initialized = 0,
#ifdef CONFIG_S5K3H7_CSI_C
.flite_id = FLITE_IDX_A,
#endif
#ifdef CONFIG_S5K3H7_CSI_D
.flite_id = FLITE_IDX_B,
#endif
.use_isp = true,
#ifdef CONFIG_S5K3H7_CSI_C
.sensor_index = 4,
#endif
#ifdef CONFIG_S5K3H7_CSI_D
.sensor_index = 104,
#endif
};
#endif
#ifdef CONFIG_VIDEO_S5K4E5
/* S5K4E5 RAW sensor via FIMC-IS; sensor_index 3/103 for CSI-C/CSI-D. */
static struct i2c_board_info s5k4e5_sensor_info = {
.type = "S5K4E5",
};
static struct s3c_platform_camera s5k4e5 = {
#ifdef CONFIG_S5K4E5_CSI_C
.id = CAMERA_CSI_C,
.clk_name = "sclk_cam0",
.cam_power = smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_S5K4E5_CSI_D
.id = CAMERA_CSI_D,
.clk_name = "sclk_cam1",
.cam_power = smdk4x12_cam1_reset,
#endif
.type = CAM_TYPE_MIPI,
.fmt = MIPI_CSI_RAW10,
.info = &s5k4e5_sensor_info,
.order422 = CAM_ORDER422_8BIT_YCBYCR,
.pixelformat = V4L2_PIX_FMT_UYVY,
.line_length = 1920,
.width = 1920,
.height = 1080,
.window = {
.left = 0,
.top = 0,
.width = 1920,
.height = 1080,
},
.srclk_name = "xusbxti",
.clk_rate = 24000000,
.mipi_lanes = 2,
.mipi_settle = 12,
.mipi_align = 24,
.initialized = 0,
#ifdef CONFIG_S5K4E5_CSI_C
.flite_id = FLITE_IDX_A,
#endif
#ifdef CONFIG_S5K4E5_CSI_D
.flite_id = FLITE_IDX_B,
#endif
.use_isp = true,
#ifdef CONFIG_S5K4E5_CSI_C
.sensor_index = 3,
#endif
#ifdef CONFIG_S5K4E5_CSI_D
.sensor_index = 103,
#endif
};
#endif
#ifdef CONFIG_VIDEO_S5K6A3
/* S5K6A3 RAW sensor via FIMC-IS; single MIPI lane, sensor_index 2/102. */
static struct i2c_board_info s5k6a3_sensor_info = {
.type = "S5K6A3",
};
static struct s3c_platform_camera s5k6a3 = {
#ifdef CONFIG_S5K6A3_CSI_C
.id = CAMERA_CSI_C,
.clk_name = "sclk_cam0",
.cam_power = smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_S5K6A3_CSI_D
.id = CAMERA_CSI_D,
.clk_name = "sclk_cam1",
.cam_power = smdk4x12_cam1_reset,
#endif
.type = CAM_TYPE_MIPI,
.fmt = MIPI_CSI_RAW10,
.info = &s5k6a3_sensor_info,
.order422 = CAM_ORDER422_8BIT_YCBYCR,
.pixelformat = V4L2_PIX_FMT_UYVY,
.line_length = 1920,
.width = 1920,
.height = 1080,
.window = {
.left = 0,
.top = 0,
.width = 1920,
.height = 1080,
},
.srclk_name = "xusbxti",
.clk_rate = 24000000,
.mipi_lanes = 1,
.mipi_settle = 18,
.mipi_align = 24,
.initialized = 0,
#ifdef CONFIG_S5K6A3_CSI_C
.flite_id = FLITE_IDX_A,
#endif
#ifdef CONFIG_S5K6A3_CSI_D
.flite_id = FLITE_IDX_B,
#endif
.use_isp = true,
#ifdef CONFIG_S5K6A3_CSI_C
.sensor_index = 2,
#endif
#ifdef CONFIG_S5K6A3_CSI_D
.sensor_index = 102,
#endif
};
#endif
#if defined(CONFIG_VIDEO_S5K6A3) && defined(CONFIG_S5K6A3_CSI_D)
/* Second registration of the S5K6A3 on CSI-D used for the face-detection
 * (FD) stream; distinguished from the main entry by sensor_index 200. */
static struct i2c_board_info s5k6a3_fd_sensor_info = {
.type = "S5K6A3_FD",
};
static struct s3c_platform_camera s5k6a3_fd = {
.id = CAMERA_CSI_D,
.clk_name = "sclk_cam1",
.cam_power = smdk4x12_cam1_reset,
.type = CAM_TYPE_MIPI,
.fmt = MIPI_CSI_RAW10,
.info = &s5k6a3_fd_sensor_info,
.order422 = CAM_ORDER422_8BIT_YCBYCR,
.pixelformat = V4L2_PIX_FMT_UYVY,
.line_length = 1920,
.width = 1920,
.height = 1080,
.window = {
.left = 0,
.top = 0,
.width = 1920,
.height = 1080,
},
.srclk_name = "xusbxti",
.clk_rate = 24000000,
.mipi_lanes = 1,
.mipi_settle = 18,
.mipi_align = 24,
.initialized = 0,
.flite_id = FLITE_IDX_B,
.use_isp = true,
.sensor_index = 200
};
#endif
#endif
/* legacy M5MOLS Camera driver configuration */
#ifdef CONFIG_VIDEO_M5MO
/*
 * Error-check helpers for the M5MO power sequencing code.
 * Wrapped in do { } while (0) so they expand safely as a single
 * statement inside un-braced if/else bodies (the previous bare-if
 * form was a dangling-else hazard).
 */
#define CAM_CHECK_ERR_RET(x, msg) \
	do { \
		if (unlikely((x) < 0)) { \
			printk(KERN_ERR "\nfail to %s: err = %d\n", msg, x); \
			return x; \
		} \
	} while (0)
#define CAM_CHECK_ERR(x, msg) \
	do { \
		if (unlikely((x) < 0)) { \
			printk(KERN_ERR "\nfail to %s: err = %d\n", msg, x); \
		} \
	} while (0)
/* Route GPX3_3 to its interrupt function (SFN 0xF) with no pull for the
 * M5MO ISP interrupt line (EINT27). Always returns 0. */
static int m5mo_config_isp_irq(void)
{
s3c_gpio_cfgpin(EXYNOS4_GPX3(3), S3C_GPIO_SFN(0xF));
s3c_gpio_setpull(EXYNOS4_GPX3(3), S3C_GPIO_PULL_NONE);
return 0;
}
/* Legacy M5MO ISP camera: MIPI, EINT27 interrupt; defaults to 640x480. */
static struct m5mo_platform_data m5mo_plat = {
.default_width = 640, /* 1920 */
.default_height = 480, /* 1080 */
.pixelformat = V4L2_PIX_FMT_UYVY,
.freq = 24000000,
.is_mipi = 1,
.config_isp_irq = m5mo_config_isp_irq,
.irq = IRQ_EINT(27),
};
static struct i2c_board_info m5mo_i2c_info = {
I2C_BOARD_INFO("M5MO", 0x1F),
.platform_data = &m5mo_plat,
.irq = IRQ_EINT(27),
};
static struct s3c_platform_camera m5mo = {
#ifdef CONFIG_CSI_C
.id = CAMERA_CSI_C,
.clk_name = "sclk_cam0",
.i2c_busnum = 4,
.cam_power = smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_CSI_D
.id = CAMERA_CSI_D,
.clk_name = "sclk_cam1",
.i2c_busnum = 5,
.cam_power = smdk4x12_cam1_reset,
#endif
.type = CAM_TYPE_MIPI,
.fmt = MIPI_CSI_YCBCR422_8BIT,
.order422 = CAM_ORDER422_8BIT_YCBYCR,
.info = &m5mo_i2c_info,
.pixelformat = V4L2_PIX_FMT_UYVY,
.srclk_name = "xusbxti", /* "mout_mpll" */
.clk_rate = 24000000, /* 48000000 */
.line_length = 1920,
.width = 640,
.height = 480,
.window = {
.left = 0,
.top = 0,
.width = 640,
.height = 480,
},
.mipi_lanes = 2,
.mipi_settle = 12,
.mipi_align = 32,
/* Polarity */
.inv_pclk = 1,
.inv_vsync = 1,
.inv_href = 0,
.inv_hsync = 0,
.reset_camera = 0,
.initialized = 0,
};
#endif
/* Interface setting */
/* FIMC interface wiring: which cameras are registered and which one is
 * the default. Note: successive #ifdefs overwrite .default_cam, so the
 * LAST enabled option (writeback, when WRITEBACK_ENABLED) wins. */
static struct s3c_platform_fimc fimc_plat = {
#ifdef CONFIG_ITU_A
.default_cam = CAMERA_PAR_A,
#endif
#ifdef CONFIG_ITU_B
.default_cam = CAMERA_PAR_B,
#endif
#ifdef CONFIG_CSI_C
.default_cam = CAMERA_CSI_C,
#endif
#ifdef CONFIG_CSI_D
.default_cam = CAMERA_CSI_D,
#endif
#ifdef WRITEBACK_ENABLED
.default_cam = CAMERA_WB,
#endif
.camera = {
#ifdef CONFIG_VIDEO_S5K4BA
&s5k4ba,
#endif
#ifdef CONFIG_VIDEO_S5K4EA
&s5k4ea,
#endif
#ifdef CONFIG_VIDEO_M5MO
&m5mo,
#endif
#ifdef CONFIG_VIDEO_S5K3H2
&s5k3h2,
#endif
#ifdef CONFIG_VIDEO_S5K3H7
&s5k3h7,
#endif
#ifdef CONFIG_VIDEO_S5K4E5
&s5k4e5,
#endif
#ifdef CONFIG_VIDEO_S5K6A3
&s5k6a3,
#endif
#ifdef WRITEBACK_ENABLED
&writeback,
#endif
#if defined(CONFIG_VIDEO_S5K6A3) && defined(CONFIG_S5K6A3_CSI_D)
&s5k6a3_fd,
#endif
},
.hw_ver = 0x51,
};
#endif /* CONFIG_VIDEO_FIMC */
/* for mainline fimc interface */
#ifdef CONFIG_VIDEO_SAMSUNG_S5P_FIMC
#ifdef WRITEBACK_ENABLED
/* Writeback entry for the mainline (v4l2 mbus) FIMC interface. */
struct writeback_mbus_platform_data {
int id;
struct v4l2_mbus_framefmt fmt;
};
static struct i2c_board_info __initdata writeback_info = {
I2C_BOARD_INFO("writeback", 0x0),
};
#endif
#ifdef CONFIG_VIDEO_S5K4BA
/* S5K4BA for the mainline FIMC driver (mbus-format based platform data). */
static struct s5k4ba_mbus_platform_data s5k4ba_mbus_plat = {
.id = 0,
.fmt = {
.width = 1600,
.height = 1200,
/*.code = V4L2_MBUS_FMT_UYVY8_2X8, */
.code = V4L2_MBUS_FMT_VYUY8_2X8,
},
.clk_rate = 24000000UL,
#ifdef CONFIG_ITU_A
.set_power = smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_ITU_B
.set_power = smdk4x12_cam1_reset,
#endif
};
static struct i2c_board_info s5k4ba_info = {
I2C_BOARD_INFO("S5K4BA", 0x2d),
.platform_data = &s5k4ba_mbus_plat,
};
#endif
/* 2 MIPI Cameras */
#ifdef CONFIG_VIDEO_S5K4EA
/* S5K4EA for the mainline FIMC driver; id 0/1 selects MIPI-CSIS channel. */
static struct s5k4ea_mbus_platform_data s5k4ea_mbus_plat = {
#ifdef CONFIG_CSI_C
.id = 0,
.set_power = smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_CSI_D
.id = 1,
.set_power = smdk4x12_cam1_reset,
#endif
.fmt = {
.width = 1920,
.height = 1080,
.code = V4L2_MBUS_FMT_VYUY8_2X8,
},
.clk_rate = 24000000UL,
};
static struct i2c_board_info s5k4ea_info = {
I2C_BOARD_INFO("S5K4EA", 0x2d),
.platform_data = &s5k4ea_mbus_plat,
};
#endif
#ifdef CONFIG_VIDEO_M5MOLS
/* Mainline M5MOLS ISP camera: reset GPIO depends on which CSI port is used. */
static struct m5mols_platform_data m5mols_platdata = {
#ifdef CONFIG_CSI_C
.gpio_rst = EXYNOS4_GPX1(2), /* ISP_RESET */
#endif
#ifdef CONFIG_CSI_D
.gpio_rst = EXYNOS4_GPX1(0), /* ISP_RESET */
#endif
.enable_rst = true, /* positive reset */
.irq = IRQ_EINT(27),
};
static struct i2c_board_info m5mols_board_info = {
I2C_BOARD_INFO("M5MOLS", 0x1F),
.platform_data = &m5mols_platdata,
};
#endif
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS
/* Bare I2C sensor identities for FIMC-IS under the mainline interface.
 * These reuse the same names as the legacy-FIMC definitions above; the
 * two sets live in mutually exclusive top-level #ifdef branches. */
#ifdef CONFIG_VIDEO_S5K3H2
static struct i2c_board_info s5k3h2_sensor_info = {
.type = "S5K3H2",
};
#endif
#ifdef CONFIG_VIDEO_S5K3H7
static struct i2c_board_info s5k3h7_sensor_info = {
.type = "S5K3H7",
};
#endif
#ifdef CONFIG_VIDEO_S5K4E5
static struct i2c_board_info s5k4e5_sensor_info = {
.type = "S5K4E5",
};
#endif
#ifdef CONFIG_VIDEO_S5K6A3
static struct i2c_board_info s5k6a3_sensor_info = {
.type = "S5K6A3",
};
#endif
#endif
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_LITE
/* This is for platdata of fimc-lite */
/* Minimal FIMC-LITE platform data per sensor: MIPI input, ISP-routed,
 * all sync/clock polarities non-inverted. */
#ifdef CONFIG_VIDEO_S5K3H2
static struct s3c_platform_camera s5k3h2 = {
.type = CAM_TYPE_MIPI,
.use_isp = true,
.inv_pclk = 0,
.inv_vsync = 0,
.inv_href = 0,
.inv_hsync = 0,
};
#endif
#ifdef CONFIG_VIDEO_S5K3H7
static struct s3c_platform_camera s5k3h7 = {
.type = CAM_TYPE_MIPI,
.use_isp = true,
.inv_pclk = 0,
.inv_vsync = 0,
.inv_href = 0,
.inv_hsync = 0,
};
#endif
#ifdef CONFIG_VIDEO_S5K4E5
static struct s3c_platform_camera s5k4e5 = {
.type = CAM_TYPE_MIPI,
.use_isp = true,
.inv_pclk = 0,
.inv_vsync = 0,
.inv_href = 0,
.inv_hsync = 0,
};
#endif
#ifdef CONFIG_VIDEO_S5K6A3
static struct s3c_platform_camera s5k6a3 = {
.type = CAM_TYPE_MIPI,
.use_isp = true,
.inv_pclk = 0,
.inv_vsync = 0,
.inv_href = 0,
.inv_hsync = 0,
};
#endif
#endif
#endif /* CONFIG_VIDEO_SAMSUNG_S5P_FIMC */
#ifdef CONFIG_S3C64XX_DEV_SPI
/* SPI bus 0: generic spidev at 10 MHz, chip-select on GPB1. */
static struct s3c64xx_spi_csinfo spi0_csi[] = {
[0] = {
.line = EXYNOS4_GPB(1),
.set_level = gpio_set_value,
.fb_delay = 0x2,
},
};
static struct spi_board_info spi0_board_info[] __initdata = {
{
.modalias = "spidev",
.platform_data = NULL,
.max_speed_hz = 10*1000*1000,
.bus_num = 0,
.chip_select = 0,
.mode = SPI_MODE_0,
.controller_data = &spi0_csi[0],
}
};
#ifndef CONFIG_FB_S5P_LMS501KF03
/* SPI bus 1 (only when the LMS501KF03 panel does not claim GPB5):
 * spidev at 10 MHz, mode 3, chip-select on GPB5. */
static struct s3c64xx_spi_csinfo spi1_csi[] = {
[0] = {
.line = EXYNOS4_GPB(5),
.set_level = gpio_set_value,
.fb_delay = 0x2,
},
};
static struct spi_board_info spi1_board_info[] __initdata = {
{
.modalias = "spidev",
.platform_data = NULL,
.max_speed_hz = 10*1000*1000,
.bus_num = 1,
.chip_select = 0,
.mode = SPI_MODE_3,
.controller_data = &spi1_csi[0],
}
};
#endif
/* SPI bus 2: spidev at 10 MHz, chip-select on GPC1_2. */
static struct s3c64xx_spi_csinfo spi2_csi[] = {
[0] = {
.line = EXYNOS4_GPC1(2),
.set_level = gpio_set_value,
.fb_delay = 0x2,
},
};
static struct spi_board_info spi2_board_info[] __initdata = {
{
.modalias = "spidev",
.platform_data = NULL,
.max_speed_hz = 10*1000*1000,
.bus_num = 2,
.chip_select = 0,
.mode = SPI_MODE_0,
.controller_data = &spi2_csi[0],
}
};
#endif
#ifdef CONFIG_FB_S3C
#if defined(CONFIG_LCD_AMS369FG06)
/* AMS369FG06 panel: power control is a no-op on this board (always "on"). */
static int lcd_power_on(struct lcd_device *ld, int enable)
{
return 1;
}
/* Pulse the panel reset line (GPX0_6 low->high); returns 1 on success,
 * negative errno if the GPIO cannot be requested. */
static int reset_lcd(struct lcd_device *ld)
{
int err = 0;
err = gpio_request_one(EXYNOS4_GPX0(6), GPIOF_OUT_INIT_HIGH, "GPX0");
if (err) {
printk(KERN_ERR "failed to request GPX0 for "
"lcd reset control\n");
return err;
}
gpio_set_value(EXYNOS4_GPX0(6), 0);
mdelay(1);
gpio_set_value(EXYNOS4_GPX0(6), 1);
gpio_free(EXYNOS4_GPX0(6));
return 1;
}
static struct lcd_platform_data ams369fg06_platform_data = {
.reset = reset_lcd,
.power_on = lcd_power_on,
.lcd_enabled = 0,
.reset_delay = 100, /* 100ms */
};
/* Bit-banged SPI (spi_gpio) link to the AMS369FG06 panel controller. */
#define LCD_BUS_NUM 3
#define DISPLAY_CS EXYNOS4_GPB(5)
#define DISPLAY_CLK EXYNOS4_GPB(4)
#define DISPLAY_SI EXYNOS4_GPB(7)
static struct spi_board_info spi_board_info[] __initdata = {
{
.modalias = "ams369fg06",
.platform_data = (void *)&ams369fg06_platform_data,
.max_speed_hz = 1200000,
.bus_num = LCD_BUS_NUM,
.chip_select = 0,
.mode = SPI_MODE_3,
.controller_data = (void *)DISPLAY_CS,
}
};
static struct spi_gpio_platform_data ams369fg06_spi_gpio_data = {
.sck = DISPLAY_CLK,
.mosi = DISPLAY_SI,
.miso = -1, /* write-only link: no MISO line */
.num_chipselect = 1,
};
static struct platform_device s3c_device_spi_gpio = {
.name = "spi_gpio",
.id = LCD_BUS_NUM,
.dev = {
.parent = &s5p_device_fimd0.dev,
.platform_data = &ams369fg06_spi_gpio_data,
},
};
/* Three identical 480x800 framebuffer windows for the AMS369FG06 panel;
 * virtual_y is doubled for page-flipping. */
static struct s3c_fb_pd_win smdk4x12_fb_win0 = {
.win_mode = {
.left_margin = 9,
.right_margin = 9,
.upper_margin = 5,
.lower_margin = 5,
.hsync_len = 2,
.vsync_len = 2,
.xres = 480,
.yres = 800,
},
.virtual_x = 480,
.virtual_y = 1600,
.width = 48,
.height = 80,
.max_bpp = 32,
.default_bpp = 24,
};
static struct s3c_fb_pd_win smdk4x12_fb_win1 = {
.win_mode = {
.left_margin = 9,
.right_margin = 9,
.upper_margin = 5,
.lower_margin = 5,
.hsync_len = 2,
.vsync_len = 2,
.xres = 480,
.yres = 800,
},
.virtual_x = 480,
.virtual_y = 1600,
.width = 48,
.height = 80,
.max_bpp = 32,
.default_bpp = 24,
};
static struct s3c_fb_pd_win smdk4x12_fb_win2 = {
.win_mode = {
.left_margin = 9,
.right_margin = 9,
.upper_margin = 5,
.lower_margin = 5,
.hsync_len = 2,
.vsync_len = 2,
.xres = 480,
.yres = 800,
},
.virtual_x = 480,
.virtual_y = 1600,
.width = 48,
.height = 80,
.max_bpp = 32,
.default_bpp = 24,
};
#elif defined(CONFIG_LCD_LMS501KF03)
/* LMS501KF03 panel: power control is a no-op on this board. */
static int lcd_power_on(struct lcd_device *ld, int enable)
{
return 1;
}
/* Pulse the panel reset line; the pin differs by board revision
 * (GPM3_6 on rev 0.1, GPX1_5 otherwise). Returns 1 on success. */
static int reset_lcd(struct lcd_device *ld)
{
int err = 0;
if (samsung_board_rev_is_0_1()) {
err = gpio_request_one(EXYNOS4212_GPM3(6),
GPIOF_OUT_INIT_HIGH, "GPM3");
if (err) {
printk(KERN_ERR "failed to request GPM3 for "
"lcd reset control\n");
return err;
}
gpio_set_value(EXYNOS4212_GPM3(6), 0);
mdelay(1);
gpio_set_value(EXYNOS4212_GPM3(6), 1);
gpio_free(EXYNOS4212_GPM3(6));
} else {
err = gpio_request_one(EXYNOS4_GPX1(5),
GPIOF_OUT_INIT_HIGH, "GPX1");
if (err) {
printk(KERN_ERR "failed to request GPX1 for "
"lcd reset control\n");
return err;
}
gpio_set_value(EXYNOS4_GPX1(5), 0);
mdelay(1);
gpio_set_value(EXYNOS4_GPX1(5), 1);
gpio_free(EXYNOS4_GPX1(5));
}
return 1;
}
static struct lcd_platform_data lms501kf03_platform_data = {
.reset = reset_lcd,
.power_on = lcd_power_on,
.lcd_enabled = 0,
.reset_delay = 100, /* 100ms */
};
/* Bit-banged SPI (spi_gpio) link to the LMS501KF03 panel controller. */
#define LCD_BUS_NUM 3
#define DISPLAY_CS EXYNOS4_GPB(5)
#define DISPLAY_CLK EXYNOS4_GPB(4)
#define DISPLAY_SI EXYNOS4_GPB(7)
static struct spi_board_info spi_board_info[] __initdata = {
{
.modalias = "lms501kf03",
.platform_data = (void *)&lms501kf03_platform_data,
.max_speed_hz = 1200000,
.bus_num = LCD_BUS_NUM,
.chip_select = 0,
.mode = SPI_MODE_3,
.controller_data = (void *)DISPLAY_CS,
}
};
static struct spi_gpio_platform_data lms501kf03_spi_gpio_data = {
.sck = DISPLAY_CLK,
.mosi = DISPLAY_SI,
.miso = -1, /* write-only link: no MISO line */
.num_chipselect = 1,
};
static struct platform_device s3c_device_spi_gpio = {
.name = "spi_gpio",
.id = LCD_BUS_NUM,
.dev = {
.parent = &s5p_device_fimd0.dev,
.platform_data = &lms501kf03_spi_gpio_data,
},
};
/* Three identical 480x800 framebuffer windows for the LMS501KF03 panel. */
static struct s3c_fb_pd_win smdk4x12_fb_win0 = {
.win_mode = {
.left_margin = 8, /* HBPD */
.right_margin = 8, /* HFPD */
.upper_margin = 6, /* VBPD */
.lower_margin = 6, /* VFPD */
.hsync_len = 6, /* HSPW */
.vsync_len = 4, /* VSPW */
.xres = 480,
.yres = 800,
},
.virtual_x = 480,
.virtual_y = 1600,
.width = 48,
.height = 80,
.max_bpp = 32,
.default_bpp = 24,
};
static struct s3c_fb_pd_win smdk4x12_fb_win1 = {
.win_mode = {
.left_margin = 8, /* HBPD */
.right_margin = 8, /* HFPD */
.upper_margin = 6, /* VBPD */
.lower_margin = 6, /* VFPD */
.hsync_len = 6, /* HSPW */
.vsync_len = 4, /* VSPW */
.xres = 480,
.yres = 800,
},
.virtual_x = 480,
.virtual_y = 1600,
.width = 48,
.height = 80,
.max_bpp = 32,
.default_bpp = 24,
};
static struct s3c_fb_pd_win smdk4x12_fb_win2 = {
.win_mode = {
.left_margin = 8, /* HBPD */
.right_margin = 8, /* HFPD */
.upper_margin = 6, /* VBPD */
.lower_margin = 6, /* VFPD */
.hsync_len = 6, /* HSPW */
.vsync_len = 4, /* VSPW */
.xres = 480,
.yres = 800,
},
.virtual_x = 480,
.virtual_y = 1600,
.width = 48,
.height = 80,
.max_bpp = 32,
.default_bpp = 24,
};
#elif defined(CONFIG_LCD_WA101S)
/* WA101S panel power: drive GPD0_1 high/low (only when the PWM backlight
 * driver is not handling that pin). */
static void lcd_wa101s_set_power(struct plat_lcd_data *pd,
unsigned int power)
{
if (power) {
#if !defined(CONFIG_BACKLIGHT_PWM)
gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_HIGH, "GPD0");
gpio_free(EXYNOS4_GPD0(1));
#endif
} else {
#if !defined(CONFIG_BACKLIGHT_PWM)
gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_LOW, "GPD0");
gpio_free(EXYNOS4_GPD0(1));
#endif
}
}
static struct plat_lcd_data smdk4x12_lcd_wa101s_data = {
.set_power = lcd_wa101s_set_power,
};
static struct platform_device smdk4x12_lcd_wa101s = {
.name = "platform-lcd",
.dev.parent = &s5p_device_fimd0.dev,
.dev.platform_data = &smdk4x12_lcd_wa101s_data,
};
/* Three identical 1360x768 windows for the WA101S (panel is 1366 wide;
 * 1360 keeps the line length DMA-friendly). */
static struct s3c_fb_pd_win smdk4x12_fb_win0 = {
.win_mode = {
.left_margin = 80,
.right_margin = 48,
.upper_margin = 14,
.lower_margin = 3,
.hsync_len = 32,
.vsync_len = 5,
.xres = 1360, /* real size : 1366 */
.yres = 768,
},
.virtual_x = 1360, /* real size : 1366 */
.virtual_y = 768 * 2,
.width = 223,
.height = 125,
.max_bpp = 32,
.default_bpp = 24,
};
static struct s3c_fb_pd_win smdk4x12_fb_win1 = {
.win_mode = {
.left_margin = 80,
.right_margin = 48,
.upper_margin = 14,
.lower_margin = 3,
.hsync_len = 32,
.vsync_len = 5,
.xres = 1360, /* real size : 1366 */
.yres = 768,
},
.virtual_x = 1360, /* real size : 1366 */
.virtual_y = 768 * 2,
.width = 223,
.height = 125,
.max_bpp = 32,
.default_bpp = 24,
};
static struct s3c_fb_pd_win smdk4x12_fb_win2 = {
.win_mode = {
.left_margin = 80,
.right_margin = 48,
.upper_margin = 14,
.lower_margin = 3,
.hsync_len = 32,
.vsync_len = 5,
.xres = 1360, /* real size : 1366 */
.yres = 768,
},
.virtual_x = 1360, /* real size : 1366 */
.virtual_y = 768 * 2,
.width = 223,
.height = 125,
.max_bpp = 32,
.default_bpp = 24,
};
#elif defined(CONFIG_LCD_LTE480WV)
/* LTE480WV panel power: backlight pin GPD0_1 plus an nRESET pulse on
 * GPX0_6 during power-up. */
static void lcd_lte480wv_set_power(struct plat_lcd_data *pd,
unsigned int power)
{
if (power) {
#if !defined(CONFIG_BACKLIGHT_PWM)
gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_HIGH, "GPD0");
gpio_free(EXYNOS4_GPD0(1));
#endif
/* fire nRESET on power up */
gpio_request_one(EXYNOS4_GPX0(6), GPIOF_OUT_INIT_HIGH, "GPX0");
mdelay(100);
gpio_set_value(EXYNOS4_GPX0(6), 0);
mdelay(10);
gpio_set_value(EXYNOS4_GPX0(6), 1);
mdelay(10);
gpio_free(EXYNOS4_GPX0(6));
} else {
#if !defined(CONFIG_BACKLIGHT_PWM)
gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_LOW, "GPD0");
gpio_free(EXYNOS4_GPD0(1));
#endif
}
}
static struct plat_lcd_data smdk4x12_lcd_lte480wv_data = {
.set_power = lcd_lte480wv_set_power,
};
static struct platform_device smdk4x12_lcd_lte480wv = {
.name = "platform-lcd",
.dev.parent = &s5p_device_fimd0.dev,
.dev.platform_data = &smdk4x12_lcd_lte480wv_data,
};
/* Three identical 800x480 framebuffer windows for the LTE480WV panel. */
static struct s3c_fb_pd_win smdk4x12_fb_win0 = {
.win_mode = {
.left_margin = 13,
.right_margin = 8,
.upper_margin = 7,
.lower_margin = 5,
.hsync_len = 3,
.vsync_len = 1,
.xres = 800,
.yres = 480,
},
.virtual_x = 800,
.virtual_y = 960,
.width = 104,
.height = 62,
.max_bpp = 32,
.default_bpp = 24,
};
static struct s3c_fb_pd_win smdk4x12_fb_win1 = {
.win_mode = {
.left_margin = 13,
.right_margin = 8,
.upper_margin = 7,
.lower_margin = 5,
.hsync_len = 3,
.vsync_len = 1,
.xres = 800,
.yres = 480,
},
.virtual_x = 800,
.virtual_y = 960,
.width = 104,
.height = 62,
.max_bpp = 32,
.default_bpp = 24,
};
static struct s3c_fb_pd_win smdk4x12_fb_win2 = {
.win_mode = {
.left_margin = 13,
.right_margin = 8,
.upper_margin = 7,
.lower_margin = 5,
.hsync_len = 3,
.vsync_len = 1,
.xres = 800,
.yres = 480,
},
.virtual_x = 800,
.virtual_y = 960,
.width = 104,
.height = 62,
.max_bpp = 32,
.default_bpp = 24,
};
#elif defined(CONFIG_LCD_MIPI_S6E63M0)
/* S6E63M0 MIPI panel: pulse nRESET on GPX2_7 for both power-on and
 * power-off (the two branches are currently identical). */
static void mipi_lcd_set_power(struct plat_lcd_data *pd,
unsigned int power)
{
gpio_request_one(EXYNOS4_GPX2(7), GPIOF_OUT_INIT_HIGH, "GPX2");
mdelay(100);
if (power) {
/* fire nRESET on power up */
gpio_set_value(EXYNOS4_GPX2(7), 0);
mdelay(100);
gpio_set_value(EXYNOS4_GPX2(7), 1);
mdelay(100);
gpio_free(EXYNOS4_GPX2(7));
} else {
/* fire nRESET on power off */
gpio_set_value(EXYNOS4_GPX2(7), 0);
mdelay(100);
gpio_set_value(EXYNOS4_GPX2(7), 1);
mdelay(100);
gpio_free(EXYNOS4_GPX2(7));
}
}
static struct plat_lcd_data smdk4x12_mipi_lcd_data = {
.set_power = mipi_lcd_set_power,
};
static struct platform_device smdk4x12_mipi_lcd = {
.name = "platform-lcd",
.dev.parent = &s5p_device_fimd0.dev,
.dev.platform_data = &smdk4x12_mipi_lcd_data,
};
/* Three identical 480x800 framebuffer windows for the S6E63M0 MIPI panel. */
static struct s3c_fb_pd_win smdk4x12_fb_win0 = {
.win_mode = {
.left_margin = 0x16,
.right_margin = 0x16,
.upper_margin = 0x1,
.lower_margin = 0x28,
.hsync_len = 0x2,
.vsync_len = 0x3,
.xres = 480,
.yres = 800,
},
.virtual_x = 480,
.virtual_y = 1600,
.width = 48,
.height = 80,
.max_bpp = 32,
.default_bpp = 24,
};
static struct s3c_fb_pd_win smdk4x12_fb_win1 = {
.win_mode = {
.left_margin = 0x16,
.right_margin = 0x16,
.upper_margin = 0x1,
.lower_margin = 0x28,
.hsync_len = 0x2,
.vsync_len = 0x3,
.xres = 480,
.yres = 800,
},
.virtual_x = 480,
.virtual_y = 1600,
.width = 48,
.height = 80,
.max_bpp = 32,
.default_bpp = 24,
};
static struct s3c_fb_pd_win smdk4x12_fb_win2 = {
.win_mode = {
.left_margin = 0x16,
.right_margin = 0x16,
.upper_margin = 0x1,
.lower_margin = 0x28,
.hsync_len = 0x2,
.vsync_len = 0x3,
.xres = 480,
.yres = 800,
},
.virtual_x = 480,
.virtual_y = 1600,
.width = 48,
.height = 80,
.max_bpp = 32,
.default_bpp = 24,
};
#endif
/* FIMD0 platform data: wires the three windows above and per-panel
 * VIDCON signal polarities; window 2 is the default console window. */
static struct s3c_fb_platdata smdk4x12_lcd0_pdata __initdata = {
#if defined(CONFIG_LCD_AMS369FG06) || defined(CONFIG_LCD_WA101S) || \
defined(CONFIG_LCD_LTE480WV) || defined(CONFIG_LCD_LMS501KF03) || \
defined(CONFIG_LCD_MIPI_S6E63M0)
.win[0] = &smdk4x12_fb_win0,
.win[1] = &smdk4x12_fb_win1,
.win[2] = &smdk4x12_fb_win2,
#endif
.default_win = 2,
.vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
#if defined(CONFIG_LCD_AMS369FG06)
.vidcon1 = VIDCON1_INV_VCLK | VIDCON1_INV_VDEN |
VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
#elif defined(CONFIG_LCD_LMS501KF03)
.vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
#elif defined(CONFIG_LCD_WA101S)
.vidcon1 = VIDCON1_INV_VCLK | VIDCON1_INV_HSYNC |
VIDCON1_INV_VSYNC,
#elif defined(CONFIG_LCD_LTE480WV)
.vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
#endif
.setup_gpio = exynos4_fimd0_gpio_setup_24bpp,
};
#endif
#ifdef CONFIG_FB_S5P
#ifdef CONFIG_FB_S5P_LMS501KF03
/* Legacy FB_S5P driver configuration for the LMS501KF03 panel. */
static struct s3c_platform_fb lms501kf03_data __initdata = {
.hw_ver = 0x70,
.clk_name = "sclk_lcd",
.nr_wins = 5,
.default_win = CONFIG_FB_S5P_DEFAULT_WINDOW,
.swap = FB_SWAP_HWORD | FB_SWAP_WORD,
};
/* Bit-banged SPI link to the LMS501KF03 under the legacy FB_S5P driver. */
#define LCD_BUS_NUM 3
#define DISPLAY_CS EXYNOS4_GPB(5)
#define DISPLAY_CLK EXYNOS4_GPB(4)
#define DISPLAY_SI EXYNOS4_GPB(7)
static struct spi_board_info spi_board_info[] __initdata = {
{
.modalias = "lms501kf03",
.platform_data = NULL,
.max_speed_hz = 1200000,
.bus_num = LCD_BUS_NUM,
.chip_select = 0,
.mode = SPI_MODE_3,
.controller_data = (void *)DISPLAY_CS,
}
};
static struct spi_gpio_platform_data lms501kf03_spi_gpio_data = {
.sck = DISPLAY_CLK,
.mosi = DISPLAY_SI,
.miso = -1, /* write-only link: no MISO line */
.num_chipselect = 1,
};
static struct platform_device s3c_device_spi_gpio = {
.name = "spi_gpio",
.id = LCD_BUS_NUM,
.dev = {
.parent = &s3c_device_fb.dev,
.platform_data = &lms501kf03_spi_gpio_data,
},
};
#elif defined(CONFIG_FB_S5P_DUMMY_MIPI_LCD)
#define LCD_BUS_NUM 3
#define DISPLAY_CS EXYNOS4_GPB(5)
#define DISPLAY_CLK EXYNOS4_GPB(4)
#define DISPLAY_SI EXYNOS4_GPB(7)
/* Timing/polarity description of the placeholder 480x800 MIPI panel. */
static struct s3cfb_lcd dummy_mipi_lcd = {
.width = 480,
.height = 800,
.bpp = 24,
.freq = 60,
.timing = {
.h_fp = 0x16,
.h_bp = 0x16,
.h_sw = 0x2,
.v_fp = 0x28,
.v_fpe = 2,
.v_bp = 0x1,
.v_bpe = 1,
.v_sw = 3,
.cmd_allow_len = 0x4,
},
.polarity = {
.rise_vclk = 0,
.inv_hsync = 0,
.inv_vsync = 0,
.inv_vden = 0,
},
};
/* Legacy FB_S5P driver configuration used with the dummy MIPI panel. */
static struct s3c_platform_fb fb_platform_data __initdata = {
.hw_ver = 0x70,
.clk_name = "sclk_lcd",
.nr_wins = 5,
.default_win = CONFIG_FB_S5P_DEFAULT_WINDOW,
.swap = FB_SWAP_HWORD | FB_SWAP_WORD,
};
/* No GPIO pad setup needed for the dummy MIPI panel — intentional no-op. */
static void lcd_cfg_gpio(void)
{
return;
}
/*
 * reset_lcd - pulse the dummy MIPI panel's nRESET line.
 *
 * The reset pin differs per SoC: GPX2_7 on EXYNOS4212, GPX3_1 otherwise.
 * The previous code always requested GPX3_1 but, on EXYNOS4212, toggled
 * and freed GPX2_7 instead — driving a GPIO it never owned and leaking
 * the GPX3_1 request (and the failure message named GPX0). Request the
 * pin that is actually used in each configuration.
 *
 * Returns 0 on success, negative errno if the GPIO cannot be claimed.
 */
static int reset_lcd(void)
{
	int err = 0;

#ifdef CONFIG_CPU_EXYNOS4212
	err = gpio_request(EXYNOS4_GPX2(7), "GPX2");
	if (err) {
		printk(KERN_ERR "failed to request GPX2 for lcd reset control\n");
		return err;
	}
	gpio_direction_output(EXYNOS4_GPX2(7), 1);
	mdelay(100);
	gpio_set_value(EXYNOS4_GPX2(7), 0);	/* assert reset */
	mdelay(100);
	gpio_set_value(EXYNOS4_GPX2(7), 1);	/* release reset */
	mdelay(100);
	gpio_free(EXYNOS4_GPX2(7));
#else
	err = gpio_request(EXYNOS4_GPX3(1), "GPX3");
	if (err) {
		printk(KERN_ERR "failed to request GPX3 for lcd reset control\n");
		return err;
	}
	gpio_direction_output(EXYNOS4_GPX3(1), 1);
	mdelay(100);
	gpio_set_value(EXYNOS4_GPX3(1), 0);	/* assert reset */
	mdelay(100);
	gpio_set_value(EXYNOS4_GPX3(1), 1);	/* release reset */
	mdelay(100);
	gpio_free(EXYNOS4_GPX3(1));
#endif
	return 0;
}
static int lcd_power_on(void *pdev, int enable)
{
return 1;
}
/* Wire the dummy MIPI panel into the DSIM platform data, register the
 * DSIM device and hand the framebuffer platform data to the FB driver.
 * NOTE(review): assumes s5p_device_dsim.dev.platform_data and its
 * dsim_lcd_info/mipi_ddi_pd pointers are populated before this runs —
 * none of the pointers are NULL-checked here. */
static void __init mipi_fb_init(void)
{
struct s5p_platform_dsim *dsim_pd = NULL;
struct mipi_ddi_platform_data *mipi_ddi_pd = NULL;
struct dsim_lcd_config *dsim_lcd_info = NULL;
/* gpio pad configuration for rgb and spi interface. */
lcd_cfg_gpio();
/*
 * register lcd panel data.
 */
dsim_pd = (struct s5p_platform_dsim *)
s5p_device_dsim.dev.platform_data;
strcpy(dsim_pd->lcd_panel_name, "dummy_mipi_lcd");
dsim_lcd_info = dsim_pd->dsim_lcd_info;
dsim_lcd_info->lcd_panel_info = (void *)&dummy_mipi_lcd;
mipi_ddi_pd = (struct mipi_ddi_platform_data *)
dsim_lcd_info->mipi_ddi_pd;
/* hook up the board-level reset/power callbacks defined above */
mipi_ddi_pd->lcd_reset = reset_lcd;
mipi_ddi_pd->lcd_power_on = lcd_power_on;
platform_device_register(&s5p_device_dsim);
s3cfb_set_platdata(&fb_platform_data);
printk(KERN_INFO "platform data of %s lcd panel has been registered.\n",
dsim_pd->lcd_panel_name);
}
#endif
#endif
static int exynos4_notifier_call(struct notifier_block *this,
unsigned long code, void *_cmd)
{
int mode = 0;
if ((code == SYS_RESTART) && _cmd)
if (!strcmp((char *)_cmd, "recovery"))
mode = 0xf;
__raw_writel(mode, REG_INFORM4);
return NOTIFY_DONE;
}
/* Registered on the reboot notifier chain to run exynos4_notifier_call(). */
static struct notifier_block exynos4_reboot_notifier = {
.notifier_call = exynos4_notifier_call,
};
#ifdef CONFIG_EXYNOS4_DEV_DWMCI
/* Configure the DW-MMC pins for the requested bus width.
 * GPK0_0..GPK0_1 (presumably CLK/CMD — note the first loop's '<' bound
 * covers only pins 0 and 1) are always set up; data pins depend on width. */
static void exynos_dwmci_cfg_gpio(int width)
{
unsigned int gpio;
for (gpio = EXYNOS4_GPK0(0); gpio < EXYNOS4_GPK0(2); gpio++) {
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_NONE);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV2);
}
switch (width) {
case MMC_BUS_WIDTH_8:
for (gpio = EXYNOS4_GPK1(3); gpio <= EXYNOS4_GPK1(6); gpio++) {
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(4));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV2);
}
/* fallthrough: an 8-bit bus also needs the 4-bit data lines */
case MMC_BUS_WIDTH_4:
for (gpio = EXYNOS4_GPK0(3); gpio <= EXYNOS4_GPK0(6); gpio++) {
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV2);
}
break;
case MMC_BUS_WIDTH_1:
gpio = EXYNOS4_GPK0(3);
s3c_gpio_cfgpin(gpio, S3C_GPIO_SFN(3));
s3c_gpio_setpull(gpio, S3C_GPIO_PULL_UP);
s5p_gpio_set_drvstr(gpio, S5P_GPIO_DRVSTR_LV2);
/* fallthrough into default (no further pins) */
default:
break;
}
}
/*
 * DesignWare MMC host platform data: one slot, 100 MHz bus clock,
 * 8-bit data with DDR50/1.8V-DDR capabilities, polled card detect
 * (broken-CD quirk) with a 200 ms detect delay.
 */
static struct dw_mci_board exynos_dwmci_pdata __initdata = {
	.num_slots = 1,
	.quirks = DW_MCI_QUIRK_BROKEN_CARD_DETECTION | DW_MCI_QUIRK_HIGHSPEED,
	.bus_hz = 100 * 1000 * 1000,
	.caps = MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR |
		MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,
	.fifo_depth = 0x80,
	.detect_delay_ms = 200,
	.hclk_name = "dwmci",
	.cclk_name = "sclk_dwmci",
	.cfg_gpio = exynos_dwmci_cfg_gpio,
};
#endif
#ifdef CONFIG_S3C_DEV_HSMMC
/* SDHCI channel 0: internal card detect, external clock divider. */
static struct s3c_sdhci_platdata smdk4x12_hsmmc0_pdata __initdata = {
	.cd_type = S3C_SDHCI_CD_INTERNAL,
	.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
#ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT
	.max_width = 8,
	.host_caps = MMC_CAP_8_BIT_DATA,
#endif
};
#endif

#ifdef CONFIG_S3C_DEV_HSMMC1
/* SDHCI channel 1: same detect/clock setup, 4-bit only. */
static struct s3c_sdhci_platdata smdk4x12_hsmmc1_pdata __initdata = {
	.cd_type = S3C_SDHCI_CD_INTERNAL,
	.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
};
#endif

#ifdef CONFIG_S3C_DEV_HSMMC2
/* SDHCI channel 2: optionally 8-bit wide. */
static struct s3c_sdhci_platdata smdk4x12_hsmmc2_pdata __initdata = {
	.cd_type = S3C_SDHCI_CD_INTERNAL,
	.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
#ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT
	.max_width = 8,
	.host_caps = MMC_CAP_8_BIT_DATA,
#endif
};
#endif

#ifdef CONFIG_S3C_DEV_HSMMC3
/* SDHCI channel 3: 4-bit only. */
static struct s3c_sdhci_platdata smdk4x12_hsmmc3_pdata __initdata = {
	.cd_type = S3C_SDHCI_CD_INTERNAL,
	.clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
};
#endif

#ifdef CONFIG_S5P_DEV_MSHC
/*
 * Mobile storage host: non-removable (permanent) card.
 * wp_gpio = 0xffffffff presumably means "no real WP line" - TODO
 * confirm against the s3c-mshci driver.
 */
static struct s3c_mshci_platdata exynos4_mshc_pdata __initdata = {
	.cd_type = S3C_MSHCI_CD_PERMANENT,
	.has_wp_gpio = true,
	.wp_gpio = 0xffffffff,
#if defined(CONFIG_EXYNOS4_MSHC_8BIT) && \
	defined(CONFIG_EXYNOS4_MSHC_DDR)
	.max_width = 8,
	.host_caps = MMC_CAP_8_BIT_DATA | MMC_CAP_1_8V_DDR |
		MMC_CAP_UHS_DDR50,
#elif defined(CONFIG_EXYNOS4_MSHC_8BIT)
	.max_width = 8,
	.host_caps = MMC_CAP_8_BIT_DATA,
#elif defined(CONFIG_EXYNOS4_MSHC_DDR)
	.host_caps = MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50,
#endif
};
#endif
#ifdef CONFIG_USB_EHCI_S5P
static struct s5p_ehci_platdata smdk4x12_ehci_pdata;

/* Hand the (zero-filled) EHCI platform data to the S5P EHCI core. */
static void __init smdk4x12_ehci_init(void)
{
	s5p_ehci_set_platdata(&smdk4x12_ehci_pdata);
}
#endif
#ifdef CONFIG_USB_OHCI_S5P
static struct s5p_ohci_platdata smdk4x12_ohci_pdata;

/* Hand the (zero-filled) OHCI platform data to the S5P OHCI core. */
static void __init smdk4x12_ohci_init(void)
{
	s5p_ohci_set_platdata(&smdk4x12_ohci_pdata);
}
#endif
/* USB GADGET */
#ifdef CONFIG_USB_GADGET
static struct s5p_usbgadget_platdata smdk4x12_usbgadget_pdata;

/* Hand the (zero-filled) gadget platform data to the S5P gadget core. */
static void __init smdk4x12_usbgadget_init(void)
{
	s5p_usbgadget_set_platdata(&smdk4x12_usbgadget_pdata);
}
#endif
/* MAX8952 buck supplying vdd_mif (0.8-1.1 V, voltage-change only). */
static struct regulator_consumer_supply max8952_supply =
	REGULATOR_SUPPLY("vdd_mif", NULL);

static struct regulator_init_data max8952_init_data = {
	.constraints = {
		.name = "vdd_mif range",
		.min_uV = 800000,
		.max_uV = 1100000,
		.always_on = 1,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
		.state_mem = {
			.uV = 1100000,
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &max8952_supply,
};

static struct max8649_platform_data exynos4_max8952_info = {
	.mode = 1,	/* VID1 = 0, VID0 = 1 */
	.extclk = 0,
	.ramp_timing = MAX8649_RAMP_32MV,
	.regulator = &max8952_init_data,
};
/* max8997 */
/* Consumer mappings for the MAX8997 PMIC: bucks feed the SoC power
 * domains (arm/int/g3d), LDOs feed the named vdd_ldoN rails. */
static struct regulator_consumer_supply max8997_buck1 =
	REGULATOR_SUPPLY("vdd_arm", NULL);
static struct regulator_consumer_supply max8997_buck2 =
	REGULATOR_SUPPLY("vdd_int", NULL);
static struct regulator_consumer_supply max8997_buck3 =
	REGULATOR_SUPPLY("vdd_g3d", NULL);
static struct regulator_consumer_supply __initdata ldo2_consumer =
	REGULATOR_SUPPLY("vdd_ldo2", NULL);
static struct regulator_consumer_supply __initdata ldo3_consumer =
	REGULATOR_SUPPLY("vdd_ldo3", NULL);
static struct regulator_consumer_supply __initdata ldo4_consumer =
	REGULATOR_SUPPLY("vdd_ldo4", NULL);
static struct regulator_consumer_supply __initdata ldo5_consumer =
	REGULATOR_SUPPLY("vdd_ldo5", NULL);
static struct regulator_consumer_supply __initdata ldo6_consumer =
	REGULATOR_SUPPLY("vdd_ldo6", NULL);
static struct regulator_consumer_supply __initdata ldo7_consumer =
	REGULATOR_SUPPLY("vdd_ldo7", NULL);
static struct regulator_consumer_supply __initdata ldo8_consumer =
	REGULATOR_SUPPLY("vdd_ldo8", NULL);
static struct regulator_consumer_supply __initdata ldo9_consumer =
	REGULATOR_SUPPLY("vdd_ldo9", NULL);
static struct regulator_consumer_supply __initdata ldo10_consumer =
	REGULATOR_SUPPLY("vdd_ldo10", NULL);
static struct regulator_consumer_supply __initdata ldo11_consumer =
	REGULATOR_SUPPLY("vdd_ldo11", NULL);
/* LDO12 feeds the ADC rail rather than a generic vdd_ldoN name. */
static struct regulator_consumer_supply __initdata ldo12_consumer =
	REGULATOR_SUPPLY("vdd_adc", NULL);
static struct regulator_consumer_supply __initdata ldo14_consumer =
	REGULATOR_SUPPLY("vdd_ldo14", NULL);
static struct regulator_consumer_supply __initdata ldo21_consumer =
	REGULATOR_SUPPLY("vdd_ldo21", NULL);
/* LDO2: fixed 1.0 V, always on, kept enabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo2_data = {
	.constraints = {
		.name = "vdd_ldo2 range",
		.min_uV = 1000000,
		.max_uV = 1000000,
		.apply_uV = 1,
		.always_on = 1,
		.state_mem = {
			.enabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo2_consumer,
};

/* LDO3: fixed 1.0 V, always on, switchable, disabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo3_data = {
	.constraints = {
		.name = "vdd_ldo3 range",
		.min_uV = 1000000,
		.max_uV = 1000000,
		.apply_uV = 1,
		.always_on = 1,
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		.state_mem = {
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo3_consumer,
};

/* LDO4: fixed 1.8 V, always on, switchable, disabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo4_data = {
	.constraints = {
		.name = "vdd_ldo4 range",
		.min_uV = 1800000,
		.max_uV = 1800000,
		.apply_uV = 1,
		.always_on = 1,
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		.state_mem = {
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo4_consumer,
};

/* LDO5: fixed 1.0 V, always on, kept enabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo5_data = {
	.constraints = {
		.name = "vdd_ldo5 range",
		.min_uV = 1000000,
		.max_uV = 1000000,
		.apply_uV = 1,
		.always_on = 1,
		.state_mem = {
			.enabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo5_consumer,
};

/* LDO6: fixed 1.8 V, always on, kept enabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo6_data = {
	.constraints = {
		.name = "vdd_ldo6 range",
		.min_uV = 1800000,
		.max_uV = 1800000,
		.apply_uV = 1,
		.always_on = 1,
		.state_mem = {
			.enabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo6_consumer,
};

/* LDO7: fixed 1.8 V, always on, kept enabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo7_data = {
	.constraints = {
		.name = "vdd_ldo7 range",
		.min_uV = 1800000,
		.max_uV = 1800000,
		.apply_uV = 1,
		.always_on = 1,
		.state_mem = {
			.enabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo7_consumer,
};

/* LDO8: fixed 3.3 V, always on, switchable, disabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo8_data = {
	.constraints = {
		.name = "vdd_ldo8 range",
		.min_uV = 3300000,
		.max_uV = 3300000,
		.apply_uV = 1,
		.always_on = 1,
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		.state_mem = {
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo8_consumer,
};

/* LDO9: fixed 2.8 V, always on, kept enabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo9_data = {
	.constraints = {
		.name = "vdd_ldo9 range",
		.min_uV = 2800000,
		.max_uV = 2800000,
		.apply_uV = 1,
		.always_on = 1,
		.state_mem = {
			.enabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo9_consumer,
};

/* LDO10: fixed 1.0 V, always on, switchable, disabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo10_data = {
	.constraints = {
		.name = "vdd_ldo10 range",
		.min_uV = 1000000,
		.max_uV = 1000000,
		.apply_uV = 1,
		.always_on = 1,
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
		.state_mem = {
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo10_consumer,
};

/* LDO11: fixed 2.8 V, always on, kept enabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo11_data = {
	.constraints = {
		.name = "vdd_ldo11 range",
		.min_uV = 2800000,
		.max_uV = 2800000,
		.apply_uV = 1,
		.always_on = 1,
		.state_mem = {
			.enabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo11_consumer,
};

/* LDO12 (ADC rail): fixed 1.8 V, always on, kept enabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo12_data = {
	.constraints = {
		.name = "vdd_adc range",
		.min_uV = 1800000,
		.max_uV = 1800000,
		.apply_uV = 1,
		.always_on = 1,
		.state_mem = {
			.enabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo12_consumer,
};

/* LDO14: fixed 1.8 V, always on, kept enabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo14_data = {
	.constraints = {
		.name = "vdd_ldo14 range",
		.min_uV = 1800000,
		.max_uV = 1800000,
		.apply_uV = 1,
		.always_on = 1,
		.state_mem = {
			.enabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo14_consumer,
};

/* LDO21: fixed 1.2 V, always on, kept enabled in sleep. */
static struct regulator_init_data __initdata max8997_ldo21_data = {
	.constraints = {
		.name = "vdd_ldo21 range",
		.min_uV = 1200000,
		.max_uV = 1200000,
		.apply_uV = 1,
		.always_on = 1,
		.state_mem = {
			.enabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &ldo21_consumer,
};
/* BUCK1 (vdd_arm): 0.8-1.5 V, voltage change allowed, off in sleep. */
static struct regulator_init_data __initdata max8997_buck1_data = {
	.constraints = {
		.name = "vdd_arm range",
		.min_uV = 800000,
		.max_uV = 1500000,
		.always_on = 1,
		.boot_on = 1,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
		.state_mem = {
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &max8997_buck1,
};

/* BUCK2 (vdd_int): 0.8-1.15 V, voltage change allowed, off in sleep. */
static struct regulator_init_data __initdata max8997_buck2_data = {
	.constraints = {
		.name = "vdd_int range",
		.min_uV = 800000,
		.max_uV = 1150000,
		.always_on = 1,
		.boot_on = 1,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
		.state_mem = {
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &max8997_buck2,
};

/* BUCK3 (vdd_g3d): 0.8-1.2 V; not always-on so the GPU rail can be
 * switched off, hence the additional CHANGE_STATUS op. */
static struct regulator_init_data __initdata max8997_buck3_data = {
	.constraints = {
		.name = "vdd_g3d range",
		.min_uV = 800000,
		.max_uV = 1200000,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
			REGULATOR_CHANGE_STATUS,
		.state_mem = {
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &max8997_buck3,
};

/* Regulator id -> init-data table handed to the MAX8997 driver. */
static struct max8997_regulator_data __initdata max8997_regulators[] = {
	{ MAX8997_LDO2, &max8997_ldo2_data, },
	{ MAX8997_LDO3, &max8997_ldo3_data, },
	{ MAX8997_LDO4, &max8997_ldo4_data, },
	{ MAX8997_LDO5, &max8997_ldo5_data, },
	{ MAX8997_LDO6, &max8997_ldo6_data, },
	{ MAX8997_LDO7, &max8997_ldo7_data, },
	{ MAX8997_LDO8, &max8997_ldo8_data, },
	{ MAX8997_LDO9, &max8997_ldo9_data, },
	{ MAX8997_LDO10, &max8997_ldo10_data, },
	{ MAX8997_LDO11, &max8997_ldo11_data, },
	{ MAX8997_LDO12, &max8997_ldo12_data, },
	{ MAX8997_LDO14, &max8997_ldo14_data, },
	{ MAX8997_LDO21, &max8997_ldo21_data, },
	{ MAX8997_BUCK1, &max8997_buck1_data, },
	{ MAX8997_BUCK2, &max8997_buck2_data, },
	{ MAX8997_BUCK3, &max8997_buck3_data, },
};
/*
 * MAX8997 platform data. The buckN_voltage arrays are preset DVS
 * voltage tables (8 entries each) - presumably indexed by the DVS
 * pin state; confirm against the max8997 driver.
 * Note: buck1_voltage[0] is 1300000 uV; the original comment wrongly
 * said 1.25V.
 */
static struct max8997_platform_data __initdata exynos4_max8997_info = {
	.num_regulators = ARRAY_SIZE(max8997_regulators),
	.regulators = max8997_regulators,
	.buck1_voltage[0] = 1300000, /* 1.3V */
	.buck1_voltage[1] = 1100000, /* 1.1V */
	.buck1_voltage[2] = 1100000, /* 1.1V */
	.buck1_voltage[3] = 1100000, /* 1.1V */
	.buck1_voltage[4] = 1100000, /* 1.1V */
	.buck1_voltage[5] = 1100000, /* 1.1V */
	.buck1_voltage[6] = 1000000, /* 1.0V */
	.buck1_voltage[7] = 950000, /* 0.95V */
	.buck2_voltage[0] = 1037500, /* 1.0375V */
	.buck2_voltage[1] = 1000000, /* 1.0V */
	.buck2_voltage[2] = 950000, /* 0.95V */
	.buck2_voltage[3] = 900000, /* 0.9V */
	.buck2_voltage[4] = 1000000, /* 1.0V */
	.buck2_voltage[5] = 1000000, /* 1.0V */
	.buck2_voltage[6] = 950000, /* 0.95V */
	.buck2_voltage[7] = 900000, /* 0.9V */
	.buck5_voltage[0] = 1100000, /* 1.1V */
	.buck5_voltage[1] = 1100000, /* 1.1V */
	.buck5_voltage[2] = 1100000, /* 1.1V */
	.buck5_voltage[3] = 1100000, /* 1.1V */
	.buck5_voltage[4] = 1100000, /* 1.1V */
	.buck5_voltage[5] = 1100000, /* 1.1V */
	.buck5_voltage[6] = 1100000, /* 1.1V */
	.buck5_voltage[7] = 1100000, /* 1.1V */
};
/* max77686 */
/* Consumer mappings for the MAX77686 PMIC: four SoC power-domain
 * bucks plus two fixed 1.9 V LDOs. */
static struct regulator_consumer_supply max77686_buck1 =
	REGULATOR_SUPPLY("vdd_mif", NULL);
static struct regulator_consumer_supply max77686_buck2 =
	REGULATOR_SUPPLY("vdd_arm", NULL);
static struct regulator_consumer_supply max77686_buck3 =
	REGULATOR_SUPPLY("vdd_int", NULL);
static struct regulator_consumer_supply max77686_buck4 =
	REGULATOR_SUPPLY("vdd_g3d", NULL);
static struct regulator_consumer_supply max77686_ldo11_consumer =
	REGULATOR_SUPPLY("vdd_ldo11", NULL);
static struct regulator_consumer_supply max77686_ldo14_consumer =
	REGULATOR_SUPPLY("vdd_ldo14", NULL);
/* BUCK1 (vdd_mif): 0.8-1.05 V, voltage and status changes allowed. */
static struct regulator_init_data max77686_buck1_data = {
	.constraints = {
		.name = "vdd_mif range",
		.min_uV = 800000,
		.max_uV = 1050000,
		.always_on = 1,
		.boot_on = 1,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
			REGULATOR_CHANGE_STATUS,
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &max77686_buck1,
};

/* BUCK2 (vdd_arm): 0.8-1.35 V, voltage change only. */
static struct regulator_init_data max77686_buck2_data = {
	.constraints = {
		.name = "vdd_arm range",
		.min_uV = 800000,
		.max_uV = 1350000,
		.always_on = 1,
		.boot_on = 1,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &max77686_buck2,
};

/* BUCK3 (vdd_int): 0.8-1.15 V, voltage change only. */
static struct regulator_init_data max77686_buck3_data = {
	.constraints = {
		.name = "vdd_int range",
		.min_uV = 800000,
		.max_uV = 1150000,
		.always_on = 1,
		.boot_on = 1,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &max77686_buck3,
};

/* BUCK4 (vdd_g3d): 0.85-1.2 V; not always-on so the GPU rail can be
 * turned off, disabled in sleep. */
static struct regulator_init_data max77686_buck4_data = {
	.constraints = {
		.name = "vdd_g3d range",
		.min_uV = 850000,
		.max_uV = 1200000,
		.boot_on = 1,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
			REGULATOR_CHANGE_STATUS,
		.state_mem = {
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &max77686_buck4,
};

/* LDO11: fixed 1.9 V, always on, kept enabled in sleep. */
static struct regulator_init_data max77686_ldo11_data = {
	.constraints = {
		.name = "vdd_ldo11 range",
		.min_uV = 1900000,
		.max_uV = 1900000,
		.apply_uV = 1,
		.always_on = 1,
		.state_mem = {
			.enabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &max77686_ldo11_consumer,
};

/* LDO14: fixed 1.9 V, always on, kept enabled in sleep. */
static struct regulator_init_data max77686_ldo14_data = {
	.constraints = {
		.name = "vdd_ldo14 range",
		.min_uV = 1900000,
		.max_uV = 1900000,
		.apply_uV = 1,
		.always_on = 1,
		.state_mem = {
			.enabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &max77686_ldo14_consumer,
};
/* Regulator id -> init-data table handed to the MAX77686 driver. */
static struct max77686_regulator_data max77686_regulators[] = {
	{MAX77686_BUCK1, &max77686_buck1_data,},
	{MAX77686_BUCK2, &max77686_buck2_data,},
	{MAX77686_BUCK3, &max77686_buck3_data,},
	{MAX77686_BUCK4, &max77686_buck4_data,},
	{MAX77686_LDO11, &max77686_ldo11_data,},
	{MAX77686_LDO14, &max77686_ldo14_data,},
};

/* Per-regulator operating mode: all used regulators set to STANDBY. */
struct max77686_opmode_data max77686_opmode_data[MAX77686_REG_MAX] = {
	[MAX77686_LDO11] = {MAX77686_LDO11, MAX77686_OPMODE_STANDBY},
	[MAX77686_LDO14] = {MAX77686_LDO14, MAX77686_OPMODE_STANDBY},
	[MAX77686_BUCK1] = {MAX77686_BUCK1, MAX77686_OPMODE_STANDBY},
	[MAX77686_BUCK2] = {MAX77686_BUCK2, MAX77686_OPMODE_STANDBY},
	[MAX77686_BUCK3] = {MAX77686_BUCK3, MAX77686_OPMODE_STANDBY},
	[MAX77686_BUCK4] = {MAX77686_BUCK4, MAX77686_OPMODE_STANDBY},
};
/*
 * MAX77686 platform data: no IRQ/wakeup wiring on this board
 * (irq_gpio/irq_base/wakeup all zero). The buckN_voltage arrays are
 * preset DVS voltage tables (8 entries each) - presumably selected
 * by DVS pin state; confirm against the max77686 driver.
 */
static struct max77686_platform_data exynos4_max77686_info = {
	.num_regulators = ARRAY_SIZE(max77686_regulators),
	.regulators = max77686_regulators,
	.irq_gpio = 0,
	.irq_base = 0,
	.wakeup = 0,
	.opmode_data = max77686_opmode_data,
	.ramp_rate = MAX77686_RAMP_RATE_27MV,
	.buck2_voltage[0] = 1300000, /* 1.3V */
	.buck2_voltage[1] = 1000000, /* 1.0V */
	.buck2_voltage[2] = 950000, /* 0.95V */
	.buck2_voltage[3] = 900000, /* 0.9V */
	.buck2_voltage[4] = 1000000, /* 1.0V */
	.buck2_voltage[5] = 1000000, /* 1.0V */
	.buck2_voltage[6] = 950000, /* 0.95V */
	.buck2_voltage[7] = 900000, /* 0.9V */
	.buck3_voltage[0] = 1037500, /* 1.0375V */
	.buck3_voltage[1] = 1000000, /* 1.0V */
	.buck3_voltage[2] = 950000, /* 0.95V */
	.buck3_voltage[3] = 900000, /* 0.9V */
	.buck3_voltage[4] = 1000000, /* 1.0V */
	.buck3_voltage[5] = 1000000, /* 1.0V */
	.buck3_voltage[6] = 950000, /* 0.95V */
	.buck3_voltage[7] = 900000, /* 0.9V */
	.buck4_voltage[0] = 1100000, /* 1.1V */
	.buck4_voltage[1] = 1000000, /* 1.0V */
	.buck4_voltage[2] = 950000, /* 0.95V */
	.buck4_voltage[3] = 900000, /* 0.9V */
	.buck4_voltage[4] = 1000000, /* 1.0V */
	.buck4_voltage[5] = 1000000, /* 1.0V */
	.buck4_voltage[6] = 950000, /* 0.95V */
	.buck4_voltage[7] = 900000, /* 0.9V */
};
#ifdef CONFIG_REGULATOR_S5M8767
/* S5M8767 Regulator */
/*
 * Configure the S5M8767 PMIC interrupt line (GPX3(2) / EINT26):
 * mux the pad to its external-interrupt function and enable a pull-up.
 * Always returns 0.
 */
static int s5m_cfg_irq(void)
{
	/* AP_PMIC_IRQ: EINT26 */
	s3c_gpio_cfgpin(EXYNOS4_GPX3(2), S3C_GPIO_SFN(0xF));
	s3c_gpio_setpull(EXYNOS4_GPX3(2), S3C_GPIO_PULL_UP);
	return 0;
}

/* Consumer mappings: the four S5M8767 bucks feed the SoC domains. */
static struct regulator_consumer_supply s5m8767_buck1_consumer =
	REGULATOR_SUPPLY("vdd_mif", NULL);
static struct regulator_consumer_supply s5m8767_buck2_consumer =
	REGULATOR_SUPPLY("vdd_arm", NULL);
static struct regulator_consumer_supply s5m8767_buck3_consumer =
	REGULATOR_SUPPLY("vdd_int", NULL);
static struct regulator_consumer_supply s5m8767_buck4_consumer =
	REGULATOR_SUPPLY("vdd_g3d", NULL);
/* BUCK1 (vdd_mif): 0.8-1.1 V, voltage/status changes, off in sleep. */
static struct regulator_init_data s5m8767_buck1_data = {
	.constraints = {
		.name = "vdd_mif range",
		.min_uV = 800000,
		.max_uV = 1100000,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
			REGULATOR_CHANGE_STATUS,
		.state_mem = {
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &s5m8767_buck1_consumer,
};

/* BUCK2 (vdd_arm): 0.8-1.35 V, voltage/status changes, off in sleep. */
static struct regulator_init_data s5m8767_buck2_data = {
	.constraints = {
		.name = "vdd_arm range",
		.min_uV = 800000,
		.max_uV = 1350000,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
			REGULATOR_CHANGE_STATUS,
		.state_mem = {
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &s5m8767_buck2_consumer,
};

/* BUCK3 (vdd_int): 0.8-1.15 V, 1.1 V in sleep mode.
 * NOTE(review): apply_uV = 1 with min_uV != max_uV is unusual - the
 * regulator core expects apply_uV only for fixed ranges; confirm
 * whether this was intended. */
static struct regulator_init_data s5m8767_buck3_data = {
	.constraints = {
		.name = "vdd_int range",
		.min_uV = 800000,
		.max_uV = 1150000,
		.apply_uV = 1,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
			REGULATOR_CHANGE_STATUS,
		.state_mem = {
			.uV = 1100000,
			.mode = REGULATOR_MODE_NORMAL,
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &s5m8767_buck3_consumer,
};

/* BUCK4 (vdd_g3d): 0.85-1.2 V, voltage/status changes, off in sleep. */
static struct regulator_init_data s5m8767_buck4_data = {
	.constraints = {
		.name = "vdd_g3d range",
		.min_uV = 850000,
		.max_uV = 1200000,
		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
			REGULATOR_CHANGE_STATUS,
		.state_mem = {
			.disabled = 1,
		},
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &s5m8767_buck4_consumer,
};
/* Regulator id -> init-data table handed to the S5M8767 driver. */
static struct s5m_regulator_data pegasus_regulators[] = {
	{ S5M8767_BUCK1, &s5m8767_buck1_data },
	{ S5M8767_BUCK2, &s5m8767_buck2_data },
	{ S5M8767_BUCK3, &s5m8767_buck3_data },
	{ S5M8767_BUCK4, &s5m8767_buck4_data },
};

/*
 * S5M8767 platform data: IRQ setup via s5m_cfg_irq(), wakeup-capable,
 * WTSR/SMPL enabled. The buckN_voltage arrays are preset DVS voltage
 * tables selected through the three buck_gpios DVS lines (default
 * index 3); ramping at 25 (driver units) is enabled for bucks 2-4.
 */
static struct s5m_platform_data exynos4_s5m8767_pdata = {
	.device_type = S5M8767X,
	.irq_base = IRQ_BOARD_START,
	.num_regulators = ARRAY_SIZE(pegasus_regulators),
	.regulators = pegasus_regulators,
	.cfg_pmic_irq = s5m_cfg_irq,
	.wakeup = 1,
	.opmode_data = s5m8767_opmode_data,
	.wtsr_smpl = 1,
	.buck2_voltage[0] = 1250000,
	.buck2_voltage[1] = 1200000,
	.buck2_voltage[2] = 1150000,
	.buck2_voltage[3] = 1100000,
	.buck2_voltage[4] = 1050000,
	.buck2_voltage[5] = 1000000,
	.buck2_voltage[6] = 950000,
	.buck2_voltage[7] = 900000,
	.buck3_voltage[0] = 1100000,
	.buck3_voltage[1] = 1000000,
	.buck3_voltage[2] = 950000,
	.buck3_voltage[3] = 900000,
	.buck3_voltage[4] = 1100000,
	.buck3_voltage[5] = 1000000,
	.buck3_voltage[6] = 950000,
	.buck3_voltage[7] = 900000,
	.buck4_voltage[0] = 1200000,
	.buck4_voltage[1] = 1150000,
	.buck4_voltage[2] = 1200000,
	.buck4_voltage[3] = 1100000,
	.buck4_voltage[4] = 1100000,
	.buck4_voltage[5] = 1100000,
	.buck4_voltage[6] = 1100000,
	.buck4_voltage[7] = 1100000,
	.buck_default_idx = 3,
	/* DVS selection GPIOs */
	.buck_gpios[0] = EXYNOS4_GPX2(3),
	.buck_gpios[1] = EXYNOS4_GPX2(4),
	.buck_gpios[2] = EXYNOS4_GPX2(5),
	.buck_ramp_delay = 25,
	.buck2_ramp_enable = true,
	.buck3_ramp_enable = true,
	.buck4_ramp_enable = true,
};
/* End of S5M8767 */
#endif
#ifdef CONFIG_VIDEO_S5P_MIPI_CSIS
/* Both MIPI-CSIS channels share one always-on fixed 5 V supply. */
static struct regulator_consumer_supply mipi_csi_fixed_voltage_supplies[] = {
	REGULATOR_SUPPLY("mipi_csi", "s5p-mipi-csis.0"),
	REGULATOR_SUPPLY("mipi_csi", "s5p-mipi-csis.1"),
};

static struct regulator_init_data mipi_csi_fixed_voltage_init_data = {
	.constraints = {
		.always_on = 1,
	},
	.num_consumer_supplies = ARRAY_SIZE(mipi_csi_fixed_voltage_supplies),
	.consumer_supplies = mipi_csi_fixed_voltage_supplies,
};

/* gpio = -EINVAL: no enable GPIO, the rail is permanently on. */
static struct fixed_voltage_config mipi_csi_fixed_voltage_config = {
	.supply_name = "DC_5V",
	.microvolts = 5000000,
	.gpio = -EINVAL,
	.init_data = &mipi_csi_fixed_voltage_init_data,
};

static struct platform_device mipi_csi_fixed_voltage = {
	.name = "reg-fixed-voltage",
	.id = 3,
	.dev = {
		.platform_data = &mipi_csi_fixed_voltage_config,
	},
};
#endif
#ifdef CONFIG_VIDEO_M5MOLS
/* All M5MOLS camera sensor rails come from one fixed 1.8 V supply. */
static struct regulator_consumer_supply m5mols_fixed_voltage_supplies[] = {
	REGULATOR_SUPPLY("core", NULL),
	REGULATOR_SUPPLY("dig_18", NULL),
	REGULATOR_SUPPLY("d_sensor", NULL),
	REGULATOR_SUPPLY("dig_28", NULL),
	REGULATOR_SUPPLY("a_sensor", NULL),
	REGULATOR_SUPPLY("dig_12", NULL),
};

static struct regulator_init_data m5mols_fixed_voltage_init_data = {
	.constraints = {
		.always_on = 1,
	},
	.num_consumer_supplies = ARRAY_SIZE(m5mols_fixed_voltage_supplies),
	.consumer_supplies = m5mols_fixed_voltage_supplies,
};

/* gpio = -EINVAL: no enable GPIO, the rail is permanently on. */
static struct fixed_voltage_config m5mols_fixed_voltage_config = {
	.supply_name = "CAM_SENSOR",
	.microvolts = 1800000,
	.gpio = -EINVAL,
	.init_data = &m5mols_fixed_voltage_init_data,
};

static struct platform_device m5mols_fixed_voltage = {
	.name = "reg-fixed-voltage",
	.id = 4,
	.dev = {
		.platform_data = &m5mols_fixed_voltage_config,
	},
};
#endif
/* WM8994 codec supplies (i2c bus 1, address 0x1a = "1-001a"):
 * three fixed rails (1.8 V, 5 V, 3.3 V) plus the codec's own LDOs. */
static struct regulator_consumer_supply wm8994_fixed_voltage0_supplies[] = {
	REGULATOR_SUPPLY("AVDD2", "1-001a"),
	REGULATOR_SUPPLY("CPVDD", "1-001a"),
};

static struct regulator_consumer_supply wm8994_fixed_voltage1_supplies[] = {
	REGULATOR_SUPPLY("SPKVDD1", "1-001a"),
	REGULATOR_SUPPLY("SPKVDD2", "1-001a"),
};

static struct regulator_consumer_supply wm8994_fixed_voltage2_supplies =
	REGULATOR_SUPPLY("DBVDD", "1-001a");

static struct regulator_init_data wm8994_fixed_voltage0_init_data = {
	.constraints = {
		.always_on = 1,
	},
	.num_consumer_supplies = ARRAY_SIZE(wm8994_fixed_voltage0_supplies),
	.consumer_supplies = wm8994_fixed_voltage0_supplies,
};

static struct regulator_init_data wm8994_fixed_voltage1_init_data = {
	.constraints = {
		.always_on = 1,
	},
	.num_consumer_supplies = ARRAY_SIZE(wm8994_fixed_voltage1_supplies),
	.consumer_supplies = wm8994_fixed_voltage1_supplies,
};

static struct regulator_init_data wm8994_fixed_voltage2_init_data = {
	.constraints = {
		.always_on = 1,
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &wm8994_fixed_voltage2_supplies,
};

/* 1.8 V rail: AVDD2 + CPVDD. */
static struct fixed_voltage_config wm8994_fixed_voltage0_config = {
	.supply_name = "VDD_1.8V",
	.microvolts = 1800000,
	.gpio = -EINVAL,
	.init_data = &wm8994_fixed_voltage0_init_data,
};

/* 5 V rail: speaker supplies. */
static struct fixed_voltage_config wm8994_fixed_voltage1_config = {
	.supply_name = "DC_5V",
	.microvolts = 5000000,
	.gpio = -EINVAL,
	.init_data = &wm8994_fixed_voltage1_init_data,
};

/* 3.3 V rail: DBVDD. */
static struct fixed_voltage_config wm8994_fixed_voltage2_config = {
	.supply_name = "VDD_3.3V",
	.microvolts = 3300000,
	.gpio = -EINVAL,
	.init_data = &wm8994_fixed_voltage2_init_data,
};

static struct platform_device wm8994_fixed_voltage0 = {
	.name = "reg-fixed-voltage",
	.id = 0,
	.dev = {
		.platform_data = &wm8994_fixed_voltage0_config,
	},
};

static struct platform_device wm8994_fixed_voltage1 = {
	.name = "reg-fixed-voltage",
	.id = 1,
	.dev = {
		.platform_data = &wm8994_fixed_voltage1_config,
	},
};

static struct platform_device wm8994_fixed_voltage2 = {
	.name = "reg-fixed-voltage",
	.id = 2,
	.dev = {
		.platform_data = &wm8994_fixed_voltage2_config,
	},
};

/* The codec's two internal LDOs feed AVDD1 and DCVDD. */
static struct regulator_consumer_supply wm8994_avdd1_supply =
	REGULATOR_SUPPLY("AVDD1", "1-001a");

static struct regulator_consumer_supply wm8994_dcvdd_supply =
	REGULATOR_SUPPLY("DCVDD", "1-001a");

static struct regulator_init_data wm8994_ldo1_data = {
	.constraints = {
		.name = "AVDD1",
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &wm8994_avdd1_supply,
};

static struct regulator_init_data wm8994_ldo2_data = {
	.constraints = {
		.name = "DCVDD",
	},
	.num_consumer_supplies = 1,
	.consumer_supplies = &wm8994_dcvdd_supply,
};

static struct wm8994_pdata wm8994_platform_data = {
	/* configure gpio1 function: 0x0001(Logic level input/output) */
	.gpio_defaults[0] = 0x0001,
	/* If the i2s0 and i2s2 is enabled simultaneously */
	.gpio_defaults[7] = 0x8100, /* GPIO8 DACDAT3 in */
	.gpio_defaults[8] = 0x0100, /* GPIO9 ADCDAT3 out */
	.gpio_defaults[9] = 0x0100, /* GPIO10 LRCLK3 out */
	.gpio_defaults[10] = 0x0100, /* GPIO11 BCLK3 out */
	.ldo[0] = { 0, NULL, &wm8994_ldo1_data },
	.ldo[1] = { 0, NULL, &wm8994_ldo2_data },
};
/* I2C0: PMIC - S5M8767 when selected, otherwise MAX8997 + MAX77686. */
static struct i2c_board_info i2c_devs0[] __initdata = {
#ifdef CONFIG_REGULATOR_S5M8767
	{
		I2C_BOARD_INFO("s5m87xx", 0xCC >> 1),
		.platform_data = &exynos4_s5m8767_pdata,
		.irq = IRQ_EINT(26),
	},
#else
	{
		I2C_BOARD_INFO("max8997", 0x66),
		.platform_data = &exynos4_max8997_info,
	}, {
		I2C_BOARD_INFO("max77686", (0x12 >> 1)),
		.platform_data = &exynos4_max77686_info,
	},
#endif
};

/* I2C1: WM8994 audio codec. */
static struct i2c_board_info i2c_devs1[] __initdata = {
	{
		I2C_BOARD_INFO("wm8994", 0x1a),
		.platform_data = &wm8994_platform_data,
	},
};

/* I2C2: HDMI DDC (TV-out builds only). */
static struct i2c_board_info i2c_devs2[] __initdata = {
#ifdef CONFIG_VIDEO_TVOUT
	{
		I2C_BOARD_INFO("s5p_ddc", (0x74 >> 1)),
	},
#endif
};

/* I2C3: MAX8952 buck regulator. */
static struct i2c_board_info i2c_devs3[] __initdata = {
	{
		I2C_BOARD_INFO("max8952", 0x60),
		.platform_data = &exynos4_max8952_info,
	},
};

/* I2C7: Pixcir touchscreen controller. */
static struct i2c_board_info i2c_devs7[] __initdata = {
	{
		I2C_BOARD_INFO("pixcir-ts", 0x5C),
	},
};
#ifdef CONFIG_BATTERY_SAMSUNG
/* Stub battery device ("samsung-fake-battery"). */
static struct platform_device samsung_device_battery = {
	.name = "samsung-fake-battery",
	.id = -1,
};
#endif

/* Direct GPIO key map: GPX0(0) reports KEY_POWER. */
static struct gpio_event_direct_entry smdk4x12_keypad_key_map[] = {
	{
		.gpio = EXYNOS4_GPX0(0),
		.code = KEY_POWER,
	}
};
/* gpio-event input configuration: EV_KEY with a 5 ms debounce. */
static struct gpio_event_input_info smdk4x12_keypad_key_info = {
	.info.func = gpio_event_input_func,
	.info.no_suspend = true,
	.debounce_time.tv64 = 5 * NSEC_PER_MSEC,
	.type = EV_KEY,
	.keymap = smdk4x12_keypad_key_map,
	.keymap_size = ARRAY_SIZE(smdk4x12_keypad_key_map)
};

static struct gpio_event_info *smdk4x12_input_info[] = {
	&smdk4x12_keypad_key_info.info,
};

static struct gpio_event_platform_data smdk4x12_input_data = {
	.names = {
		"smdk4x12-keypad",
		NULL,
	},
	.info = smdk4x12_input_info,
	.info_count = ARRAY_SIZE(smdk4x12_input_info),
};

/* Platform device tying the gpio-event data above to the driver. */
static struct platform_device smdk4x12_input_device = {
	.name = GPIO_EVENT_DEV_NAME,
	.id = 0,
	.dev = {
		.platform_data = &smdk4x12_input_data,
	},
};
/*
 * One-shot boot setup: disable the pull on GPX0(0) (the power key /
 * suspend-resume line). The pin is requested only for the duration of
 * the pull change and released immediately.
 */
static void __init smdk4x12_gpio_power_init(void)
{
	int ret;

	ret = gpio_request_one(EXYNOS4_GPX0(0), 0, "GPX0");
	if (ret) {
		printk(KERN_ERR "failed to request GPX0 for "
				"suspend/resume control\n");
		return;
	}

	s3c_gpio_setpull(EXYNOS4_GPX0(0), S3C_GPIO_PULL_NONE);
	gpio_free(EXYNOS4_GPX0(0));
}
/* Matrix keypad variant 0: 2 rows x 5 columns. */
static uint32_t smdk4x12_keymap0[] __initdata = {
	/* KEY(row, col, keycode) */
	KEY(1, 0, KEY_D), KEY(1, 1, KEY_A), KEY(1, 2, KEY_B),
	KEY(1, 3, KEY_E), KEY(1, 4, KEY_C)
};

static struct matrix_keymap_data smdk4x12_keymap_data0 __initdata = {
	.keymap = smdk4x12_keymap0,
	.keymap_size = ARRAY_SIZE(smdk4x12_keymap0),
};

static struct samsung_keypad_platdata smdk4x12_keypad_data0 __initdata = {
	.keymap_data = &smdk4x12_keymap_data0,
	.rows = 2,
	.cols = 5,
};

/* Matrix keypad variant 1: 3 rows x 8 columns. */
static uint32_t smdk4x12_keymap1[] __initdata = {
	/* KEY(row, col, keycode) */
	KEY(1, 3, KEY_1), KEY(1, 4, KEY_2), KEY(1, 5, KEY_3),
	KEY(1, 6, KEY_4), KEY(1, 7, KEY_5),
	KEY(2, 5, KEY_D), KEY(2, 6, KEY_A), KEY(2, 7, KEY_B),
	KEY(0, 7, KEY_E), KEY(0, 5, KEY_C)
};

static struct matrix_keymap_data smdk4x12_keymap_data1 __initdata = {
	.keymap = smdk4x12_keymap1,
	.keymap_size = ARRAY_SIZE(smdk4x12_keymap1),
};

static struct samsung_keypad_platdata smdk4x12_keypad_data1 __initdata = {
	.keymap_data = &smdk4x12_keymap_data1,
	.rows = 3,
	.cols = 8,
};

#ifdef CONFIG_WAKEUP_ASSIST
/* Helper platform device for the wakeup_assist driver. */
static struct platform_device wakeup_assist_device = {
	.name = "wakeup_assist",
};
#endif
#ifdef CONFIG_VIDEO_FIMG2D
/* FIMG2D (G2D) v4.1 platform data: clock parents and rate. */
static struct fimg2d_platdata fimg2d_data __initdata = {
	.hw_ver = 0x41,
	.parent_clkname = "mout_g2d0",
	.clkname = "sclk_fimg2d",
	.gate_clkname = "fimg2d",
	.clkrate = 201 * 1000000, /* 201 MHz (original comment said 200 MHz) */
};
#endif
#ifdef CONFIG_USB_EXYNOS_SWITCH
static struct s5p_usbswitch_platdata smdk4x12_usbswitch_pdata;

/*
 * USB host/device switch setup: configure the host- and device-detect
 * pins (GPX3(5)/GPX3(4)) as external interrupts with pulls disabled,
 * and on boards newer than rev 0.0 claim the host VBUS enable line
 * (GPL2(0), driven low initially). Each GPIO is requested only to
 * apply the pad setup and released again; the switch driver re-claims
 * them via the platdata registered at the end.
 *
 * Fix: the device-detect failure path printed "failed to request
 * gpio_host_detect for" (copy/paste error); it now names the pin that
 * actually failed.
 */
static void __init smdk4x12_usbswitch_init(void)
{
	struct s5p_usbswitch_platdata *pdata = &smdk4x12_usbswitch_pdata;
	int err;

	pdata->gpio_host_detect = EXYNOS4_GPX3(5); /* low active */
	err = gpio_request_one(pdata->gpio_host_detect, GPIOF_IN, "HOST_DETECT");
	if (err) {
		printk(KERN_ERR "failed to request gpio_host_detect\n");
		return;
	}
	s3c_gpio_cfgpin(pdata->gpio_host_detect, S3C_GPIO_SFN(0xF));
	s3c_gpio_setpull(pdata->gpio_host_detect, S3C_GPIO_PULL_NONE);
	gpio_free(pdata->gpio_host_detect);

	pdata->gpio_device_detect = EXYNOS4_GPX3(4); /* high active */
	err = gpio_request_one(pdata->gpio_device_detect, GPIOF_IN, "DEVICE_DETECT");
	if (err) {
		printk(KERN_ERR "failed to request gpio_device_detect\n");
		return;
	}
	s3c_gpio_cfgpin(pdata->gpio_device_detect, S3C_GPIO_SFN(0xF));
	s3c_gpio_setpull(pdata->gpio_device_detect, S3C_GPIO_PULL_NONE);
	gpio_free(pdata->gpio_device_detect);

	if (samsung_board_rev_is_0_0())
		pdata->gpio_host_vbus = 0; /* rev 0.0 has no VBUS control */
	else {
		pdata->gpio_host_vbus = EXYNOS4_GPL2(0);
		err = gpio_request_one(pdata->gpio_host_vbus,
				GPIOF_OUT_INIT_LOW, "HOST_VBUS_CONTROL");
		if (err) {
			printk(KERN_ERR "failed to request gpio_host_vbus\n");
			return;
		}
		s3c_gpio_setpull(pdata->gpio_host_vbus, S3C_GPIO_PULL_NONE);
		gpio_free(pdata->gpio_host_vbus);
	}

	s5p_usbswitch_set_platdata(pdata);
}
#endif
#ifdef CONFIG_BUSFREQ_OPP
/* BUSFREQ to control memory/bus */
static struct device_domain busfreq;
#endif

/* Memory/bus frequency scaling device. */
static struct platform_device exynos4_busfreq = {
	.id = -1,
	.name = "exynos-busfreq",
};
/* Devices specific to the SMDK4412 variant (currently only the ADC);
 * presumably registered in addition to smdk4x12_devices - the
 * registration call is outside this chunk. */
static struct platform_device *smdk4412_devices[] __initdata = {
	&s3c_device_adc,
};
/*
 * Platform devices common to all SMDK4x12 boards; registered in one shot
 * via platform_add_devices() from smdk4x12_machine_init().  Which entries
 * are present is decided purely at build time by the kernel config.
 * NOTE(review): the power-domain devices are listed first so they probe
 * before the devices that are later re-parented onto them — presumably
 * intentional ordering; confirm before reordering.
 */
static struct platform_device *smdk4x12_devices[] __initdata = {
	/* Samsung Power Domain */
	&exynos4_device_pd[PD_MFC],
	&exynos4_device_pd[PD_G3D],
	&exynos4_device_pd[PD_LCD0],
	&exynos4_device_pd[PD_CAM],
	&exynos4_device_pd[PD_TV],
	&exynos4_device_pd[PD_GPS],
	&exynos4_device_pd[PD_GPS_ALIVE],
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS
	&exynos4_device_pd[PD_ISP],
#endif
#ifdef CONFIG_FB_MIPI_DSIM
	&s5p_device_mipi_dsim,
#endif
	/* mainline fimd */
#ifdef CONFIG_FB_S3C
	&s5p_device_fimd0,
	/* exactly one LCD panel device, selected by the panel config */
#if defined(CONFIG_LCD_AMS369FG06) || defined(CONFIG_LCD_LMS501KF03)
	&s3c_device_spi_gpio,
#elif defined(CONFIG_LCD_WA101S)
	&smdk4x12_lcd_wa101s,
#elif defined(CONFIG_LCD_LTE480WV)
	&smdk4x12_lcd_lte480wv,
#elif defined(CONFIG_LCD_MIPI_S6E63M0)
	&smdk4x12_mipi_lcd,
#endif
#endif
	/* legacy fimd */
#ifdef CONFIG_FB_S5P
	&s3c_device_fb,
#ifdef CONFIG_FB_S5P_LMS501KF03
	&s3c_device_spi_gpio,
#endif
#endif
	&s3c_device_wdt,
	&s3c_device_rtc,
	/* I2C busses 0-5 and 7 (bus 6 is not brought out on this board) */
	&s3c_device_i2c0,
	&s3c_device_i2c1,
	&s3c_device_i2c2,
	&s3c_device_i2c3,
	&s3c_device_i2c4,
	&s3c_device_i2c5,
	&s3c_device_i2c7,
#ifdef CONFIG_USB_EHCI_S5P
	&s5p_device_ehci,
#endif
#ifdef CONFIG_USB_OHCI_S5P
	&s5p_device_ohci,
#endif
#ifdef CONFIG_USB_GADGET
	&s3c_device_usbgadget,
#endif
#ifdef CONFIG_USB_ANDROID_RNDIS
	&s3c_device_rndis,
#endif
#ifdef CONFIG_USB_ANDROID
	&s3c_device_android_usb,
	&s3c_device_usb_mass_storage,
#endif
	/* SD/MMC hosts: legacy SDHCI channels, MSHC and DW-MMC */
#ifdef CONFIG_S3C_DEV_HSMMC
	&s3c_device_hsmmc0,
#endif
#ifdef CONFIG_S3C_DEV_HSMMC1
	&s3c_device_hsmmc1,
#endif
#ifdef CONFIG_S3C_DEV_HSMMC2
	&s3c_device_hsmmc2,
#endif
#ifdef CONFIG_S3C_DEV_HSMMC3
	&s3c_device_hsmmc3,
#endif
#ifdef CONFIG_S5P_DEV_MSHC
	&s3c_device_mshci,
#endif
#ifdef CONFIG_EXYNOS4_DEV_DWMCI
	&exynos_device_dwmci,
#endif
	/* audio interfaces */
#ifdef CONFIG_SND_SAMSUNG_AC97
	&exynos_device_ac97,
#endif
#ifdef CONFIG_SND_SAMSUNG_I2S
	&exynos_device_i2s0,
#endif
#ifdef CONFIG_SND_SAMSUNG_PCM
	&exynos_device_pcm0,
#endif
#ifdef CONFIG_SND_SAMSUNG_SPDIF
	&exynos_device_spdif,
#endif
#if defined(CONFIG_SND_SAMSUNG_RP) || defined(CONFIG_SND_SAMSUNG_ALP)
	&exynos_device_srp,
#endif
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS
	&exynos4_device_fimc_is,
#endif
	/* legacy TV-out stack */
#ifdef CONFIG_VIDEO_TVOUT
	&s5p_device_tvout,
	&s5p_device_cec,
	&s5p_device_hpd,
#endif
#ifdef CONFIG_FB_S5P_EXTDSP
	&s3c_device_extdsp,
#endif
	/* mainline TV stack */
#ifdef CONFIG_VIDEO_EXYNOS_TV
	&s5p_device_i2c_hdmiphy,
	&s5p_device_hdmi,
	&s5p_device_sdo,
	&s5p_device_mixer,
	&s5p_device_cec,
#endif
	/* FIMC: legacy (s3c) and mainline (s5p) drivers are exclusive */
#if defined(CONFIG_VIDEO_FIMC)
	&s3c_device_fimc0,
	&s3c_device_fimc1,
	&s3c_device_fimc2,
	&s3c_device_fimc3,
	/* CONFIG_VIDEO_SAMSUNG_S5P_FIMC is the feature for mainline */
#elif defined(CONFIG_VIDEO_SAMSUNG_S5P_FIMC)
	&s5p_device_fimc0,
	&s5p_device_fimc1,
	&s5p_device_fimc2,
	&s5p_device_fimc3,
#endif
#if defined(CONFIG_VIDEO_FIMC_MIPI)
	&s3c_device_csis0,
	&s3c_device_csis1,
#elif defined(CONFIG_VIDEO_S5P_MIPI_CSIS)
	&s5p_device_mipi_csis0,
	&s5p_device_mipi_csis1,
#endif
#ifdef CONFIG_VIDEO_S5P_MIPI_CSIS
	&mipi_csi_fixed_voltage,
#endif
#ifdef CONFIG_VIDEO_M5MOLS
	&m5mols_fixed_voltage,
#endif
#if defined(CONFIG_VIDEO_MFC5X) || defined(CONFIG_VIDEO_SAMSUNG_S5P_MFC)
	&s5p_device_mfc,
#endif
	/* one System MMU instance per IP block it translates for */
#ifdef CONFIG_S5P_SYSTEM_MMU
	&SYSMMU_PLATDEV(g2d_acp),
	&SYSMMU_PLATDEV(fimc0),
	&SYSMMU_PLATDEV(fimc1),
	&SYSMMU_PLATDEV(fimc2),
	&SYSMMU_PLATDEV(fimc3),
	&SYSMMU_PLATDEV(jpeg),
	&SYSMMU_PLATDEV(mfc_l),
	&SYSMMU_PLATDEV(mfc_r),
	&SYSMMU_PLATDEV(tv),
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS
	&SYSMMU_PLATDEV(is_isp),
	&SYSMMU_PLATDEV(is_drc),
	&SYSMMU_PLATDEV(is_fd),
	&SYSMMU_PLATDEV(is_cpu),
#endif
#endif /* CONFIG_S5P_SYSTEM_MMU */
#ifdef CONFIG_ION_EXYNOS
	&exynos_device_ion,
#endif
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_LITE
	&exynos_device_flite0,
	&exynos_device_flite1,
#endif
#ifdef CONFIG_VIDEO_FIMG2D
	&s5p_device_fimg2d,
#endif
#ifdef CONFIG_EXYNOS_MEDIA_DEVICE
	&exynos_device_md0,
#endif
#ifdef CONFIG_VIDEO_JPEG_V2X
	&s5p_device_jpeg,
#endif
	/* fixed regulators + ASoC DMA for the WM8994 codec */
	&wm8994_fixed_voltage0,
	&wm8994_fixed_voltage1,
	&wm8994_fixed_voltage2,
	&samsung_asoc_dma,
	&samsung_asoc_idma,
#ifdef CONFIG_BATTERY_SAMSUNG
	&samsung_device_battery,
#endif
	&samsung_device_keypad,
#ifdef CONFIG_WAKEUP_ASSIST
	&wakeup_assist_device,
#endif
	&smdk4x12_input_device,
	&smdk4x12_smsc911x,
#ifdef CONFIG_S3C64XX_DEV_SPI
	&exynos_device_spi0,
	/* SPI1 pins are shared with the LMS501KF03 panel's GPIO-SPI */
#ifndef CONFIG_FB_S5P_LMS501KF03
	&exynos_device_spi1,
#endif
	&exynos_device_spi2,
#endif
#ifdef CONFIG_EXYNOS_SETUP_THERMAL
	&exynos_device_tmu,
#endif
#ifdef CONFIG_S5P_DEV_ACE
	&s5p_device_ace,
#endif
	&exynos4_busfreq,
};
#ifdef CONFIG_EXYNOS_SETUP_THERMAL
/* Thermal-management unit trip points, in degrees Celsius. */
struct tmu_data exynos_tmu_data __initdata = {
	.ts = {
		/* throttle hysteresis: start at 85C, release below 82C */
		.stop_throttle = 82,
		.start_throttle = 85,
		/* warning hysteresis: start at 103C, release below 95C */
		.stop_warning = 95,
		.start_warning = 103,
		.start_tripping = 110, /* temp to do tripping */
	},
	.efuse_value = 55,	/* per-chip calibration fuse value */
	.slope = 0x10008802,	/* NOTE(review): raw sensor-slope register value — meaning is SoC-specific */
	.mode = 0,
};
#endif
#if defined(CONFIG_VIDEO_TVOUT)
/* Legacy TV-out hot-plug-detect and CEC platform data (all defaults). */
static struct s5p_platform_hpd hdmi_hpd_data __initdata = {
};
static struct s5p_platform_cec hdmi_cec_data __initdata = {
};
#endif
#ifdef CONFIG_VIDEO_EXYNOS_HDMI_CEC
/* NOTE(review): if CONFIG_VIDEO_TVOUT and CONFIG_VIDEO_EXYNOS_HDMI_CEC are
 * both enabled, hdmi_cec_data is defined twice and the build breaks —
 * presumably the two configs are mutually exclusive; confirm in Kconfig. */
static struct s5p_platform_cec hdmi_cec_data __initdata = {
};
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_S5P_FIMC
/*
 * Camera sensor descriptions consumed by the mainline S5P FIMC driver
 * (wired up in smdk4x12_subdev_config()).  Each enabled sensor config
 * contributes one entry; the CSI/ITU port choice selects the I2C bus and
 * mux id (A-port/CSI-C = 0, B-port/CSI-D = 1).
 */
static struct s5p_fimc_isp_info isp_info[] = {
#if defined(CONFIG_VIDEO_S5K4BA)
	/* S5K4BA: parallel ITU-601 sensor */
	{
		.board_info	= &s5k4ba_info,
		.clk_frequency	= 24000000UL,
		.bus_type	= FIMC_ITU_601,
#ifdef CONFIG_ITU_A
		.i2c_bus_num	= 0,
		.mux_id		= 0, /* A-Port : 0, B-Port : 1 */
#endif
#ifdef CONFIG_ITU_B
		.i2c_bus_num	= 1,
		.mux_id		= 1, /* A-Port : 0, B-Port : 1 */
#endif
		.flags		= FIMC_CLK_INV_VSYNC,
	},
#endif
#if defined(CONFIG_VIDEO_S5K4EA)
	/* S5K4EA: MIPI CSI-2 sensor */
	{
		.board_info	= &s5k4ea_info,
		.clk_frequency	= 24000000UL,
		.bus_type	= FIMC_MIPI_CSI2,
#ifdef CONFIG_CSI_C
		.i2c_bus_num	= 0,
		.mux_id		= 0, /* A-Port : 0, B-Port : 1 */
#endif
#ifdef CONFIG_CSI_D
		.i2c_bus_num	= 1,
		.mux_id		= 1, /* A-Port : 0, B-Port : 1 */
#endif
		.flags		= FIMC_CLK_INV_VSYNC,
		.csi_data_align	= 32,
	},
#endif
#if defined(CONFIG_VIDEO_M5MOLS)
	/* M-5MOLS: MIPI CSI-2 ISP camera on I2C bus 4/5 */
	{
		.board_info	= &m5mols_board_info,
		.clk_frequency	= 24000000UL,
		.bus_type	= FIMC_MIPI_CSI2,
#ifdef CONFIG_CSI_C
		.i2c_bus_num	= 4,
		.mux_id		= 0, /* A-Port : 0, B-Port : 1 */
#endif
#ifdef CONFIG_CSI_D
		.i2c_bus_num	= 5,
		.mux_id		= 1, /* A-Port : 0, B-Port : 1 */
#endif
		.flags		= FIMC_CLK_INV_PCLK | FIMC_CLK_INV_VSYNC,
		.csi_data_align	= 32,
	},
#endif
	/* Sensors below are routed through FIMC-IS (use_isp = true) and a
	 * FIMC-LITE instance matching their CSI port. */
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS
#if defined(CONFIG_VIDEO_S5K3H2)
	{
		.board_info	= &s5k3h2_sensor_info,
		.clk_frequency	= 24000000UL,
		.bus_type	= FIMC_MIPI_CSI2,
#ifdef CONFIG_S5K3H2_CSI_C
		.i2c_bus_num	= 0,
		.mux_id		= 0, /* A-Port : 0, B-Port : 1 */
		.flite_id	= FLITE_IDX_A,
		.cam_power	= smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_S5K3H2_CSI_D
		.i2c_bus_num	= 1,
		.mux_id		= 1, /* A-Port : 0, B-Port : 1 */
		.flite_id	= FLITE_IDX_B,
		.cam_power	= smdk4x12_cam1_reset,
#endif
		.flags		= 0,
		.csi_data_align	= 24,
		.use_isp	= true,
	},
#endif
#if defined(CONFIG_VIDEO_S5K3H7)
	{
		.board_info	= &s5k3h7_sensor_info,
		.clk_frequency	= 24000000UL,
		.bus_type	= FIMC_MIPI_CSI2,
#ifdef CONFIG_S5K3H7_CSI_C
		.i2c_bus_num	= 0,
		.mux_id		= 0, /* A-Port : 0, B-Port : 1 */
		.flite_id	= FLITE_IDX_A,
		.cam_power	= smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_S5K3H7_CSI_D
		.i2c_bus_num	= 1,
		.mux_id		= 1, /* A-Port : 0, B-Port : 1 */
		.flite_id	= FLITE_IDX_B,
		.cam_power	= smdk4x12_cam1_reset,
#endif
		.csi_data_align	= 24,
		.use_isp	= true,
	},
#endif
#if defined(CONFIG_VIDEO_S5K4E5)
	{
		.board_info	= &s5k4e5_sensor_info,
		.clk_frequency	= 24000000UL,
		.bus_type	= FIMC_MIPI_CSI2,
#ifdef CONFIG_S5K4E5_CSI_C
		.i2c_bus_num	= 0,
		.mux_id		= 0, /* A-Port : 0, B-Port : 1 */
		.flite_id	= FLITE_IDX_A,
		.cam_power	= smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_S5K4E5_CSI_D
		.i2c_bus_num	= 1,
		.mux_id		= 1, /* A-Port : 0, B-Port : 1 */
		.flite_id	= FLITE_IDX_B,
		.cam_power	= smdk4x12_cam1_reset,
#endif
		.csi_data_align	= 24,
		.use_isp	= true,
	},
#endif
#if defined(CONFIG_VIDEO_S5K6A3)
	/* S5K6A3 runs from a 12MHz master clock, unlike the 24MHz sensors */
	{
		.board_info	= &s5k6a3_sensor_info,
		.clk_frequency	= 12000000UL,
		.bus_type	= FIMC_MIPI_CSI2,
#ifdef CONFIG_S5K6A3_CSI_C
		.i2c_bus_num	= 0,
		.mux_id		= 0, /* A-Port : 0, B-Port : 1 */
		.flite_id	= FLITE_IDX_A,
		.cam_power	= smdk4x12_cam0_reset,
#endif
#ifdef CONFIG_S5K6A3_CSI_D
		.i2c_bus_num	= 1,
		.mux_id		= 1, /* A-Port : 0, B-Port : 1 */
		.flite_id	= FLITE_IDX_B,
		.cam_power	= smdk4x12_cam1_reset,
#endif
		.flags		= 0,
		.csi_data_align	= 12,
		.use_isp	= true,
	},
#endif
#endif
#if defined(WRITEBACK_ENABLED)
	/* LCD write-back pseudo-camera: captures the display output */
	{
		.board_info	= &writeback_info,
		.bus_type	= FIMC_LCD_WB,
		.i2c_bus_num	= 0,
		.mux_id		= 0, /* A-Port : 0, B-Port : 1 */
		.flags		= FIMC_CLK_INV_VSYNC,
	},
#endif
};
/*
 * Attach the isp_info[] sensor descriptions to the FIMC platform data and
 * program per-sensor MIPI-CSIS link parameters.  FIMC0 gets the live sensor
 * entries; FIMC1 gets private copies with use_cam cleared so the same two
 * sensors can also feed a second FIMC for recording.
 * NOTE(review): indexes [0] and [1] into isp_info assume at least two
 * sensor configs are enabled — confirm against the Kconfig defaults.
 */
static void __init smdk4x12_subdev_config(void)
{
	s3c_fimc0_default_data.isp_info[0] = &isp_info[0];
	s3c_fimc0_default_data.isp_info[0]->use_cam = true;
	s3c_fimc0_default_data.isp_info[1] = &isp_info[1];
	s3c_fimc0_default_data.isp_info[1]->use_cam = true;
	/* support using two FIMCs with one sensor */
	{
		static struct s5p_fimc_isp_info camcording1;
		static struct s5p_fimc_isp_info camcording2;
		memcpy(&camcording1, &isp_info[0], sizeof(struct s5p_fimc_isp_info));
		memcpy(&camcording2, &isp_info[1], sizeof(struct s5p_fimc_isp_info));
		s3c_fimc1_default_data.isp_info[0] = &camcording1;
		s3c_fimc1_default_data.isp_info[0]->use_cam = false;
		s3c_fimc1_default_data.isp_info[1] = &camcording2;
		s3c_fimc1_default_data.isp_info[1]->use_cam = false;
	}
	/* MIPI-CSIS lane count / alignment / hs_settle per FIMC-IS sensor;
	 * CSI_C programs csis0, CSI_D programs csis1. */
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS
#ifdef CONFIG_VIDEO_S5K3H2
#ifdef CONFIG_S5K3H2_CSI_C
	s5p_mipi_csis0_default_data.clk_rate = 160000000;
	s5p_mipi_csis0_default_data.lanes = 2;
	s5p_mipi_csis0_default_data.alignment = 24;
	s5p_mipi_csis0_default_data.hs_settle = 12;
#endif
#ifdef CONFIG_S5K3H2_CSI_D
	s5p_mipi_csis1_default_data.clk_rate = 160000000;
	s5p_mipi_csis1_default_data.lanes = 2;
	s5p_mipi_csis1_default_data.alignment = 24;
	s5p_mipi_csis1_default_data.hs_settle = 12;
#endif
#endif
#ifdef CONFIG_VIDEO_S5K3H7
#ifdef CONFIG_S5K3H7_CSI_C
	s5p_mipi_csis0_default_data.clk_rate = 160000000;
	s5p_mipi_csis0_default_data.lanes = 2;
	s5p_mipi_csis0_default_data.alignment = 24;
	s5p_mipi_csis0_default_data.hs_settle = 12;
#endif
#ifdef CONFIG_S5K3H7_CSI_D
	s5p_mipi_csis1_default_data.clk_rate = 160000000;
	s5p_mipi_csis1_default_data.lanes = 2;
	s5p_mipi_csis1_default_data.alignment = 24;
	s5p_mipi_csis1_default_data.hs_settle = 12;
#endif
#endif
#ifdef CONFIG_VIDEO_S5K4E5
#ifdef CONFIG_S5K4E5_CSI_C
	s5p_mipi_csis0_default_data.clk_rate = 160000000;
	s5p_mipi_csis0_default_data.lanes = 2;
	s5p_mipi_csis0_default_data.alignment = 24;
	s5p_mipi_csis0_default_data.hs_settle = 12;
#endif
#ifdef CONFIG_S5K4E5_CSI_D
	s5p_mipi_csis1_default_data.clk_rate = 160000000;
	s5p_mipi_csis1_default_data.lanes = 2;
	s5p_mipi_csis1_default_data.alignment = 24;
	s5p_mipi_csis1_default_data.hs_settle = 12;
#endif
#endif
#ifdef CONFIG_VIDEO_S5K6A3
	/* S5K6A3 is the only single-lane sensor here */
#ifdef CONFIG_S5K6A3_CSI_C
	s5p_mipi_csis0_default_data.clk_rate = 160000000;
	s5p_mipi_csis0_default_data.lanes = 1;
	s5p_mipi_csis0_default_data.alignment = 24;
	s5p_mipi_csis0_default_data.hs_settle = 12;
#endif
#ifdef CONFIG_S5K6A3_CSI_D
	s5p_mipi_csis1_default_data.clk_rate = 160000000;
	s5p_mipi_csis1_default_data.lanes = 1;
	s5p_mipi_csis1_default_data.alignment = 24;
	s5p_mipi_csis1_default_data.hs_settle = 12;
#endif
#endif
#endif
}
/*
 * Route the camera-port GPIO banks to their camera special functions
 * (SFN 2 = CAM A on GPJ0/GPJ1, SFN 3 = CAM B on GPM0-2), pulls disabled.
 * Also configures the M-5MOLS interrupt pin as an external interrupt.
 */
static void __init smdk4x12_camera_config(void)
{
	/* CAM A port(b0010) : PCLK, VSYNC, HREF, DATA[0-4] */
	s3c_gpio_cfgrange_nopull(EXYNOS4212_GPJ0(0), 8, S3C_GPIO_SFN(2));
	/* CAM A port(b0010) : DATA[5-7], CLKOUT(MIPI CAM also), FIELD */
	s3c_gpio_cfgrange_nopull(EXYNOS4212_GPJ1(0), 5, S3C_GPIO_SFN(2));
	/* CAM B port(b0011) : PCLK, DATA[0-6] */
	s3c_gpio_cfgrange_nopull(EXYNOS4212_GPM0(0), 8, S3C_GPIO_SFN(3));
	/* CAM B port(b0011) : FIELD, DATA[7]*/
	s3c_gpio_cfgrange_nopull(EXYNOS4212_GPM1(0), 2, S3C_GPIO_SFN(3));
	/* CAM B port(b0011) : VSYNC, HREF, CLKOUT*/
	s3c_gpio_cfgrange_nopull(EXYNOS4212_GPM2(0), 3, S3C_GPIO_SFN(3));
	/* note : driver strength to max is unnecessary */
#ifdef CONFIG_VIDEO_M5MOLS
	/* GPX2(6) as external interrupt for the M-5MOLS camera */
	s3c_gpio_cfgpin(EXYNOS4_GPX2(6), S3C_GPIO_SFN(0xF));
	s3c_gpio_setpull(EXYNOS4_GPX2(6), S3C_GPIO_PULL_NONE);
#endif
}
#endif /* CONFIG_VIDEO_SAMSUNG_S5P_FIMC */
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_LITE
/* Fill in a FIMC-LITE platform-data record: how many sensor clients are
 * attached (max_cam) and which of them is initially active (active_index). */
static void __set_flite_camera_config(struct exynos_platform_flite *data,
				      u32 active_index, u32 max_cam)
{
	data->num_clients = max_cam;
	data->active_cam_index = active_index;
}
/*
 * Register each enabled FIMC-IS sensor with the FIMC-LITE instance that
 * matches its CSI port: CSI_C sensors go to flite0, CSI_D sensors to
 * flite1.  Finally record the client counts (active index fixed at 0).
 */
static void __init smdk4x12_set_camera_flite_platdata(void)
{
	int flite0_cam_index = 0;
	int flite1_cam_index = 0;
#ifdef CONFIG_VIDEO_S5K3H2
#ifdef CONFIG_S5K3H2_CSI_C
	exynos_flite0_default_data.cam[flite0_cam_index++] = &s5k3h2;
#endif
#ifdef CONFIG_S5K3H2_CSI_D
	exynos_flite1_default_data.cam[flite1_cam_index++] = &s5k3h2;
#endif
#endif
#ifdef CONFIG_VIDEO_S5K3H7
#ifdef CONFIG_S5K3H7_CSI_C
	exynos_flite0_default_data.cam[flite0_cam_index++] = &s5k3h7;
#endif
#ifdef CONFIG_S5K3H7_CSI_D
	exynos_flite1_default_data.cam[flite1_cam_index++] = &s5k3h7;
#endif
#endif
#ifdef CONFIG_VIDEO_S5K4E5
#ifdef CONFIG_S5K4E5_CSI_C
	exynos_flite0_default_data.cam[flite0_cam_index++] = &s5k4e5;
#endif
#ifdef CONFIG_S5K4E5_CSI_D
	exynos_flite1_default_data.cam[flite1_cam_index++] = &s5k4e5;
#endif
#endif
#ifdef CONFIG_VIDEO_S5K6A3
#ifdef CONFIG_S5K6A3_CSI_C
	exynos_flite0_default_data.cam[flite0_cam_index++] = &s5k6a3;
#endif
#ifdef CONFIG_S5K6A3_CSI_D
	exynos_flite1_default_data.cam[flite1_cam_index++] = &s5k6a3;
#endif
#endif
	__set_flite_camera_config(&exynos_flite0_default_data, 0, flite0_cam_index);
	__set_flite_camera_config(&exynos_flite1_default_data, 0, flite1_cam_index);
}
#endif
#if defined(CONFIG_CMA)
/*
 * Reserve carve-out memory regions for the multimedia IP blocks via CMA.
 * Region sizes come from per-driver CONFIG_*_MEMSIZE_* options (in KiB);
 * .start = 0 (explicit or implied) lets the allocator place the region.
 * The "map" string ties device names to region names; regions_secure is
 * handed to s5p_cma_region_reserve() for content-path protection setups.
 */
static void __init exynos4_reserve_mem(void)
{
	static struct cma_region regions[] = {
#ifdef CONFIG_EXYNOS_C2C
		/* chip-to-chip shared memory, aligned to its own size */
		{
			.name = "c2c_shdmem",
			.size = C2C_SHAREDMEM_SIZE,
			{
				.alignment = C2C_SHAREDMEM_SIZE,
			},
			.start = 0
		},
#endif
#ifndef CONFIG_VIDEOBUF2_ION
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_TV
		{
			.name = "tv",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_TV * SZ_1K,
			.start = 0
		},
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_JPEG
		{
			.name = "jpeg",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_JPEG * SZ_1K,
			.start = 0
		},
#endif
#ifdef CONFIG_AUDIO_SAMSUNG_MEMSIZE_SRP
		{
			.name = "srp",
			.size = CONFIG_AUDIO_SAMSUNG_MEMSIZE_SRP * SZ_1K,
			.start = 0,
		},
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMG2D
		{
			.name = "fimg2d",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMG2D * SZ_1K,
			.start = 0
		},
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMD
		{
			.name = "fimd",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMD * SZ_1K,
			.start = 0
		},
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMC0
		{
			.name = "fimc0",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMC0 * SZ_1K,
			.start = 0
		},
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMC2
		{
			.name = "fimc2",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMC2 * SZ_1K,
			.start = 0
		},
#endif
		/* fimc3 lives in regions_secure instead when content-path
		 * protection is enabled */
#if !defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) && \
	defined(CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMC3)
		{
			.name = "fimc3",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMC3 * SZ_1K,
		},
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMC1
		{
			.name = "fimc1",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMC1 * SZ_1K,
			.start = 0
		},
#endif
		/* MFC regions need 128KiB (1 << 17) alignment */
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC_NORMAL
		{
			.name = "mfc-normal",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC_NORMAL * SZ_1K,
			{ .alignment = 1 << 17 },
		},
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC1
		{
			.name = "mfc1",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC1 * SZ_1K,
			{
				.alignment = 1 << 17,
			},
			.start = 0,
		},
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC0
		{
			.name = "mfc0",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC0 * SZ_1K,
			{
				.alignment = 1 << 17,
			}
		},
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC
		{
			.name = "mfc",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC * SZ_1K,
			{
				.alignment = 1 << 17,
			},
			.start = 0
		},
#endif
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS
		/* FIMC-IS firmware region, 64MiB (1 << 26) aligned */
		{
			.name = "fimc_is",
			.size = CONFIG_VIDEO_EXYNOS_MEMSIZE_FIMC_IS * SZ_1K,
			{
				.alignment = 1 << 26,
			},
			.start = 0
		},
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS_BAYER
		{
			.name = "fimc_is_isp",
			.size = CONFIG_VIDEO_EXYNOS_MEMSIZE_FIMC_IS_ISP * SZ_1K,
			.start = 0
		},
#endif
#endif
		/* mainline MFC firmware + two memory banks */
#if !defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) && \
	defined(CONFIG_VIDEO_SAMSUNG_S5P_MFC)
		{
			.name = "b2",
			.size = 32 << 20,
			{ .alignment = 128 << 10 },
		},
		{
			.name = "b1",
			.size = 32 << 20,
			{ .alignment = 128 << 10 },
		},
		{
			.name = "fw",
			.size = 1 << 20,
			{ .alignment = 128 << 10 },
		},
#endif
#else /* !CONFIG_VIDEOBUF2_ION */
#ifdef CONFIG_FB_S5P
#error CONFIG_FB_S5P is defined. Select CONFIG_FB_S3C, instead
#endif
		/* with videobuf2-ion a single ION contiguous heap replaces
		 * the per-driver regions above */
		{
			.name = "ion",
			.size = CONFIG_ION_EXYNOS_CONTIGHEAP_SIZE * SZ_1K,
		},
#endif /* !CONFIG_VIDEOBUF2_ION */
		{
			.size = 0	/* sentinel: terminates the array */
		},
	};
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
	/* regions that must live in protected (secure) memory */
	static struct cma_region regions_secure[] = {
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMC3
		{
			.name = "fimc3",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMC3 * SZ_1K,
		},
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMD_VIDEO
		{
			.name = "video",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_FIMD_VIDEO * SZ_1K,
		},
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC_SECURE
		{
			.name = "mfc-secure",
			.size = CONFIG_VIDEO_SAMSUNG_MEMSIZE_MFC_SECURE * SZ_1K,
		},
#endif
		{
			.name = "sectbl",
			.size = SZ_1M,
		},
		{
			.size = 0	/* sentinel */
		},
	};
#else /* !CONFIG_EXYNOS_CONTENT_PATH_PROTECTION */
	struct cma_region *regions_secure = NULL;
#endif
	/* device-name=region-name map consumed by s5p_cma_region_reserve() */
	static const char map[] __initconst =
#ifdef CONFIG_EXYNOS_C2C
		"samsung-c2c=c2c_shdmem;"
#endif
		"s3cfb.0/fimd=fimd;exynos4-fb.0/fimd=fimd;samsung-pd.1=fimd;"
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
		"s3cfb.0/video=video;exynos4-fb.0/video=video;"
#endif
		"s3c-fimc.0=fimc0;s3c-fimc.1=fimc1;s3c-fimc.2=fimc2;s3c-fimc.3=fimc3;"
		"exynos4210-fimc.0=fimc0;exynos4210-fimc.1=fimc1;exynos4210-fimc.2=fimc2;exynos4210-fimc.3=fimc3;"
#ifdef CONFIG_VIDEO_MFC5X
		"s3c-mfc=mfc,mfc0,mfc1;"
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_S5P_MFC
		"s5p-mfc/f=fw;"
		"s5p-mfc/a=b1;"
		"s5p-mfc/b=b2;"
#endif
		"samsung-rp=srp;"
		"s5p-jpeg=jpeg;"
		"exynos4-fimc-is/f=fimc_is;"
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS_BAYER
		"exynos4-fimc-is/i=fimc_is_isp;"
#endif
		"s5p-mixer=tv;"
		"s5p-fimg2d=fimg2d;"
		"ion-exynos=ion,fimd,fimc0,fimc1,fimc2,fimc3,mfc,mfc0,mfc1,fw,b1,b2;"
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
		"s5p-smem/video=video;"
		"s5p-smem/sectbl=sectbl;"
#endif
		"s5p-smem/mfc=mfc0;"
		"s5p-smem/fimc=fimc3;"
		"s5p-smem/mfc-shm=mfc1,mfc-normal;"
		"s5p-smem/fimd=fimd;";
	s5p_cma_region_reserve(regions, regions_secure, 0, map);
}
#else
/* No CMA: nothing to reserve. */
static inline void exynos4_reserve_mem(void)
{
}
#endif /* CONFIG_CMA */
/* LCD Backlight data: PWM1 output on GPD0(1) drives the backlight. */
static struct samsung_bl_gpio_info smdk4x12_bl_gpio_info = {
	.no = EXYNOS4_GPD0(1),
	.func = S3C_GPIO_SFN(2),	/* GPD0(1) special function 2 = PWM TOUT */
};
static struct platform_pwm_backlight_data smdk4x12_bl_data = {
	.pwm_id = 1,
#ifdef CONFIG_FB_S5P_LMS501KF03
	/* the LMS501KF03 panel needs a 1us PWM period */
	.pwm_period_ns = 1000,
#endif
};
/*
 * Early machine map_io hook: set the 24MHz XUSBXTI crystal rate, map the
 * static I/O regions, initialize clocks and UARTs, then carve out the
 * multimedia CMA regions (must happen this early, before memory init).
 */
static void __init smdk4x12_map_io(void)
{
	clk_xusbxti.rate = 24000000;
	s5p_init_io(NULL, 0, S5P_VA_CHIPID);
	s3c24xx_init_clocks(24000000);
	s3c24xx_init_uarts(smdk4x12_uartcfgs, ARRAY_SIZE(smdk4x12_uartcfgs));
	exynos4_reserve_mem();
}
/*
 * Program SROM controller bank 1 (nCS1) for the SMSC911x ethernet chip:
 * 16-bit data width with wait and byte enables, plus access timings
 * suitable for the chip.  Register layout per the S5P SROM controller.
 */
static void __init smdk4x12_smsc911x_init(void)
{
	u32 cs1;
	/* configure nCS1 width to 16 bits */
	cs1 = __raw_readl(S5P_SROM_BW) &
		~(S5P_SROM_BW__CS_MASK << S5P_SROM_BW__NCS1__SHIFT);
	cs1 |= ((1 << S5P_SROM_BW__DATAWIDTH__SHIFT) |
		(1 << S5P_SROM_BW__WAITENABLE__SHIFT) |
		(1 << S5P_SROM_BW__BYTEENABLE__SHIFT)) <<
		S5P_SROM_BW__NCS1__SHIFT;
	__raw_writel(cs1, S5P_SROM_BW);
	/* set timing for nCS1 suitable for ethernet chip */
	__raw_writel((0x1 << S5P_SROM_BCX__PMC__SHIFT) |
		(0x9 << S5P_SROM_BCX__TACP__SHIFT) |
		(0xc << S5P_SROM_BCX__TCAH__SHIFT) |
		(0x1 << S5P_SROM_BCX__TCOH__SHIFT) |
		(0x6 << S5P_SROM_BCX__TACC__SHIFT) |
		(0x1 << S5P_SROM_BCX__TCOS__SHIFT) |
		(0x1 << S5P_SROM_BCX__TACS__SHIFT), S5P_SROM_BC1);
}
/*
 * Wire each System MMU instance to its power domain and to the platform
 * device that owns it, so the SysMMU driver powers and translates for the
 * right IP block.  Camera/JPEG SysMMUs sit in PD_CAM, MFC in PD_MFC, TV in
 * PD_TV, and the FIMC-IS ones in PD_ISP.
 */
static void __init exynos_sysmmu_init(void)
{
	ASSIGN_SYSMMU_POWERDOMAIN(fimc0, &exynos4_device_pd[PD_CAM].dev);
	ASSIGN_SYSMMU_POWERDOMAIN(fimc1, &exynos4_device_pd[PD_CAM].dev);
	ASSIGN_SYSMMU_POWERDOMAIN(fimc2, &exynos4_device_pd[PD_CAM].dev);
	ASSIGN_SYSMMU_POWERDOMAIN(fimc3, &exynos4_device_pd[PD_CAM].dev);
	ASSIGN_SYSMMU_POWERDOMAIN(jpeg, &exynos4_device_pd[PD_CAM].dev);
	ASSIGN_SYSMMU_POWERDOMAIN(mfc_l, &exynos4_device_pd[PD_MFC].dev);
	ASSIGN_SYSMMU_POWERDOMAIN(mfc_r, &exynos4_device_pd[PD_MFC].dev);
	ASSIGN_SYSMMU_POWERDOMAIN(tv, &exynos4_device_pd[PD_TV].dev);
#ifdef CONFIG_VIDEO_FIMG2D
	sysmmu_set_owner(&SYSMMU_PLATDEV(g2d_acp).dev, &s5p_device_fimg2d.dev);
#endif
#if defined(CONFIG_VIDEO_SAMSUNG_S5P_MFC) || defined(CONFIG_VIDEO_MFC5X)
	sysmmu_set_owner(&SYSMMU_PLATDEV(mfc_l).dev, &s5p_device_mfc.dev);
	sysmmu_set_owner(&SYSMMU_PLATDEV(mfc_r).dev, &s5p_device_mfc.dev);
#endif
	/* FIMC owner depends on which FIMC driver (legacy vs mainline) is built */
#if defined(CONFIG_VIDEO_FIMC)
	sysmmu_set_owner(&SYSMMU_PLATDEV(fimc0).dev, &s3c_device_fimc0.dev);
	sysmmu_set_owner(&SYSMMU_PLATDEV(fimc1).dev, &s3c_device_fimc1.dev);
	sysmmu_set_owner(&SYSMMU_PLATDEV(fimc2).dev, &s3c_device_fimc2.dev);
	sysmmu_set_owner(&SYSMMU_PLATDEV(fimc3).dev, &s3c_device_fimc3.dev);
#elif defined(CONFIG_VIDEO_SAMSUNG_S5P_FIMC)
	sysmmu_set_owner(&SYSMMU_PLATDEV(fimc0).dev, &s5p_device_fimc0.dev);
	sysmmu_set_owner(&SYSMMU_PLATDEV(fimc1).dev, &s5p_device_fimc1.dev);
	sysmmu_set_owner(&SYSMMU_PLATDEV(fimc2).dev, &s5p_device_fimc2.dev);
	sysmmu_set_owner(&SYSMMU_PLATDEV(fimc3).dev, &s5p_device_fimc3.dev);
#endif
	/* likewise for the TV SysMMU: mainline mixer vs legacy tvout */
#ifdef CONFIG_VIDEO_EXYNOS_TV
	sysmmu_set_owner(&SYSMMU_PLATDEV(tv).dev, &s5p_device_mixer.dev);
#endif
#ifdef CONFIG_VIDEO_TVOUT
	sysmmu_set_owner(&SYSMMU_PLATDEV(tv).dev, &s5p_device_tvout.dev);
#endif
#ifdef CONFIG_VIDEO_JPEG_V2X
	sysmmu_set_owner(&SYSMMU_PLATDEV(jpeg).dev, &s5p_device_jpeg.dev);
#endif
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS
	ASSIGN_SYSMMU_POWERDOMAIN(is_isp, &exynos4_device_pd[PD_ISP].dev);
	ASSIGN_SYSMMU_POWERDOMAIN(is_drc, &exynos4_device_pd[PD_ISP].dev);
	ASSIGN_SYSMMU_POWERDOMAIN(is_fd, &exynos4_device_pd[PD_ISP].dev);
	ASSIGN_SYSMMU_POWERDOMAIN(is_cpu, &exynos4_device_pd[PD_ISP].dev);
	sysmmu_set_owner(&SYSMMU_PLATDEV(is_isp).dev,
						&exynos4_device_fimc_is.dev);
	sysmmu_set_owner(&SYSMMU_PLATDEV(is_drc).dev,
						&exynos4_device_fimc_is.dev);
	sysmmu_set_owner(&SYSMMU_PLATDEV(is_fd).dev,
						&exynos4_device_fimc_is.dev);
	sysmmu_set_owner(&SYSMMU_PLATDEV(is_cpu).dev,
						&exynos4_device_fimc_is.dev);
#endif
}
#ifdef CONFIG_FB_S5P_EXTDSP
/* External-DSP framebuffer platform device and its default configuration:
 * a single window backed by a dummy 1280x720 16bpp buffer description. */
struct platform_device s3c_device_extdsp = {
	.name		= "s3cfb_extdsp",
	.id		= 0,
};
static struct s3cfb_extdsp_lcd dummy_buffer = {
	.width = 1280,
	.height = 720,
	.bpp = 16,
};
static struct s3c_platform_fb default_extdsp_data __initdata = {
	.hw_ver		= 0x70,
	.nr_wins	= 1,
	.default_win	= 0,
	.swap		= FB_SWAP_WORD | FB_SWAP_HWORD,
	.lcd		= &dummy_buffer
};
/*
 * Install platform data for the extdsp framebuffer device.
 * @pd: caller-supplied config, or NULL to use default_extdsp_data.
 *
 * The data is deep-copied with kmemdup (the source may be __initdata) and
 * every window is forced to a single buffer.  On allocation failure the
 * error is only logged and the device keeps no platform data.
 */
void __init s3cfb_extdsp_set_platdata(struct s3c_platform_fb *pd)
{
	struct s3c_platform_fb *npd;
	int i;
	if (!pd)
		pd = &default_extdsp_data;
	npd = kmemdup(pd, sizeof(struct s3c_platform_fb), GFP_KERNEL);
	if (!npd)
		printk(KERN_ERR "%s: no memory for platform data\n", __func__);
	else {
		for (i = 0; i < npd->nr_wins; i++)
			npd->nr_buffers[i] = 1;
		s3c_device_extdsp.dev.platform_data = npd;
	}
}
#endif
/* Expected ADC readings for the two SMDK4412 board revisions; the midpoint
 * between them is used as the decision threshold below. */
#define SMDK4412_REV_0_0_ADC_VALUE	0
#define SMDK4412_REV_0_1_ADC_VALUE	443

int samsung_board_rev;

/*
 * Detect the SMDK main-board revision by sampling ADC channel 3.
 *
 * Pre-1.0 silicon is always treated as board rev 0.0.  Otherwise the ADC
 * block is clocked, mapped, and a single one-shot conversion is started;
 * after a 50us settling delay the 12-bit result decides the revision
 * (below half of the rev-0.1 value => rev 0.0).  On any setup failure the
 * error path falls through err_clk with adc_val still 0, which also
 * yields rev 0.0.
 * NOTE(review): the fixed udelay (50) is assumed long enough for the
 * conversion to complete rather than polling the end-of-conversion flag —
 * confirm against the ADC timing.
 */
static int get_samsung_board_rev(void)
{
	int adc_val = 0;
	struct clk *adc_clk;
	struct resource *res;
	void __iomem *adc_regs;
	unsigned int con;
	int ret;

	if ((soc_is_exynos4412() && samsung_rev() < EXYNOS4412_REV_1_0) ||
		(soc_is_exynos4212() && samsung_rev() < EXYNOS4212_REV_1_0))
		return SAMSUNG_BOARD_REV_0_0;

	adc_clk = clk_get(NULL, "adc");
	if (unlikely(IS_ERR(adc_clk)))
		return SAMSUNG_BOARD_REV_0_0;
	clk_enable(adc_clk);

	res = platform_get_resource(&s3c_device_adc, IORESOURCE_MEM, 0);
	if (unlikely(!res))
		goto err_clk;
	adc_regs = ioremap(res->start, resource_size(res));
	if (unlikely(!adc_regs))
		goto err_clk;

	/* select mux channel 3 and kick off a single conversion */
	writel(S5PV210_ADCCON_SELMUX(3), adc_regs + S5P_ADCMUX);
	con = readl(adc_regs + S3C2410_ADCCON);
	con &= ~S3C2410_ADCCON_MUXMASK;
	con &= ~S3C2410_ADCCON_STDBM;		/* leave standby mode */
	con &= ~S3C2410_ADCCON_STARTMASK;
	con |= S3C2410_ADCCON_PRSCEN;		/* enable the prescaler */
	con |= S3C2410_ADCCON_ENABLE_START;
	writel(con, adc_regs + S3C2410_ADCCON);

	udelay (50);

	adc_val = readl(adc_regs + S3C2410_ADCDAT0) & 0xFFF;
	writel(0, adc_regs + S3C64XX_ADCCLRINT);	/* clear the ADC interrupt */
	iounmap(adc_regs);
err_clk:
	clk_disable(adc_clk);
	clk_put(adc_clk);

	ret = (adc_val < SMDK4412_REV_0_1_ADC_VALUE/2) ?
			SAMSUNG_BOARD_REV_0_0 : SAMSUNG_BOARD_REV_0_1;

	pr_info ("SMDK MAIN Board Rev 0.%d (ADC value:%d)\n", ret, adc_val);
	return ret;
}
static void __init smdk4x12_machine_init(void)
{
#ifdef CONFIG_S3C64XX_DEV_SPI
struct clk *sclk = NULL;
struct clk *prnt = NULL;
struct device *spi0_dev = &exynos_device_spi0.dev;
#ifndef CONFIG_FB_S5P_LMS501KF03
struct device *spi1_dev = &exynos_device_spi1.dev;
#endif
struct device *spi2_dev = &exynos_device_spi2.dev;
#endif
samsung_board_rev = get_samsung_board_rev();
#if defined(CONFIG_EXYNOS_DEV_PD) && defined(CONFIG_PM_RUNTIME)
exynos_pd_disable(&exynos4_device_pd[PD_MFC].dev);
exynos_pd_disable(&exynos4_device_pd[PD_G3D].dev);
exynos_pd_disable(&exynos4_device_pd[PD_LCD0].dev);
exynos_pd_disable(&exynos4_device_pd[PD_CAM].dev);
exynos_pd_disable(&exynos4_device_pd[PD_TV].dev);
exynos_pd_disable(&exynos4_device_pd[PD_GPS].dev);
exynos_pd_disable(&exynos4_device_pd[PD_GPS_ALIVE].dev);
exynos_pd_disable(&exynos4_device_pd[PD_ISP].dev);
#elif defined(CONFIG_EXYNOS_DEV_PD)
/*
* These power domains should be always on
* without runtime pm support.
*/
exynos_pd_enable(&exynos4_device_pd[PD_MFC].dev);
exynos_pd_enable(&exynos4_device_pd[PD_G3D].dev);
exynos_pd_enable(&exynos4_device_pd[PD_LCD0].dev);
exynos_pd_enable(&exynos4_device_pd[PD_CAM].dev);
exynos_pd_enable(&exynos4_device_pd[PD_TV].dev);
exynos_pd_enable(&exynos4_device_pd[PD_GPS].dev);
exynos_pd_enable(&exynos4_device_pd[PD_GPS_ALIVE].dev);
exynos_pd_enable(&exynos4_device_pd[PD_ISP].dev);
#endif
s3c_i2c0_set_platdata(NULL);
i2c_register_board_info(0, i2c_devs0, ARRAY_SIZE(i2c_devs0));
s3c_i2c1_set_platdata(NULL);
i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1));
s3c_i2c2_set_platdata(NULL);
i2c_register_board_info(2, i2c_devs2, ARRAY_SIZE(i2c_devs2));
s3c_i2c3_set_platdata(NULL);
i2c_register_board_info(3, i2c_devs3, ARRAY_SIZE(i2c_devs3));
s3c_i2c4_set_platdata(NULL);
s3c_i2c5_set_platdata(NULL);
s3c_i2c7_set_platdata(NULL);
i2c_devs7[0].irq = samsung_board_rev_is_0_0() ? IRQ_EINT(15) : IRQ_EINT(22);
i2c_register_board_info(7, i2c_devs7, ARRAY_SIZE(i2c_devs7));
#if defined(CONFIG_FB_S5P_MIPI_DSIM)
mipi_fb_init();
#endif
#ifdef CONFIG_FB_S3C
dev_set_name(&s5p_device_fimd0.dev, "s3cfb.0");
clk_add_alias("lcd", "exynos4-fb.0", "lcd", &s5p_device_fimd0.dev);
clk_add_alias("sclk_fimd", "exynos4-fb.0", "sclk_fimd", &s5p_device_fimd0.dev);
s5p_fb_setname(0, "exynos4-fb");
#if defined(CONFIG_LCD_AMS369FG06) || defined(CONFIG_LCD_LMS501KF03)
spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
#endif
s5p_fimd0_set_platdata(&smdk4x12_lcd0_pdata);
#ifdef CONFIG_FB_MIPI_DSIM
s5p_device_mipi_dsim.dev.parent = &exynos4_device_pd[PD_LCD0].dev;
#endif
#ifdef CONFIG_EXYNOS_DEV_PD
s5p_device_fimd0.dev.parent = &exynos4_device_pd[PD_LCD0].dev;
#endif
#endif
#ifdef CONFIG_FB_S5P
#ifdef CONFIG_FB_S5P_LMS501KF03
spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
s3cfb_set_platdata(&lms501kf03_data);
#else
s3cfb_set_platdata(NULL);
#endif
#ifdef CONFIG_FB_S5P_MIPI_DSIM
s5p_device_dsim.dev.parent = &exynos4_device_pd[PD_LCD0].dev;
#endif
#ifdef CONFIG_EXYNOS_DEV_PD
s3c_device_fb.dev.parent = &exynos4_device_pd[PD_LCD0].dev;
#endif
#endif
#ifdef CONFIG_USB_EHCI_S5P
smdk4x12_ehci_init();
#endif
#ifdef CONFIG_USB_OHCI_S5P
smdk4x12_ohci_init();
#endif
#ifdef CONFIG_USB_GADGET
smdk4x12_usbgadget_init();
#endif
#ifdef CONFIG_USB_EXYNOS_SWITCH
smdk4x12_usbswitch_init();
#endif
samsung_bl_set(&smdk4x12_bl_gpio_info, &smdk4x12_bl_data);
#ifdef CONFIG_EXYNOS4_DEV_DWMCI
exynos_dwmci_set_platdata(&exynos_dwmci_pdata, 0);
#endif
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_IS
exynos4_fimc_is_set_platdata(NULL);
#ifdef CONFIG_EXYNOS_DEV_PD
exynos4_device_fimc_is.dev.parent = &exynos4_device_pd[PD_ISP].dev;
#endif
#endif
#ifdef CONFIG_S3C_DEV_HSMMC
s3c_sdhci0_set_platdata(&smdk4x12_hsmmc0_pdata);
#endif
#ifdef CONFIG_S3C_DEV_HSMMC1
s3c_sdhci1_set_platdata(&smdk4x12_hsmmc1_pdata);
#endif
#ifdef CONFIG_S3C_DEV_HSMMC2
s3c_sdhci2_set_platdata(&smdk4x12_hsmmc2_pdata);
#endif
#ifdef CONFIG_S3C_DEV_HSMMC3
s3c_sdhci3_set_platdata(&smdk4x12_hsmmc3_pdata);
#endif
#ifdef CONFIG_S5P_DEV_MSHC
s3c_mshci_set_platdata(&exynos4_mshc_pdata);
#endif
#if defined(CONFIG_VIDEO_EXYNOS_TV) && defined(CONFIG_VIDEO_EXYNOS_HDMI)
dev_set_name(&s5p_device_hdmi.dev, "exynos4-hdmi");
clk_add_alias("hdmi", "s5p-hdmi", "hdmi", &s5p_device_hdmi.dev);
clk_add_alias("hdmiphy", "s5p-hdmi", "hdmiphy", &s5p_device_hdmi.dev);
s5p_tv_setup();
/* setup dependencies between TV devices */
s5p_device_hdmi.dev.parent = &exynos4_device_pd[PD_TV].dev;
s5p_device_mixer.dev.parent = &exynos4_device_pd[PD_TV].dev;
s5p_i2c_hdmiphy_set_platdata(NULL);
#ifdef CONFIG_VIDEO_EXYNOS_HDMI_CEC
s5p_hdmi_cec_set_platdata(&hdmi_cec_data);
#endif
#endif
#ifdef CONFIG_VIDEO_EXYNOS_FIMC_LITE
smdk4x12_set_camera_flite_platdata();
s3c_set_platdata(&exynos_flite0_default_data,
sizeof(exynos_flite0_default_data), &exynos_device_flite0);
s3c_set_platdata(&exynos_flite1_default_data,
sizeof(exynos_flite1_default_data), &exynos_device_flite1);
#ifdef CONFIG_EXYNOS_DEV_PD
exynos_device_flite0.dev.parent = &exynos4_device_pd[PD_ISP].dev;
exynos_device_flite1.dev.parent = &exynos4_device_pd[PD_ISP].dev;
#endif
#endif
#ifdef CONFIG_EXYNOS_SETUP_THERMAL
s5p_tmu_set_platdata(&exynos_tmu_data);
#endif
#ifdef CONFIG_VIDEO_FIMC
s3c_fimc0_set_platdata(&fimc_plat);
s3c_fimc1_set_platdata(&fimc_plat);
s3c_fimc2_set_platdata(&fimc_plat);
s3c_fimc3_set_platdata(NULL);
#ifdef CONFIG_EXYNOS_DEV_PD
s3c_device_fimc0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s3c_device_fimc1.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s3c_device_fimc2.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s3c_device_fimc3.dev.parent = &exynos4_device_pd[PD_CAM].dev;
#ifdef CONFIG_EXYNOS_CONTENT_PATH_PROTECTION
secmem.parent = &exynos4_device_pd[PD_CAM].dev;
#endif
#endif
#ifdef CONFIG_VIDEO_FIMC_MIPI
s3c_csis0_set_platdata(NULL);
s3c_csis1_set_platdata(NULL);
#ifdef CONFIG_EXYNOS_DEV_PD
s3c_device_csis0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s3c_device_csis1.dev.parent = &exynos4_device_pd[PD_CAM].dev;
#endif
#endif
#if defined(CONFIG_ITU_A) || defined(CONFIG_CSI_C) \
|| defined(CONFIG_S5K3H1_CSI_C) || defined(CONFIG_S5K3H2_CSI_C) \
|| defined(CONFIG_S5K6A3_CSI_C)
smdk4x12_cam0_reset(1);
#endif
#if defined(CONFIG_ITU_B) || defined(CONFIG_CSI_D) \
|| defined(CONFIG_S5K3H1_CSI_D) || defined(CONFIG_S5K3H2_CSI_D) \
|| defined(CONFIG_S5K6A3_CSI_D)
smdk4x12_cam1_reset(1);
#endif
#endif /* CONFIG_VIDEO_FIMC */
#ifdef CONFIG_FB_S5P_EXTDSP
s3cfb_extdsp_set_platdata(&default_extdsp_data);
#endif
#ifdef CONFIG_VIDEO_SAMSUNG_S5P_FIMC
smdk4x12_camera_config();
smdk4x12_subdev_config();
dev_set_name(&s5p_device_fimc0.dev, "s3c-fimc.0");
dev_set_name(&s5p_device_fimc1.dev, "s3c-fimc.1");
dev_set_name(&s5p_device_fimc2.dev, "s3c-fimc.2");
dev_set_name(&s5p_device_fimc3.dev, "s3c-fimc.3");
clk_add_alias("fimc", "exynos4210-fimc.0", "fimc", &s5p_device_fimc0.dev);
clk_add_alias("sclk_fimc", "exynos4210-fimc.0", "sclk_fimc",
&s5p_device_fimc0.dev);
clk_add_alias("fimc", "exynos4210-fimc.1", "fimc", &s5p_device_fimc1.dev);
clk_add_alias("sclk_fimc", "exynos4210-fimc.1", "sclk_fimc",
&s5p_device_fimc1.dev);
clk_add_alias("fimc", "exynos4210-fimc.2", "fimc", &s5p_device_fimc2.dev);
clk_add_alias("sclk_fimc", "exynos4210-fimc.2", "sclk_fimc",
&s5p_device_fimc2.dev);
clk_add_alias("fimc", "exynos4210-fimc.3", "fimc", &s5p_device_fimc3.dev);
clk_add_alias("sclk_fimc", "exynos4210-fimc.3", "sclk_fimc",
&s5p_device_fimc3.dev);
s3c_fimc_setname(0, "exynos4210-fimc");
s3c_fimc_setname(1, "exynos4210-fimc");
s3c_fimc_setname(2, "exynos4210-fimc");
s3c_fimc_setname(3, "exynos4210-fimc");
/* FIMC */
s3c_set_platdata(&s3c_fimc0_default_data,
sizeof(s3c_fimc0_default_data), &s5p_device_fimc0);
s3c_set_platdata(&s3c_fimc1_default_data,
sizeof(s3c_fimc1_default_data), &s5p_device_fimc1);
s3c_set_platdata(&s3c_fimc2_default_data,
sizeof(s3c_fimc2_default_data), &s5p_device_fimc2);
s3c_set_platdata(&s3c_fimc3_default_data,
sizeof(s3c_fimc3_default_data), &s5p_device_fimc3);
#ifdef CONFIG_EXYNOS_DEV_PD
s5p_device_fimc0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s5p_device_fimc1.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s5p_device_fimc2.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s5p_device_fimc3.dev.parent = &exynos4_device_pd[PD_CAM].dev;
#endif
#ifdef CONFIG_VIDEO_S5P_MIPI_CSIS
dev_set_name(&s5p_device_mipi_csis0.dev, "s3c-csis.0");
dev_set_name(&s5p_device_mipi_csis1.dev, "s3c-csis.1");
clk_add_alias("csis", "s5p-mipi-csis.0", "csis",
&s5p_device_mipi_csis0.dev);
clk_add_alias("sclk_csis", "s5p-mipi-csis.0", "sclk_csis",
&s5p_device_mipi_csis0.dev);
clk_add_alias("csis", "s5p-mipi-csis.1", "csis",
&s5p_device_mipi_csis1.dev);
clk_add_alias("sclk_csis", "s5p-mipi-csis.1", "sclk_csis",
&s5p_device_mipi_csis1.dev);
dev_set_name(&s5p_device_mipi_csis0.dev, "s5p-mipi-csis.0");
dev_set_name(&s5p_device_mipi_csis1.dev, "s5p-mipi-csis.1");
s3c_set_platdata(&s5p_mipi_csis0_default_data,
sizeof(s5p_mipi_csis0_default_data), &s5p_device_mipi_csis0);
s3c_set_platdata(&s5p_mipi_csis1_default_data,
sizeof(s5p_mipi_csis1_default_data), &s5p_device_mipi_csis1);
#ifdef CONFIG_EXYNOS_DEV_PD
s5p_device_mipi_csis0.dev.parent = &exynos4_device_pd[PD_CAM].dev;
s5p_device_mipi_csis1.dev.parent = &exynos4_device_pd[PD_CAM].dev;
#endif
#endif
#if defined(CONFIG_ITU_A) || defined(CONFIG_CSI_C) \
|| defined(CONFIG_S5K3H1_CSI_C) || defined(CONFIG_S5K3H2_CSI_C) \
|| defined(CONFIG_S5K6A3_CSI_C)
smdk4x12_cam0_reset(1);
#endif
#if defined(CONFIG_ITU_B) || defined(CONFIG_CSI_D) \
|| defined(CONFIG_S5K3H1_CSI_D) || defined(CONFIG_S5K3H2_CSI_D) \
|| defined(CONFIG_S5K6A3_CSI_D)
smdk4x12_cam1_reset(1);
#endif
#endif
#if defined(CONFIG_VIDEO_TVOUT)
s5p_hdmi_hpd_set_platdata(&hdmi_hpd_data);
s5p_hdmi_cec_set_platdata(&hdmi_cec_data);
#ifdef CONFIG_EXYNOS_DEV_PD
s5p_device_tvout.dev.parent = &exynos4_device_pd[PD_TV].dev;
exynos4_device_pd[PD_TV].dev.parent = &exynos4_device_pd[PD_LCD0].dev;
#endif
#endif
#ifdef CONFIG_VIDEO_JPEG_V2X
#ifdef CONFIG_EXYNOS_DEV_PD
s5p_device_jpeg.dev.parent = &exynos4_device_pd[PD_CAM].dev;
exynos4_jpeg_setup_clock(&s5p_device_jpeg.dev, 160000000);
#endif
#endif
#ifdef CONFIG_ION_EXYNOS
exynos_ion_set_platdata();
#endif
#if defined(CONFIG_VIDEO_MFC5X) || defined(CONFIG_VIDEO_SAMSUNG_S5P_MFC)
#ifdef CONFIG_EXYNOS_DEV_PD
s5p_device_mfc.dev.parent = &exynos4_device_pd[PD_MFC].dev;
#endif
if (soc_is_exynos4412() && samsung_rev() >= EXYNOS4412_REV_1_0)
exynos4_mfc_setup_clock(&s5p_device_mfc.dev, 200 * MHZ);
else
exynos4_mfc_setup_clock(&s5p_device_mfc.dev, 267 * MHZ);
#endif
#if defined(CONFIG_VIDEO_SAMSUNG_S5P_MFC)
dev_set_name(&s5p_device_mfc.dev, "s3c-mfc");
clk_add_alias("mfc", "s5p-mfc", "mfc", &s5p_device_mfc.dev);
s5p_mfc_setname(&s5p_device_mfc, "s5p-mfc");
#endif
#ifdef CONFIG_VIDEO_FIMG2D
s5p_fimg2d_set_platdata(&fimg2d_data);
#endif
if (samsung_board_rev_is_0_0())
samsung_keypad_set_platdata(&smdk4x12_keypad_data0);
else
samsung_keypad_set_platdata(&smdk4x12_keypad_data1);
smdk4x12_smsc911x_init();
exynos_sysmmu_init();
smdk4x12_gpio_power_init();
platform_add_devices(smdk4x12_devices, ARRAY_SIZE(smdk4x12_devices));
if (soc_is_exynos4412())
platform_add_devices(smdk4412_devices, ARRAY_SIZE(smdk4412_devices));
#ifdef CONFIG_FB_S3C
exynos4_fimd0_setup_clock(&s5p_device_fimd0.dev, "mout_mpll_user",
800 * MHZ);
#endif
#ifdef CONFIG_S3C64XX_DEV_SPI
sclk = clk_get(spi0_dev, "dout_spi0");
if (IS_ERR(sclk))
dev_err(spi0_dev, "failed to get sclk for SPI-0\n");
prnt = clk_get(spi0_dev, "mout_mpll_user");
if (IS_ERR(prnt))
dev_err(spi0_dev, "failed to get prnt\n");
if (clk_set_parent(sclk, prnt))
printk(KERN_ERR "Unable to set parent %s of clock %s.\n",
prnt->name, sclk->name);
clk_set_rate(sclk, 800 * 1000 * 1000);
clk_put(sclk);
clk_put(prnt);
if (!gpio_request(EXYNOS4_GPB(1), "SPI_CS0")) {
gpio_direction_output(EXYNOS4_GPB(1), 1);
s3c_gpio_cfgpin(EXYNOS4_GPB(1), S3C_GPIO_SFN(1));
s3c_gpio_setpull(EXYNOS4_GPB(1), S3C_GPIO_PULL_UP);
exynos_spi_set_info(0, EXYNOS_SPI_SRCCLK_SCLK,
ARRAY_SIZE(spi0_csi));
}
spi_register_board_info(spi0_board_info, ARRAY_SIZE(spi0_board_info));
#ifndef CONFIG_FB_S5P_LMS501KF03
sclk = clk_get(spi1_dev, "dout_spi1");
if (IS_ERR(sclk))
dev_err(spi1_dev, "failed to get sclk for SPI-1\n");
prnt = clk_get(spi1_dev, "mout_mpll_user");
if (IS_ERR(prnt))
dev_err(spi1_dev, "failed to get prnt\n");
if (clk_set_parent(sclk, prnt))
printk(KERN_ERR "Unable to set parent %s of clock %s.\n",
prnt->name, sclk->name);
clk_set_rate(sclk, 800 * 1000 * 1000);
clk_put(sclk);
clk_put(prnt);
if (!gpio_request(EXYNOS4_GPB(5), "SPI_CS1")) {
gpio_direction_output(EXYNOS4_GPB(5), 1);
s3c_gpio_cfgpin(EXYNOS4_GPB(5), S3C_GPIO_SFN(1));
s3c_gpio_setpull(EXYNOS4_GPB(5), S3C_GPIO_PULL_UP);
exynos_spi_set_info(1, EXYNOS_SPI_SRCCLK_SCLK,
ARRAY_SIZE(spi1_csi));
}
spi_register_board_info(spi1_board_info, ARRAY_SIZE(spi1_board_info));
#endif
sclk = clk_get(spi2_dev, "dout_spi2");
if (IS_ERR(sclk))
dev_err(spi2_dev, "failed to get sclk for SPI-2\n");
prnt = clk_get(spi2_dev, "mout_mpll_user");
if (IS_ERR(prnt))
dev_err(spi2_dev, "failed to get prnt\n");
if (clk_set_parent(sclk, prnt))
printk(KERN_ERR "Unable to set parent %s of clock %s.\n",
prnt->name, sclk->name);
clk_set_rate(sclk, 800 * 1000 * 1000);
clk_put(sclk);
clk_put(prnt);
if (!gpio_request(EXYNOS4_GPC1(2), "SPI_CS2")) {
gpio_direction_output(EXYNOS4_GPC1(2), 1);
s3c_gpio_cfgpin(EXYNOS4_GPC1(2), S3C_GPIO_SFN(1));
s3c_gpio_setpull(EXYNOS4_GPC1(2), S3C_GPIO_PULL_UP);
exynos_spi_set_info(2, EXYNOS_SPI_SRCCLK_SCLK,
ARRAY_SIZE(spi2_csi));
}
spi_register_board_info(spi2_board_info, ARRAY_SIZE(spi2_board_info));
#endif
#ifdef CONFIG_BUSFREQ_OPP
dev_add(&busfreq, &exynos4_busfreq.dev);
ppmu_init(&exynos_ppmu[PPMU_DMC0], &exynos4_busfreq.dev);
ppmu_init(&exynos_ppmu[PPMU_DMC1], &exynos4_busfreq.dev);
ppmu_init(&exynos_ppmu[PPMU_CPU], &exynos4_busfreq.dev);
#endif
register_reboot_notifier(&exynos4_reboot_notifier);
}
/*
 * Machine descriptor for the SMDK4212 board.  The bootloader-supplied
 * machine type selects this entry; every hook is shared with the
 * SMDK4412 descriptor below -- only the machine ID differs.
 */
MACHINE_START(SMDK4212, "SMDK4X12")
	/* ATAG boot parameters live 0x100 bytes past the DRAM base */
	.boot_params	= S5P_PA_SDRAM + 0x100,
	.init_irq	= exynos4_init_irq,
	.map_io		= smdk4x12_map_io,
	.init_machine	= smdk4x12_machine_init,
	.timer		= &exynos4_timer,
MACHINE_END
/*
 * Machine descriptor for the SMDK4412 board.  Identical setup to the
 * SMDK4212 entry above; only the machine type ID differs so either
 * bootloader-reported board boots the same kernel.
 */
MACHINE_START(SMDK4412, "SMDK4X12")
	/* ATAG boot parameters live 0x100 bytes past the DRAM base */
	.boot_params	= S5P_PA_SDRAM + 0x100,
	.init_irq	= exynos4_init_irq,
	.map_io		= smdk4x12_map_io,
	.init_machine	= smdk4x12_machine_init,
	.timer		= &exynos4_timer,
MACHINE_END
| gpl-2.0 |
optimsoc/gzll-gcc | gcc/testsuite/gcc.c-torture/compile/20001226-1.c | 130 | 1078 | /* { dg-do assemble } */
/* { dg-skip-if "too much code for avr" { "avr-*-*" } { "*" } { "" } } */
/* { dg-skip-if "too much code for pdp11" { "pdp11-*-*" } { "*" } { "" } } */
/* { dg-xfail-if "jump beyond 128K not supported" { xtensa*-*-* } { "-O0" } { "" } } */
/* { dg-xfail-if "PR36698" { spu-*-* } { "-O0" } { "" } } */
/* { dg-skip-if "" { m32c-*-* } { "*" } { "" } } */
/* { dg-timeout-factor 4.0 } */
/* This testcase exposed two branch shortening bugs on powerpc. */
/*
 * Each C* macro layer multiplies the number of emitted three-way
 * comparisons: the full C4096() expansion produces tens of thousands of
 * conditional branches, which is what stresses the target back end's
 * branch-shortening / long-branch relaxation passes.  Do NOT simplify
 * these macros -- the sheer code size IS the test.
 */
#define C(a,b) \
if (a > b) goto gt; \
if (a < b) goto lt;
#define C4(x,b) C((x)[0], b) C((x)[1],b) C((x)[2],b) C((x)[3],b)
#define C16(x,y) C4(x, (y)[0]) C4(x, (y)[1]) C4(x, (y)[2]) C4(x, (y)[3])
#define C64(x,y) C16(x,y) C16(x+4,y) C16(x+8,y) C16(x+12,y)
#define C256(x,y) C64(x,y) C64(x,y+4) C64(x,y+8) C64(x,y+12)
#define C1024(x,y) C256(x,y) C256(x+16,y) C256(x+32,y) C256(x+48,y)
#define C4096(x,y) C1024(x,y) C1024(x,y+16) C1024(x,y+32) C1024(x,y+48)
/*
 * Branches to gt/lt as soon as any expanded comparison establishes an
 * ordering; the three distinct magic return values let a harness tell
 * which path the generated (possibly mis-shortened) branches took.
 */
unsigned foo(int x[64], int y[64])
{
	C4096(x,y);
	return 0x01234567;
gt:
	return 0x12345678;
lt:
	return 0xF0123456;
}
| gpl-2.0 |
maxwen/enrc2b-kernel-BLADE | mm/dmapool.c | 386 | 13283 | /*
* DMA Pool allocator
*
* Copyright 2001 David Brownell
* Copyright 2007 Intel Corporation
* Author: Matthew Wilcox <willy@linux.intel.com>
*
* This software may be redistributed and/or modified under the terms of
* the GNU General Public License ("GPL") version 2 as published by the
* Free Software Foundation.
*
* This allocator returns small blocks of a given size which are DMA-able by
* the given device. It uses the dma_alloc_coherent page allocator to get
* new pages, then splits them up into blocks of the required size.
* Many older drivers still have their own code to do this.
*
* The current design of this allocator is fairly simple. The pool is
* represented by the 'struct dma_pool' which keeps a doubly-linked list of
* allocated pages. Each page in the page_list is split into blocks of at
* least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
* list of free blocks within the page. Used blocks aren't tracked, but we
* keep a count of how many are currently allocated from each page.
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif
struct dma_pool {		/* the pool */
	struct list_head page_list;	/* all dma_pages backing this pool */
	spinlock_t lock;		/* protects page_list and page state */
	size_t size;			/* block size handed out to users */
	struct device *dev;		/* device doing DMA (may be NULL) */
	size_t allocation;		/* bytes per dma_alloc_coherent() */
	size_t boundary;		/* blocks never cross this boundary */
	char name[32];			/* diagnostics: sysfs "pools" file */
	wait_queue_head_t waitq;	/* __GFP_WAIT allocators sleep here */
	struct list_head pools;		/* link in dev->dma_pools */
};
struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;	/* link in pool->page_list */
	void *vaddr;			/* kernel virtual address of chunk */
	dma_addr_t dma;			/* bus/DMA address of chunk */
	unsigned int in_use;		/* blocks currently allocated */
	unsigned int offset;		/* head of embedded free-block list */
};
#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
static DEFINE_MUTEX(pools_lock);
/*
 * show_pools() - backend of the per-device "pools" sysfs attribute.
 * Emits a header line plus one summary line per DMA pool attached to
 * @dev: name, blocks in use, total block capacity, block size, pages.
 */
static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;	/* sysfs provides one page of output space */

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	/* pools_lock guards the device's list of pools */
	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		/* pool->lock guards its page list and per-page counters */
		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;	/* number of bytes written */
}

/* read-only attribute; created with the device's first pool */
static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	/* alignment must be a power of two; zero means "don't care" */
	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	/*
	 * Minimum block size is 4 bytes: every free block stores the int
	 * offset of the next free block in its leading bytes.
	 */
	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	/* grab at least a full page per dma_alloc_coherent() call */
	allocation = max_t(size_t, size, PAGE_SIZE);

	/* zero boundary: blocks only may not cross an allocation chunk */
	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		/* the device's first pool also creates its sysfs file */
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
/*
 * pool_initialise_page() - build the free-block list inside a fresh chunk.
 *
 * Each free block stores, as an int in its first bytes, the offset of the
 * next free block; dma_pool_alloc() pops blocks off this implicit singly
 * linked list via page->offset.  Blocks are laid out so that none would
 * straddle a multiple of pool->boundary; the final 'next' written is
 * >= pool->allocation, which terminates the chain.
 */
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		/* skip ahead rather than let a block cross the boundary */
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
/*
 * Allocate one 'allocation'-sized coherent chunk plus its bookkeeping
 * header, thread the chunk's internal free-block list, and add it to
 * the pool.  Returns NULL if either allocation fails.
 */
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page = kmalloc(sizeof(*page), mem_flags);

	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	pool_initialise_page(pool, page);
	page->in_use = 0;
	page->offset = 0;
	list_add(&page->page_list, &pool->page_list);

	return page;
}
/* Nonzero while any block carved from @page is still allocated. */
static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use ? 1 : 0;
}
/*
 * Return one chunk to the DMA API and free its header.  The caller must
 * guarantee no block from @page is still in use.
 */
static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	void *cpu_addr = page->vaddr;
	dma_addr_t bus_addr = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(cpu_addr, POOL_POISON_FREED, pool->allocation);
#endif
	list_del(&page->page_list);
	dma_free_coherent(pool->dev, pool->allocation, cpu_addr, bus_addr);
	kfree(page);
}
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	/* removing the device's last pool also removes its sysfs file */
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			/*
			 * Contract violated by the caller: blocks are
			 * still outstanding.  Complain loudly, then ...
			 */
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	/* any page whose offset is still inside the chunk has free blocks */
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	/* GFP_ATOMIC because pool->lock is held across the allocation */
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			/*
			 * Sleep (uninterruptibly, bounded by the pool
			 * timeout) until dma_pool_free() wakes us, then
			 * rescan from the top.
			 */
			__set_current_state(TASK_UNINTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	/* pop the head of the page's embedded free-block list */
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
/*
 * Linear scan for the page whose DMA window [page->dma,
 * page->dma + allocation) contains @dma.  Caller holds pool->lock.
 * Returns NULL when @dma was not handed out by this pool.
 */
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma >= page->dma && dma < page->dma + pool->allocation)
			return page;
	}

	return NULL;
}
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		/* @dma was never handed out by this pool */
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	/* the cpu and bus addresses must name the same block */
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		/* walk the free chain: finding @offset means double free */
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	/* push the block onto the head of the page's free list */
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/* pool->lock is held, so wake_up_locked() is safe here */
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
/*
* Managed DMA pool
*/
/* devres release callback: destroy the pool stored in the devres data. */
static void dmam_pool_release(struct device *dev, void *res)
{
	dma_pool_destroy(*(struct dma_pool **)res);
}
/* devres match callback: true when the devres entry owns @match_data. */
static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	struct dma_pool **pool_ptr = res;

	return *pool_ptr == match_data;
}
/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool *pool;
	struct dma_pool **dr;

	dr = devres_alloc(dmam_pool_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return NULL;

	pool = dma_pool_create(name, dev, size, align, allocation);
	*dr = pool;
	if (!pool) {
		devres_free(dr);
		return NULL;
	}

	/* hand ownership to devres: released automatically on detach */
	devres_add(dev, dr);
	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 *
 * Uses devres_release() so that the devres core both finds the entry
 * owning @pool and invokes dmam_pool_release() (which calls
 * dma_pool_destroy()) exactly once.  The previous devres_destroy() +
 * explicit dma_pool_destroy() sequence destroyed the pool even when the
 * devres lookup failed (the WARN_ON path), i.e. when the entry had
 * already been released on driver detach -- a double destruction.
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	/* devres_release() removes the entry AND runs dmam_pool_release() */
	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);
| gpl-2.0 |
sbu-fsl/fuse-kernel-instrumentation | drivers/staging/android/ashmem.c | 386 | 23141 | /* mm/ashmem.c
*
* Anonymous Shared Memory Subsystem, ashmem
*
* Copyright (C) 2008 Google, Inc.
*
* Robert Love <rlove@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "ashmem: " fmt
#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"
#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
/**
* struct ashmem_area - The anonymous shared memory area
* @name: The optional name in /proc/pid/maps
* @unpinned_list: The list of all ashmem areas
* @file: The shmem-based backing file
* @size: The size of the mapping, in bytes
* @prot_masks: The allowed protection bits, as vm_flags
*
* The lifecycle of this structure is from our parent file's open() until
* its release(). It is also protected by 'ashmem_mutex'
*
* Warning: Mappings do NOT pin this structure; It dies on close()
*/
struct ashmem_area {
char name[ASHMEM_FULL_NAME_LEN];
struct list_head unpinned_list;
struct file *file;
size_t size;
unsigned long prot_mask;
};
/**
* struct ashmem_range - A range of unpinned/evictable pages
* @lru: The entry in the LRU list
* @unpinned: The entry in its area's unpinned list
* @asma: The associated anonymous shared memory area.
* @pgstart: The starting page (inclusive)
* @pgend: The ending page (inclusive)
* @purged: The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED)
*
* The lifecycle of this structure is from unpin to pin.
* It is protected by 'ashmem_mutex'
*/
struct ashmem_range {
struct list_head lru;
struct list_head unpinned;
struct ashmem_area *asma;
size_t pgstart;
size_t pgend;
unsigned int purged;
};
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);
/**
* long lru_count - The count of pages on our LRU list.
*
* This is protected by ashmem_mutex.
*/
static unsigned long lru_count;
/**
* ashmem_mutex - protects the list of and each individual ashmem_area
*
* Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem
*/
static DEFINE_MUTEX(ashmem_mutex);
static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
/* Number of pages in @range; pgstart and pgend are both inclusive. */
#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

/* A range stays on the global LRU until it has been purged. */
#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

/* [start, end] completely covers @range. */
#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

/* @range completely covers [start, end]. */
#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

/* Single page @page lies inside @range. */
#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

/* Any overlap at all between @range and [start, end]. */
#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
		page_range_subsumes_range(range, start, end))

/* The whole of @range ends before @page. */
#define range_before_page(range, page) \
	((range)->pgend < (page))

#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)
/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range: The memory range being added.
 *
 * Appends @range at the tail of the global LRU list and accounts its
 * pages in @lru_count.  Caller must hold ashmem_mutex.
 */
static inline void lru_add(struct ashmem_range *range)
{
	lru_count += range_size(range);
	list_add_tail(&range->lru, &ashmem_lru_list);
}
/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range: The memory range being removed
 *
 * Unlinks @range from the global LRU list and subtracts its pages from
 * @lru_count.  Caller must hold ashmem_mutex.
 */
static inline void lru_del(struct ashmem_range *range)
{
	lru_count -= range_size(range);
	list_del(&range->lru);
}
/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma: The associated ashmem_area
 * @prev_range: The previous ashmem_range in the sorted asma->unpinned list
 * @purged: Initial purge status (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start: The starting page (inclusive)
 * @end: The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	/* insert adjacent to @prev_range, keeping the list's sort order */
	list_add_tail(&range->unpinned, &prev_range->unpinned);

	/* freshly unpinned (not-yet-purged) ranges are shrinker candidates */
	if (range_on_lru(range))
		lru_add(range);

	return 0;
}
/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range: The associated ashmem_range that has previously been allocated
 *
 * Drops @range from the LRU when it is still unpurged, unlinks it from
 * its area's unpinned list and returns it to the slab cache.
 */
static void range_del(struct ashmem_range *range)
{
	if (range_on_lru(range))
		lru_del(range);
	list_del(&range->unpinned);
	kmem_cache_free(ashmem_range_cachep, range);
}
/**
 * range_shrink() - Shrinks an ashmem_range
 * @range: The associated ashmem_range being shrunk
 * @start: The starting page of the new, smaller range
 * @end: The ending page of the new, smaller range
 *
 * Only the boundaries move -- the data inside is untouched.  When the
 * range is still on the LRU, @lru_count is reduced by the number of
 * pages trimmed off.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t old_size = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= old_size - range_size(range);
}
/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode: The backing file's index node(?)
 * @file: The backing file
 *
 * Please note that the ashmem_area is not returned by this function - It is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	/* inherit the generic open-time checks (O_LARGEFILE etc.) */
	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	/* name is the bare prefix until ASHMEM_SET_NAME fills the rest in */
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	/* all protections allowed until restricted via set_prot_mask() */
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}
/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored: The backing file's Index Node(?) - It is ignored here.
 * @file: The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	/* every unpinned range dies with the area */
	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	/* drop our reference on the backing shmem file, if one was created */
	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}
/**
 * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
 * @file: The associated backing file.
 * @buf: The buffer of data being written to
 * @len: The number of bytes being read
 * @pos: The position of the first byte to read.
 *
 * Return: number of bytes read (0 at EOF), or a negative errno.
 */
static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	/* no backing file until the area has been mmap()ed */
	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	mutex_unlock(&ashmem_mutex);

	/*
	 * asma and asma->file are used outside the lock here.  We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	ret = __vfs_read(asma->file, buf, len, pos);
	if (ret >= 0) {
		/** Update backing file pos, since f_ops->read() doesn't */
		asma->file->f_pos = *pos;
	}
	return ret;

out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
/**
 * ashmem_llseek() - Seeks within an Ashmem-enabled file
 * @file: The ashmem file
 * @offset: The offset to seek to
 * @origin: The whence value (SEEK_SET/SEEK_CUR/SEEK_END style)
 *
 * Delegates to vfs_llseek() on the backing shmem file and mirrors the
 * resulting position back into @file.  Fails with -EINVAL before a size
 * has been set, and -EBADF before the backing file exists (pre-mmap).
 *
 * Return: the new file position, or a negative errno.
 */
static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/** Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
/* Translate PROT_{READ,WRITE,EXEC} bits into the matching VM_MAY* flags. */
static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	vm_flags_t flags = _calc_vm_trans(prot, PROT_READ, VM_MAYREAD);

	flags |= _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE);
	flags |= _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);

	return flags;
}
/*
 * ashmem_mmap() - maps an ashmem area into a process address space.
 *
 * Lazily creates the backing shmem file on first map, enforces the
 * area's protection mask, and substitutes the shmem file as
 * vma->vm_file so faults are serviced by tmpfs.  The caller must have
 * issued ASHMEM_SET_SIZE first.
 */
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	/* also forbid later mprotect() to disallowed protections */
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		/* use the user-supplied name when one was set */
		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	/* swap in our file as the vma's backing file */
	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects freed or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		/*
		 * Punch a hole in the backing shmem file: frees the
		 * committed memory while the mapping itself survives.
		 */
		vfs_fallocate(range->asma->file,
			      FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		/* purged ranges leave the LRU but stay on the unpinned list */
		lru_del(range);

		freed += range_size(range);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
	return freed;
}
/* Shrinker '.count_objects' callback: report reclaimable page count. */
static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * note that lru_count is count of pages on the lru, not a count of
	 * objects on the list. This means the scan function needs to return the
	 * number of pages freed, not the number of objects scanned.
	 */
	return lru_count;
}
static struct shrinker ashmem_shrinker = {
.count_objects = ashmem_shrink_count,
.scan_objects = ashmem_shrink_scan,
/*
* XXX (dchinner): I wish people would comment on why they need on
* significant changes to the default value here
*/
.seeks = DEFAULT_SEEKS * 4,
};
/*
 * ASHMEM_SET_PROT_MASK backend: narrow the area's allowed protections.
 * Bits may only ever be dropped, never regained.  READ_IMPLIES_EXEC
 * personalities get PROT_EXEC added whenever PROT_READ is kept.
 */
static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int err = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		err = -EINVAL;
		goto unlock;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

unlock:
	mutex_unlock(&ashmem_mutex);
	return err;
}
/*
 * ASHMEM_SET_NAME backend: copy a user-supplied name into asma->name
 * (after the "dev/ashmem/" prefix).  Fails with -EINVAL once the area
 * has been mapped, because the name is baked into the backing shmem
 * file at mmap time.
 */
static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * an data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, there by leading to
	 * deadlock. We'll release the mutex and take the name to a local
	 * variable that does not need protection and later copy the local
	 * variable to the structure member with lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	/* force NUL termination when the user string filled the buffer */
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (unlikely(asma->file))
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}
/*
 * ASHMEM_GET_NAME backend: copy the area's name (sans prefix) -- or the
 * default name when none was set -- out to user space.
 */
static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to the user
	 * space safely without holding any locks. So even if we proceed to
	 * wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		/* no name set: report the compiled-in default */
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland
	 * No lock held
	 */
	if (unlikely(copy_to_user(name, local_name, len)))
		ret = -EFAULT;
	return ret;
}
/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	/* the unpinned list is walked in order; _safe because we may delete */
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			/* accumulate purged state across every touched range */
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart, pgstart-1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			/* a hole-punch can match at most one range; done */
			break;
		}
	}

	return ret;
}
/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			/*
			 * Partial overlap: widen the request to cover the
			 * whole existing range, inherit its purged state,
			 * remove it, and rescan — the merged interval may
			 * now overlap earlier list entries.
			 *
			 * Fix: the two assignments below were previously
			 * joined by a comma operator ("pgstart = ...,"),
			 * which happened to work but read as a typo; use
			 * separate statements.
			 */
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}
/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		/* sorted list: nothing past this point can overlap */
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend))
			return ASHMEM_IS_UNPINNED;
	}

	return ASHMEM_IS_PINNED;
}
/*
 * ashmem_pin_unpin - validate an ASHMEM_{PIN,UNPIN,GET_PIN_STATUS} request
 * from userspace and dispatch it with ashmem_mutex held.
 *
 * Returns the result of the dispatched operation, -EINVAL on a malformed
 * request or unmapped region, or -EFAULT if the argument copy fails.
 */
static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	/* pinning only makes sense once the region has a backing file */
	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	/* both offset and len must be page-aligned */
	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	/* reject 32-bit wraparound of offset + len */
	if (unlikely(((__u32) -1) - pin.offset < pin.len))
		return -EINVAL;

	/* the request must lie inside the page-rounded region size */
	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	/* convert the byte range into an inclusive page-index range */
	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}
/*
 * ashmem_ioctl - main control interface for /dev/ashmem.
 *
 * Dispatches the ASHMEM_* ioctl commands; unknown commands return -ENOTTY
 * per convention.  Note the GET_SIZE/GET_PROT_MASK reads and the SET_SIZE
 * check of asma->file are done without ashmem_mutex here.
 */
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		/* size is immutable once the region has been mmapped */
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t) arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		/* privileged: forcibly purge every unpinned range */
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};
			/* report the pre-purge object count to the caller */
			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}
/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	/*
	 * Only the two size_t-carrying commands differ between 32- and
	 * 64-bit ABIs; translate them to the native numbers and reuse the
	 * regular ioctl handler for everything.
	 */
	if (cmd == COMPAT_ASHMEM_SET_SIZE)
		cmd = ASHMEM_SET_SIZE;
	else if (cmd == COMPAT_ASHMEM_SET_PROT_MASK)
		cmd = ASHMEM_SET_PROT_MASK;

	return ashmem_ioctl(file, cmd, arg);
}
#endif
/* File operations for /dev/ashmem; the ioctl is the main control surface. */
static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
};
/* Misc-device node: exposed to userspace as /dev/ashmem. */
static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};
/*
 * ashmem_init - module initialization.
 *
 * Creates the two slab caches, registers the misc device and the memory
 * shrinker.  Returns 0 on success or a negative errno.
 *
 * Fix: the original returned directly on mid-sequence failures, leaking
 * any slab cache created earlier (kmem_cache_create() failure after the
 * area cache, or misc_register() failure after both caches).  Unwind with
 * the standard goto-cleanup pattern instead.
 */
static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out_free_area;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		goto out_free_range;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;

out_free_range:
	kmem_cache_destroy(ashmem_range_cachep);
out_free_area:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
/*
 * ashmem_exit - module teardown: unhook the shrinker, remove the device
 * node, and release both slab caches (reverse of ashmem_init()).
 */
static void __exit ashmem_exit(void)
{
	int err;

	unregister_shrinker(&ashmem_shrinker);

	err = misc_deregister(&ashmem_misc);
	if (unlikely(err))
		pr_err("failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	pr_info("unloaded\n");
}
/* Standard module entry/exit hookup and license declaration. */
module_init(ashmem_init);
module_exit(ashmem_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
grouper-tests/android_kernel_asus_grouper | drivers/net/ksz884x.c | 386 | 183990 | /**
* drivers/net/ksx884x.c - Micrel KSZ8841/2 PCI Ethernet driver
*
* Copyright (c) 2009-2010 Micrel, Inc.
* Tristram Ha <Tristram.Ha@micrel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/slab.h>
/* DMA Registers */
#define KS_DMA_TX_CTRL 0x0000
#define DMA_TX_ENABLE 0x00000001
#define DMA_TX_CRC_ENABLE 0x00000002
#define DMA_TX_PAD_ENABLE 0x00000004
#define DMA_TX_LOOPBACK 0x00000100
#define DMA_TX_FLOW_ENABLE 0x00000200
#define DMA_TX_CSUM_IP 0x00010000
#define DMA_TX_CSUM_TCP 0x00020000
#define DMA_TX_CSUM_UDP 0x00040000
#define DMA_TX_BURST_SIZE 0x3F000000
#define KS_DMA_RX_CTRL 0x0004
#define DMA_RX_ENABLE 0x00000001
#define KS884X_DMA_RX_MULTICAST 0x00000002
#define DMA_RX_PROMISCUOUS 0x00000004
#define DMA_RX_ERROR 0x00000008
#define DMA_RX_UNICAST 0x00000010
#define DMA_RX_ALL_MULTICAST 0x00000020
#define DMA_RX_BROADCAST 0x00000040
#define DMA_RX_FLOW_ENABLE 0x00000200
#define DMA_RX_CSUM_IP 0x00010000
#define DMA_RX_CSUM_TCP 0x00020000
#define DMA_RX_CSUM_UDP 0x00040000
#define DMA_RX_BURST_SIZE 0x3F000000
#define DMA_BURST_SHIFT 24
#define DMA_BURST_DEFAULT 8
#define KS_DMA_TX_START 0x0008
#define KS_DMA_RX_START 0x000C
#define DMA_START 0x00000001
#define KS_DMA_TX_ADDR 0x0010
#define KS_DMA_RX_ADDR 0x0014
#define DMA_ADDR_LIST_MASK 0xFFFFFFFC
#define DMA_ADDR_LIST_SHIFT 2
/* MTR0 */
#define KS884X_MULTICAST_0_OFFSET 0x0020
#define KS884X_MULTICAST_1_OFFSET 0x0021
#define KS884X_MULTICAST_2_OFFSET 0x0022
#define KS884x_MULTICAST_3_OFFSET 0x0023
/* MTR1 */
#define KS884X_MULTICAST_4_OFFSET 0x0024
#define KS884X_MULTICAST_5_OFFSET 0x0025
#define KS884X_MULTICAST_6_OFFSET 0x0026
#define KS884X_MULTICAST_7_OFFSET 0x0027
/* Interrupt Registers */
/* INTEN */
#define KS884X_INTERRUPTS_ENABLE 0x0028
/* INTST */
#define KS884X_INTERRUPTS_STATUS 0x002C
#define KS884X_INT_RX_STOPPED 0x02000000
#define KS884X_INT_TX_STOPPED 0x04000000
#define KS884X_INT_RX_OVERRUN 0x08000000
#define KS884X_INT_TX_EMPTY 0x10000000
#define KS884X_INT_RX 0x20000000
#define KS884X_INT_TX 0x40000000
#define KS884X_INT_PHY 0x80000000
#define KS884X_INT_RX_MASK \
(KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
#define KS884X_INT_TX_MASK \
(KS884X_INT_TX | KS884X_INT_TX_EMPTY)
#define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
/* MAC Additional Station Address */
/* MAAL0 */
#define KS_ADD_ADDR_0_LO 0x0080
/* MAAH0 */
#define KS_ADD_ADDR_0_HI 0x0084
/* MAAL1 */
#define KS_ADD_ADDR_1_LO 0x0088
/* MAAH1 */
#define KS_ADD_ADDR_1_HI 0x008C
/* MAAL2 */
#define KS_ADD_ADDR_2_LO 0x0090
/* MAAH2 */
#define KS_ADD_ADDR_2_HI 0x0094
/* MAAL3 */
#define KS_ADD_ADDR_3_LO 0x0098
/* MAAH3 */
#define KS_ADD_ADDR_3_HI 0x009C
/* MAAL4 */
#define KS_ADD_ADDR_4_LO 0x00A0
/* MAAH4 */
#define KS_ADD_ADDR_4_HI 0x00A4
/* MAAL5 */
#define KS_ADD_ADDR_5_LO 0x00A8
/* MAAH5 */
#define KS_ADD_ADDR_5_HI 0x00AC
/* MAAL6 */
#define KS_ADD_ADDR_6_LO 0x00B0
/* MAAH6 */
#define KS_ADD_ADDR_6_HI 0x00B4
/* MAAL7 */
#define KS_ADD_ADDR_7_LO 0x00B8
/* MAAH7 */
#define KS_ADD_ADDR_7_HI 0x00BC
/* MAAL8 */
#define KS_ADD_ADDR_8_LO 0x00C0
/* MAAH8 */
#define KS_ADD_ADDR_8_HI 0x00C4
/* MAAL9 */
#define KS_ADD_ADDR_9_LO 0x00C8
/* MAAH9 */
#define KS_ADD_ADDR_9_HI 0x00CC
/* MAAL10 */
#define KS_ADD_ADDR_A_LO 0x00D0
/* MAAH10 */
#define KS_ADD_ADDR_A_HI 0x00D4
/* MAAL11 */
#define KS_ADD_ADDR_B_LO 0x00D8
/* MAAH11 */
#define KS_ADD_ADDR_B_HI 0x00DC
/* MAAL12 */
#define KS_ADD_ADDR_C_LO 0x00E0
/* MAAH12 */
#define KS_ADD_ADDR_C_HI 0x00E4
/* MAAL13 */
#define KS_ADD_ADDR_D_LO 0x00E8
/* MAAH13 */
#define KS_ADD_ADDR_D_HI 0x00EC
/* MAAL14 */
#define KS_ADD_ADDR_E_LO 0x00F0
/* MAAH14 */
#define KS_ADD_ADDR_E_HI 0x00F4
/* MAAL15 */
#define KS_ADD_ADDR_F_LO 0x00F8
/* MAAH15 */
#define KS_ADD_ADDR_F_HI 0x00FC
#define ADD_ADDR_HI_MASK 0x0000FFFF
#define ADD_ADDR_ENABLE 0x80000000
#define ADD_ADDR_INCR 8
/* Miscellaneous Registers */
/* MARL */
#define KS884X_ADDR_0_OFFSET 0x0200
#define KS884X_ADDR_1_OFFSET 0x0201
/* MARM */
#define KS884X_ADDR_2_OFFSET 0x0202
#define KS884X_ADDR_3_OFFSET 0x0203
/* MARH */
#define KS884X_ADDR_4_OFFSET 0x0204
#define KS884X_ADDR_5_OFFSET 0x0205
/* OBCR */
#define KS884X_BUS_CTRL_OFFSET 0x0210
#define BUS_SPEED_125_MHZ 0x0000
#define BUS_SPEED_62_5_MHZ 0x0001
#define BUS_SPEED_41_66_MHZ 0x0002
#define BUS_SPEED_25_MHZ 0x0003
/* EEPCR */
#define KS884X_EEPROM_CTRL_OFFSET 0x0212
#define EEPROM_CHIP_SELECT 0x0001
#define EEPROM_SERIAL_CLOCK 0x0002
#define EEPROM_DATA_OUT 0x0004
#define EEPROM_DATA_IN 0x0008
#define EEPROM_ACCESS_ENABLE 0x0010
/* MBIR */
#define KS884X_MEM_INFO_OFFSET 0x0214
#define RX_MEM_TEST_FAILED 0x0008
#define RX_MEM_TEST_FINISHED 0x0010
#define TX_MEM_TEST_FAILED 0x0800
#define TX_MEM_TEST_FINISHED 0x1000
/* GCR */
#define KS884X_GLOBAL_CTRL_OFFSET 0x0216
#define GLOBAL_SOFTWARE_RESET 0x0001
#define KS8841_POWER_MANAGE_OFFSET 0x0218
/* WFCR */
#define KS8841_WOL_CTRL_OFFSET 0x021A
#define KS8841_WOL_MAGIC_ENABLE 0x0080
#define KS8841_WOL_FRAME3_ENABLE 0x0008
#define KS8841_WOL_FRAME2_ENABLE 0x0004
#define KS8841_WOL_FRAME1_ENABLE 0x0002
#define KS8841_WOL_FRAME0_ENABLE 0x0001
/* WF0 */
#define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
#define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
#define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
/* IACR */
#define KS884X_IACR_P 0x04A0
#define KS884X_IACR_OFFSET KS884X_IACR_P
/* IADR1 */
#define KS884X_IADR1_P 0x04A2
#define KS884X_IADR2_P 0x04A4
#define KS884X_IADR3_P 0x04A6
#define KS884X_IADR4_P 0x04A8
#define KS884X_IADR5_P 0x04AA
#define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
#define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
#define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
#define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
#define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
#define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
#define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
#define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
#define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
#define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
#define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
/* P1MBCR */
#define KS884X_P1MBCR_P 0x04D0
#define KS884X_P1MBSR_P 0x04D2
#define KS884X_PHY1ILR_P 0x04D4
#define KS884X_PHY1IHR_P 0x04D6
#define KS884X_P1ANAR_P 0x04D8
#define KS884X_P1ANLPR_P 0x04DA
/* P2MBCR */
#define KS884X_P2MBCR_P 0x04E0
#define KS884X_P2MBSR_P 0x04E2
#define KS884X_PHY2ILR_P 0x04E4
#define KS884X_PHY2IHR_P 0x04E6
#define KS884X_P2ANAR_P 0x04E8
#define KS884X_P2ANLPR_P 0x04EA
#define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
#define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
#define KS884X_PHY_CTRL_OFFSET 0x00
/* Mode Control Register */
#define PHY_REG_CTRL 0
#define PHY_RESET 0x8000
#define PHY_LOOPBACK 0x4000
#define PHY_SPEED_100MBIT 0x2000
#define PHY_AUTO_NEG_ENABLE 0x1000
#define PHY_POWER_DOWN 0x0800
#define PHY_MII_DISABLE 0x0400
#define PHY_AUTO_NEG_RESTART 0x0200
#define PHY_FULL_DUPLEX 0x0100
#define PHY_COLLISION_TEST 0x0080
#define PHY_HP_MDIX 0x0020
#define PHY_FORCE_MDIX 0x0010
#define PHY_AUTO_MDIX_DISABLE 0x0008
#define PHY_REMOTE_FAULT_DISABLE 0x0004
#define PHY_TRANSMIT_DISABLE 0x0002
#define PHY_LED_DISABLE 0x0001
#define KS884X_PHY_STATUS_OFFSET 0x02
/* Mode Status Register */
#define PHY_REG_STATUS 1
#define PHY_100BT4_CAPABLE 0x8000
#define PHY_100BTX_FD_CAPABLE 0x4000
#define PHY_100BTX_CAPABLE 0x2000
#define PHY_10BT_FD_CAPABLE 0x1000
#define PHY_10BT_CAPABLE 0x0800
#define PHY_MII_SUPPRESS_CAPABLE 0x0040
#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
#define PHY_REMOTE_FAULT 0x0010
#define PHY_AUTO_NEG_CAPABLE 0x0008
#define PHY_LINK_STATUS 0x0004
#define PHY_JABBER_DETECT 0x0002
#define PHY_EXTENDED_CAPABILITY 0x0001
#define KS884X_PHY_ID_1_OFFSET 0x04
#define KS884X_PHY_ID_2_OFFSET 0x06
/* PHY Identifier Registers */
#define PHY_REG_ID_1 2
#define PHY_REG_ID_2 3
#define KS884X_PHY_AUTO_NEG_OFFSET 0x08
/* Auto-Negotiation Advertisement Register */
#define PHY_REG_AUTO_NEGOTIATION 4
#define PHY_AUTO_NEG_NEXT_PAGE 0x8000
#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
/* Not supported. */
#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
#define PHY_AUTO_NEG_SYM_PAUSE 0x0400
#define PHY_AUTO_NEG_100BT4 0x0200
#define PHY_AUTO_NEG_100BTX_FD 0x0100
#define PHY_AUTO_NEG_100BTX 0x0080
#define PHY_AUTO_NEG_10BT_FD 0x0040
#define PHY_AUTO_NEG_10BT 0x0020
#define PHY_AUTO_NEG_SELECTOR 0x001F
#define PHY_AUTO_NEG_802_3 0x0001
#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
/* Auto-Negotiation Link Partner Ability Register */
#define PHY_REG_REMOTE_CAPABILITY 5
#define PHY_REMOTE_NEXT_PAGE 0x8000
#define PHY_REMOTE_ACKNOWLEDGE 0x4000
#define PHY_REMOTE_REMOTE_FAULT 0x2000
#define PHY_REMOTE_SYM_PAUSE 0x0400
#define PHY_REMOTE_100BTX_FD 0x0100
#define PHY_REMOTE_100BTX 0x0080
#define PHY_REMOTE_10BT_FD 0x0040
#define PHY_REMOTE_10BT 0x0020
/* P1VCT */
#define KS884X_P1VCT_P 0x04F0
#define KS884X_P1PHYCTRL_P 0x04F2
/* P2VCT */
#define KS884X_P2VCT_P 0x04F4
#define KS884X_P2PHYCTRL_P 0x04F6
#define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
#define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
#define KS884X_PHY_LINK_MD_OFFSET 0x00
#define PHY_START_CABLE_DIAG 0x8000
#define PHY_CABLE_DIAG_RESULT 0x6000
#define PHY_CABLE_STAT_NORMAL 0x0000
#define PHY_CABLE_STAT_OPEN 0x2000
#define PHY_CABLE_STAT_SHORT 0x4000
#define PHY_CABLE_STAT_FAILED 0x6000
#define PHY_CABLE_10M_SHORT 0x1000
#define PHY_CABLE_FAULT_COUNTER 0x01FF
#define KS884X_PHY_PHY_CTRL_OFFSET 0x02
#define PHY_STAT_REVERSED_POLARITY 0x0020
#define PHY_STAT_MDIX 0x0010
#define PHY_FORCE_LINK 0x0008
#define PHY_POWER_SAVING_DISABLE 0x0004
#define PHY_REMOTE_LOOPBACK 0x0002
/* SIDER */
#define KS884X_SIDER_P 0x0400
#define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
#define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
#define REG_FAMILY_ID 0x88
#define REG_CHIP_ID_41 0x8810
#define REG_CHIP_ID_42 0x8800
#define KS884X_CHIP_ID_MASK_41 0xFF10
#define KS884X_CHIP_ID_MASK 0xFFF0
#define KS884X_CHIP_ID_SHIFT 4
#define KS884X_REVISION_MASK 0x000E
#define KS884X_REVISION_SHIFT 1
#define KS8842_START 0x0001
#define CHIP_IP_41_M 0x8810
#define CHIP_IP_42_M 0x8800
#define CHIP_IP_61_M 0x8890
#define CHIP_IP_62_M 0x8880
#define CHIP_IP_41_P 0x8850
#define CHIP_IP_42_P 0x8840
#define CHIP_IP_61_P 0x88D0
#define CHIP_IP_62_P 0x88C0
/* SGCR1 */
#define KS8842_SGCR1_P 0x0402
#define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
#define SWITCH_PASS_ALL 0x8000
#define SWITCH_TX_FLOW_CTRL 0x2000
#define SWITCH_RX_FLOW_CTRL 0x1000
#define SWITCH_CHECK_LENGTH 0x0800
#define SWITCH_AGING_ENABLE 0x0400
#define SWITCH_FAST_AGING 0x0200
#define SWITCH_AGGR_BACKOFF 0x0100
#define SWITCH_PASS_PAUSE 0x0008
#define SWITCH_LINK_AUTO_AGING 0x0001
/* SGCR2 */
#define KS8842_SGCR2_P 0x0404
#define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
#define SWITCH_VLAN_ENABLE 0x8000
#define SWITCH_IGMP_SNOOP 0x4000
#define IPV6_MLD_SNOOP_ENABLE 0x2000
#define IPV6_MLD_SNOOP_OPTION 0x1000
#define PRIORITY_SCHEME_SELECT 0x0800
#define SWITCH_MIRROR_RX_TX 0x0100
#define UNICAST_VLAN_BOUNDARY 0x0080
#define MULTICAST_STORM_DISABLE 0x0040
#define SWITCH_BACK_PRESSURE 0x0020
#define FAIR_FLOW_CTRL 0x0010
#define NO_EXC_COLLISION_DROP 0x0008
#define SWITCH_HUGE_PACKET 0x0004
#define SWITCH_LEGAL_PACKET 0x0002
#define SWITCH_BUF_RESERVE 0x0001
/* SGCR3 */
#define KS8842_SGCR3_P 0x0406
#define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
#define BROADCAST_STORM_RATE_LO 0xFF00
#define SWITCH_REPEATER 0x0080
#define SWITCH_HALF_DUPLEX 0x0040
#define SWITCH_FLOW_CTRL 0x0020
#define SWITCH_10_MBIT 0x0010
#define SWITCH_REPLACE_NULL_VID 0x0008
#define BROADCAST_STORM_RATE_HI 0x0007
#define BROADCAST_STORM_RATE 0x07FF
/* SGCR4 */
#define KS8842_SGCR4_P 0x0408
/* SGCR5 */
#define KS8842_SGCR5_P 0x040A
#define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
#define LED_MODE 0x8200
#define LED_SPEED_DUPLEX_ACT 0x0000
#define LED_SPEED_DUPLEX_LINK_ACT 0x8000
#define LED_DUPLEX_10_100 0x0200
/* SGCR6 */
#define KS8842_SGCR6_P 0x0410
#define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
#define KS8842_PRIORITY_MASK 3
#define KS8842_PRIORITY_SHIFT 2
/* SGCR7 */
#define KS8842_SGCR7_P 0x0412
#define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
#define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
#define SWITCH_UNK_DEF_PORT_3 0x0004
#define SWITCH_UNK_DEF_PORT_2 0x0002
#define SWITCH_UNK_DEF_PORT_1 0x0001
/* MACAR1 */
#define KS8842_MACAR1_P 0x0470
#define KS8842_MACAR2_P 0x0472
#define KS8842_MACAR3_P 0x0474
#define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
#define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
#define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
#define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
#define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
#define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
/* TOSR1 */
#define KS8842_TOSR1_P 0x0480
#define KS8842_TOSR2_P 0x0482
#define KS8842_TOSR3_P 0x0484
#define KS8842_TOSR4_P 0x0486
#define KS8842_TOSR5_P 0x0488
#define KS8842_TOSR6_P 0x048A
#define KS8842_TOSR7_P 0x0490
#define KS8842_TOSR8_P 0x0492
#define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
#define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
#define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
#define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
#define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
#define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
#define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
#define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
/* P1CR1 */
#define KS8842_P1CR1_P 0x0500
#define KS8842_P1CR2_P 0x0502
#define KS8842_P1VIDR_P 0x0504
#define KS8842_P1CR3_P 0x0506
#define KS8842_P1IRCR_P 0x0508
#define KS8842_P1ERCR_P 0x050A
#define KS884X_P1SCSLMD_P 0x0510
#define KS884X_P1CR4_P 0x0512
#define KS884X_P1SR_P 0x0514
/* P2CR1 */
#define KS8842_P2CR1_P 0x0520
#define KS8842_P2CR2_P 0x0522
#define KS8842_P2VIDR_P 0x0524
#define KS8842_P2CR3_P 0x0526
#define KS8842_P2IRCR_P 0x0528
#define KS8842_P2ERCR_P 0x052A
#define KS884X_P2SCSLMD_P 0x0530
#define KS884X_P2CR4_P 0x0532
#define KS884X_P2SR_P 0x0534
/* P3CR1 */
#define KS8842_P3CR1_P 0x0540
#define KS8842_P3CR2_P 0x0542
#define KS8842_P3VIDR_P 0x0544
#define KS8842_P3CR3_P 0x0546
#define KS8842_P3IRCR_P 0x0548
#define KS8842_P3ERCR_P 0x054A
#define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
#define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
#define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
#define PORT_CTRL_ADDR(port, addr) \
(addr = KS8842_PORT_1_CTRL_1 + (port) * \
(KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
#define KS8842_PORT_CTRL_1_OFFSET 0x00
#define PORT_BROADCAST_STORM 0x0080
#define PORT_DIFFSERV_ENABLE 0x0040
#define PORT_802_1P_ENABLE 0x0020
#define PORT_BASED_PRIORITY_MASK 0x0018
#define PORT_BASED_PRIORITY_BASE 0x0003
#define PORT_BASED_PRIORITY_SHIFT 3
#define PORT_BASED_PRIORITY_0 0x0000
#define PORT_BASED_PRIORITY_1 0x0008
#define PORT_BASED_PRIORITY_2 0x0010
#define PORT_BASED_PRIORITY_3 0x0018
#define PORT_INSERT_TAG 0x0004
#define PORT_REMOVE_TAG 0x0002
#define PORT_PRIO_QUEUE_ENABLE 0x0001
#define KS8842_PORT_CTRL_2_OFFSET 0x02
#define PORT_INGRESS_VLAN_FILTER 0x4000
#define PORT_DISCARD_NON_VID 0x2000
#define PORT_FORCE_FLOW_CTRL 0x1000
#define PORT_BACK_PRESSURE 0x0800
#define PORT_TX_ENABLE 0x0400
#define PORT_RX_ENABLE 0x0200
#define PORT_LEARN_DISABLE 0x0100
#define PORT_MIRROR_SNIFFER 0x0080
#define PORT_MIRROR_RX 0x0040
#define PORT_MIRROR_TX 0x0020
#define PORT_USER_PRIORITY_CEILING 0x0008
#define PORT_VLAN_MEMBERSHIP 0x0007
#define KS8842_PORT_CTRL_VID_OFFSET 0x04
#define PORT_DEFAULT_VID 0x0001
#define KS8842_PORT_CTRL_3_OFFSET 0x06
#define PORT_INGRESS_LIMIT_MODE 0x000C
#define PORT_INGRESS_ALL 0x0000
#define PORT_INGRESS_UNICAST 0x0004
#define PORT_INGRESS_MULTICAST 0x0008
#define PORT_INGRESS_BROADCAST 0x000C
#define PORT_COUNT_IFG 0x0002
#define PORT_COUNT_PREAMBLE 0x0001
#define KS8842_PORT_IN_RATE_OFFSET 0x08
#define KS8842_PORT_OUT_RATE_OFFSET 0x0A
#define PORT_PRIORITY_RATE 0x0F
#define PORT_PRIORITY_RATE_SHIFT 4
#define KS884X_PORT_LINK_MD 0x10
#define PORT_CABLE_10M_SHORT 0x8000
#define PORT_CABLE_DIAG_RESULT 0x6000
#define PORT_CABLE_STAT_NORMAL 0x0000
#define PORT_CABLE_STAT_OPEN 0x2000
#define PORT_CABLE_STAT_SHORT 0x4000
#define PORT_CABLE_STAT_FAILED 0x6000
#define PORT_START_CABLE_DIAG 0x1000
#define PORT_FORCE_LINK 0x0800
#define PORT_POWER_SAVING_DISABLE 0x0400
#define PORT_PHY_REMOTE_LOOPBACK 0x0200
#define PORT_CABLE_FAULT_COUNTER 0x01FF
#define KS884X_PORT_CTRL_4_OFFSET 0x12
#define PORT_LED_OFF 0x8000
#define PORT_TX_DISABLE 0x4000
#define PORT_AUTO_NEG_RESTART 0x2000
#define PORT_REMOTE_FAULT_DISABLE 0x1000
#define PORT_POWER_DOWN 0x0800
#define PORT_AUTO_MDIX_DISABLE 0x0400
#define PORT_FORCE_MDIX 0x0200
#define PORT_LOOPBACK 0x0100
#define PORT_AUTO_NEG_ENABLE 0x0080
#define PORT_FORCE_100_MBIT 0x0040
#define PORT_FORCE_FULL_DUPLEX 0x0020
#define PORT_AUTO_NEG_SYM_PAUSE 0x0010
#define PORT_AUTO_NEG_100BTX_FD 0x0008
#define PORT_AUTO_NEG_100BTX 0x0004
#define PORT_AUTO_NEG_10BT_FD 0x0002
#define PORT_AUTO_NEG_10BT 0x0001
#define KS884X_PORT_STATUS_OFFSET 0x14
#define PORT_HP_MDIX 0x8000
#define PORT_REVERSED_POLARITY 0x2000
#define PORT_RX_FLOW_CTRL 0x0800
#define PORT_TX_FLOW_CTRL 0x1000
#define PORT_STATUS_SPEED_100MBIT 0x0400
#define PORT_STATUS_FULL_DUPLEX 0x0200
#define PORT_REMOTE_FAULT 0x0100
#define PORT_MDIX_STATUS 0x0080
#define PORT_AUTO_NEG_COMPLETE 0x0040
#define PORT_STATUS_LINK_GOOD 0x0020
#define PORT_REMOTE_SYM_PAUSE 0x0010
#define PORT_REMOTE_100BTX_FD 0x0008
#define PORT_REMOTE_100BTX 0x0004
#define PORT_REMOTE_10BT_FD 0x0002
#define PORT_REMOTE_10BT 0x0001
/*
#define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
#define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
#define STATIC_MAC_TABLE_VALID 00-00080000-00000000
#define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
#define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
#define STATIC_MAC_TABLE_FID 00-03C00000-00000000
*/
#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
#define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
#define STATIC_MAC_TABLE_VALID 0x00080000
#define STATIC_MAC_TABLE_OVERRIDE 0x00100000
#define STATIC_MAC_TABLE_USE_FID 0x00200000
#define STATIC_MAC_TABLE_FID 0x03C00000
#define STATIC_MAC_FWD_PORTS_SHIFT 16
#define STATIC_MAC_FID_SHIFT 22
/*
#define VLAN_TABLE_VID 00-00000000-00000FFF
#define VLAN_TABLE_FID 00-00000000-0000F000
#define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
#define VLAN_TABLE_VALID 00-00000000-00080000
*/
#define VLAN_TABLE_VID 0x00000FFF
#define VLAN_TABLE_FID 0x0000F000
#define VLAN_TABLE_MEMBERSHIP 0x00070000
#define VLAN_TABLE_VALID 0x00080000
#define VLAN_TABLE_FID_SHIFT 12
#define VLAN_TABLE_MEMBERSHIP_SHIFT 16
/*
#define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
#define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
#define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
#define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
#define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
#define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
#define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
#define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
*/
#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
#define DYNAMIC_MAC_TABLE_FID 0x000F0000
#define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
#define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
#define DYNAMIC_MAC_TABLE_RESERVED 0x78
#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
#define DYNAMIC_MAC_FID_SHIFT 16
#define DYNAMIC_MAC_SRC_PORT_SHIFT 20
#define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
#define DYNAMIC_MAC_ENTRIES_SHIFT 24
#define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
/*
#define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
#define MIB_COUNTER_VALID 00-00000000-40000000
#define MIB_COUNTER_OVERFLOW 00-00000000-80000000
*/
#define MIB_COUNTER_VALUE 0x3FFFFFFF
#define MIB_COUNTER_VALID 0x40000000
#define MIB_COUNTER_OVERFLOW 0x80000000
#define MIB_PACKET_DROPPED 0x0000FFFF
#define KS_MIB_PACKET_DROPPED_TX_0 0x100
#define KS_MIB_PACKET_DROPPED_TX_1 0x101
#define KS_MIB_PACKET_DROPPED_TX 0x102
#define KS_MIB_PACKET_DROPPED_RX_0 0x103
#define KS_MIB_PACKET_DROPPED_RX_1 0x104
#define KS_MIB_PACKET_DROPPED_RX 0x105
/* Change default LED mode. */
#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
#define MAC_ADDR_LEN 6
#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i))
#define MAX_ETHERNET_BODY_SIZE 1500
#define ETHERNET_HEADER_SIZE 14
#define MAX_ETHERNET_PACKET_SIZE \
(MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
#define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
#define MAX_RX_BUF_SIZE (1912 + 4)
#define ADDITIONAL_ENTRIES 16
#define MAX_MULTICAST_LIST 32
#define HW_MULTICAST_SIZE 8
#define HW_TO_DEV_PORT(port) (port - 1)
/* Link (media) state of a port. */
enum {
	media_connected,
	media_disconnected
};
/* Indexes into the driver's OID error-counter array. */
enum {
	OID_COUNTER_UNKOWN,

	OID_COUNTER_FIRST,

	/* total transmit errors */
	OID_COUNTER_XMIT_ERROR,

	/* total receive errors */
	OID_COUNTER_RCV_ERROR,

	OID_COUNTER_LAST
};
/*
* Hardware descriptor definitions
*/
#define DESC_ALIGNMENT 16
#define BUFFER_ALIGNMENT 8
#define NUM_OF_RX_DESC 64
#define NUM_OF_TX_DESC 64
#define KS_DESC_RX_FRAME_LEN 0x000007FF
#define KS_DESC_RX_FRAME_TYPE 0x00008000
#define KS_DESC_RX_ERROR_CRC 0x00010000
#define KS_DESC_RX_ERROR_RUNT 0x00020000
#define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
#define KS_DESC_RX_ERROR_PHY 0x00080000
#define KS884X_DESC_RX_PORT_MASK 0x00300000
#define KS_DESC_RX_MULTICAST 0x01000000
#define KS_DESC_RX_ERROR 0x02000000
#define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
#define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
#define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
#define KS_DESC_RX_LAST 0x20000000
#define KS_DESC_RX_FIRST 0x40000000
#define KS_DESC_RX_ERROR_COND \
(KS_DESC_RX_ERROR_CRC | \
KS_DESC_RX_ERROR_RUNT | \
KS_DESC_RX_ERROR_PHY | \
KS_DESC_RX_ERROR_TOO_LONG)
#define KS_DESC_HW_OWNED 0x80000000
#define KS_DESC_BUF_SIZE 0x000007FF
#define KS884X_DESC_TX_PORT_MASK 0x00300000
#define KS_DESC_END_OF_RING 0x02000000
#define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
#define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
#define KS_DESC_TX_CSUM_GEN_IP 0x10000000
#define KS_DESC_TX_LAST 0x20000000
#define KS_DESC_TX_FIRST 0x40000000
#define KS_DESC_TX_INTERRUPT 0x80000000
#define KS_DESC_PORT_SHIFT 20
#define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
#define KS_DESC_TX_MASK \
(KS_DESC_TX_INTERRUPT | \
KS_DESC_TX_FIRST | \
KS_DESC_TX_LAST | \
KS_DESC_TX_CSUM_GEN_IP | \
KS_DESC_TX_CSUM_GEN_TCP | \
KS_DESC_TX_CSUM_GEN_UDP | \
KS_DESC_BUF_SIZE)
/*
 * Receive descriptor control/status dword, as bitfields.  Declared in
 * both bit orders so fields land on the same hardware bits regardless of
 * host endianness.  Fields mirror the KS_DESC_RX_* mask definitions above.
 */
struct ksz_desc_rx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;		/* descriptor still owned by hardware */
	u32 first_desc:1;
	u32 last_desc:1;
	u32 csum_err_ip:1;
	u32 csum_err_tcp:1;
	u32 csum_err_udp:1;
	u32 error:1;
	u32 multicast:1;
	u32 src_port:4;
	u32 err_phy:1;
	u32 err_too_long:1;
	u32 err_runt:1;
	u32 err_crc:1;
	u32 frame_type:1;
	u32 reserved1:4;
	u32 frame_len:11;
#else
	u32 frame_len:11;
	u32 reserved1:4;
	u32 frame_type:1;
	u32 err_crc:1;
	u32 err_runt:1;
	u32 err_too_long:1;
	u32 err_phy:1;
	u32 src_port:4;
	u32 multicast:1;
	u32 error:1;
	u32 csum_err_udp:1;
	u32 csum_err_tcp:1;
	u32 csum_err_ip:1;
	u32 last_desc:1;
	u32 first_desc:1;
	u32 hw_owned:1;
#endif
};
/*
 * Transmit descriptor status dword: only the ownership bit is meaningful
 * on completion (see KS_DESC_HW_OWNED).
 */
struct ksz_desc_tx_stat {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 hw_owned:1;
	u32 reserved1:31;
#else
	u32 reserved1:31;
	u32 hw_owned:1;
#endif
};
/*
 * Receive descriptor buffer dword: buffer size plus the end-of-ring
 * marker (see KS_DESC_BUF_SIZE / KS_DESC_END_OF_RING).
 */
struct ksz_desc_rx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 reserved4:6;
	u32 end_of_ring:1;
	u32 reserved3:14;
	u32 buf_size:11;
#else
	u32 buf_size:11;
	u32 reserved3:14;
	u32 end_of_ring:1;
	u32 reserved4:6;
#endif
};
/*
 * Transmit descriptor buffer dword: segment boundaries, checksum
 * generation requests, destination port and buffer size (see the
 * KS_DESC_TX_* mask definitions above).
 */
struct ksz_desc_tx_buf {
#ifdef __BIG_ENDIAN_BITFIELD
	u32 intr:1;		/* raise interrupt on completion */
	u32 first_seg:1;
	u32 last_seg:1;
	u32 csum_gen_ip:1;
	u32 csum_gen_tcp:1;
	u32 csum_gen_udp:1;
	u32 end_of_ring:1;
	u32 reserved4:1;
	u32 dest_port:4;
	u32 reserved3:9;
	u32 buf_size:11;
#else
	u32 buf_size:11;
	u32 reserved3:9;
	u32 dest_port:4;
	u32 reserved4:1;
	u32 end_of_ring:1;
	u32 csum_gen_udp:1;
	u32 csum_gen_tcp:1;
	u32 csum_gen_ip:1;
	u32 last_seg:1;
	u32 first_seg:1;
	u32 intr:1;
#endif
};
union desc_stat {
struct ksz_desc_rx_stat rx;
struct ksz_desc_tx_stat tx;
u32 data;
};
union desc_buf {
struct ksz_desc_rx_buf rx;
struct ksz_desc_tx_buf tx;
u32 data;
};
/**
 * struct ksz_hw_desc - Hardware descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @addr: Physical address of memory buffer.
 * @next: Pointer to next hardware descriptor.
 */
struct ksz_hw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 addr;
	u32 next;
};

/**
 * struct ksz_sw_desc - Software descriptor data structure
 * @ctrl: Descriptor control value.
 * @buf: Descriptor buffer value.
 * @buf_size: Current buffers size value in hardware descriptor.
 */
struct ksz_sw_desc {
	union desc_stat ctrl;
	union desc_buf buf;
	u32 buf_size;
};

/**
 * struct ksz_dma_buf - OS dependent DMA buffer data structure
 * @skb: Associated socket buffer.
 * @dma: Associated physical DMA address.
 * @len: Actual len used.
 */
struct ksz_dma_buf {
	struct sk_buff *skb;
	dma_addr_t dma;
	int len;
};

/**
 * struct ksz_desc - Descriptor structure
 * @phw: Hardware descriptor pointer to uncached physical memory.
 * @sw: Cached memory to hold hardware descriptor values for
 * manipulation.
 * @dma_buf: Operating system dependent data structure to hold physical
 * memory buffer allocation information.
 */
struct ksz_desc {
	struct ksz_hw_desc *phw;
	struct ksz_sw_desc sw;
	struct ksz_dma_buf dma_buf;
};

/* Accessor for the DMA buffer bookkeeping inside a descriptor. */
#define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))

/**
 * struct ksz_desc_info - Descriptor information data structure
 * @ring: First descriptor in the ring.
 * @cur: Current descriptor being manipulated.
 * @ring_virt: First hardware descriptor in the ring.
 * @ring_phys: The physical address of the first descriptor of the ring.
 * @size: Size of hardware descriptor.
 * @alloc: Number of descriptors allocated.
 * @avail: Number of descriptors available for use.
 * @last: Index for last descriptor released to hardware.
 * @next: Index for next descriptor available for use.
 * @mask: Mask for index wrapping.
 */
struct ksz_desc_info {
	struct ksz_desc *ring;
	struct ksz_desc *cur;
	struct ksz_hw_desc *ring_virt;
	u32 ring_phys;
	int size;
	int alloc;
	int avail;
	int last;
	int next;
	int mask;
};
/*
 * KSZ8842 switch definitions
 */

/* Selectors for the switch indirect access tables. */
enum {
	TABLE_STATIC_MAC = 0,
	TABLE_VLAN,
	TABLE_DYNAMIC_MAC,
	TABLE_MIB
};

#define LEARNED_MAC_TABLE_ENTRIES 1024
#define STATIC_MAC_TABLE_ENTRIES 8

/**
 * struct ksz_mac_table - Static MAC table data structure
 * @mac_addr: MAC address to filter.
 * @vid: VID value.
 * @fid: FID value.
 * @ports: Port membership.
 * @override: Override setting.
 * @use_fid: FID use setting.
 * @valid: Valid setting indicating the entry is being used.
 */
struct ksz_mac_table {
	u8 mac_addr[MAC_ADDR_LEN];
	u16 vid;
	u8 fid;
	u8 ports;
	u8 override:1;
	u8 use_fid:1;
	u8 valid:1;
};

#define VLAN_TABLE_ENTRIES 16

/**
 * struct ksz_vlan_table - VLAN table data structure
 * @vid: VID value.
 * @fid: FID value.
 * @member: Port membership.
 */
struct ksz_vlan_table {
	u16 vid;
	u8 fid;
	u8 member;
};

#define DIFFSERV_ENTRIES 64
#define PRIO_802_1P_ENTRIES 8
#define PRIO_QUEUES 4
/* Two external switch ports plus one host port. */
#define SWITCH_PORT_NUM 2
#define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
#define HOST_MASK (1 << SWITCH_PORT_NUM)
#define PORT_MASK 7
#define MAIN_PORT 0
#define OTHER_PORT 1
#define HOST_PORT SWITCH_PORT_NUM
/* Per-port MIB counters plus two dropped-packet counters. */
#define PORT_COUNTER_NUM 0x20
#define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)
/* MIB counter indexes as laid out by the hardware. */
#define MIB_COUNTER_RX_LO_PRIORITY 0x00
#define MIB_COUNTER_RX_HI_PRIORITY 0x01
#define MIB_COUNTER_RX_UNDERSIZE 0x02
#define MIB_COUNTER_RX_FRAGMENT 0x03
#define MIB_COUNTER_RX_OVERSIZE 0x04
#define MIB_COUNTER_RX_JABBER 0x05
#define MIB_COUNTER_RX_SYMBOL_ERR 0x06
#define MIB_COUNTER_RX_CRC_ERR 0x07
#define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
#define MIB_COUNTER_RX_CTRL_8808 0x09
#define MIB_COUNTER_RX_PAUSE 0x0A
#define MIB_COUNTER_RX_BROADCAST 0x0B
#define MIB_COUNTER_RX_MULTICAST 0x0C
#define MIB_COUNTER_RX_UNICAST 0x0D
#define MIB_COUNTER_RX_OCTET_64 0x0E
#define MIB_COUNTER_RX_OCTET_65_127 0x0F
#define MIB_COUNTER_RX_OCTET_128_255 0x10
#define MIB_COUNTER_RX_OCTET_256_511 0x11
#define MIB_COUNTER_RX_OCTET_512_1023 0x12
#define MIB_COUNTER_RX_OCTET_1024_1522 0x13
#define MIB_COUNTER_TX_LO_PRIORITY 0x14
#define MIB_COUNTER_TX_HI_PRIORITY 0x15
#define MIB_COUNTER_TX_LATE_COLLISION 0x16
#define MIB_COUNTER_TX_PAUSE 0x17
#define MIB_COUNTER_TX_BROADCAST 0x18
#define MIB_COUNTER_TX_MULTICAST 0x19
#define MIB_COUNTER_TX_UNICAST 0x1A
#define MIB_COUNTER_TX_DEFERRED 0x1B
#define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
#define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
#define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
#define MIB_COUNTER_TX_MULTI_COLLISION 0x1F
#define MIB_COUNTER_RX_DROPPED_PACKET 0x20
#define MIB_COUNTER_TX_DROPPED_PACKET 0x21
/**
 * struct ksz_port_mib - Port MIB data structure
 * @cnt_ptr: Current pointer to MIB counter index.
 * @link_down: Indication the link has just gone down.
 * @state: Connection status of the port.
 * @mib_start: The starting counter index. Some ports do not start at 0.
 * @counter: 64-bit MIB counter value.
 * @dropped: Temporary buffer to remember last read packet dropped values.
 *
 * MIB counters needs to be read periodically so that counters do not get
 * overflowed and give incorrect values. A right balance is needed to
 * satisfy this condition and not waste too much CPU time.
 *
 * It is pointless to read MIB counters when the port is disconnected. The
 * @state provides the connection status so that MIB counters are read only
 * when the port is connected. The @link_down indicates the port is just
 * disconnected so that all MIB counters are read one last time to update the
 * information.
 */
struct ksz_port_mib {
	u8 cnt_ptr;
	u8 link_down;
	u8 state;
	u8 mib_start;
	u64 counter[TOTAL_PORT_COUNTER_NUM];
	/* [0] = last rx dropped value, [1] = last tx dropped value. */
	u32 dropped[2];
};

/**
 * struct ksz_port_cfg - Port configuration data structure
 * @vid: VID value.
 * @member: Port membership.
 * @port_prio: Port priority.
 * @rx_rate: Receive priority rate.
 * @tx_rate: Transmit priority rate.
 * @stp_state: Current Spanning Tree Protocol state.
 */
struct ksz_port_cfg {
	u16 vid;
	u8 member;
	u8 port_prio;
	u32 rx_rate[PRIO_QUEUES];
	u32 tx_rate[PRIO_QUEUES];
	int stp_state;
};

/**
 * struct ksz_switch - KSZ8842 switch data structure
 * @mac_table: MAC table entries information.
 * @vlan_table: VLAN table entries information.
 * @port_cfg: Port configuration information.
 * @diffserv: DiffServ priority settings. Possible values from 6-bit of ToS
 * (bit7 ~ bit2) field.
 * @p_802_1p: 802.1P priority settings. Possible values from 3-bit of 802.1p
 * Tag priority field.
 * @br_addr: Bridge address. Used for STP.
 * @other_addr: Other MAC address. Used for multiple network device mode.
 * @broad_per: Broadcast storm percentage.
 * @member: Current port membership. Used for STP.
 */
struct ksz_switch {
	struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
	struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
	struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];
	u8 diffserv[DIFFSERV_ENTRIES];
	u8 p_802_1p[PRIO_802_1P_ENTRIES];
	u8 br_addr[MAC_ADDR_LEN];
	u8 other_addr[MAC_ADDR_LEN];
	u8 broad_per;
	u8 member;
};

#define TX_RATE_UNIT 10000

/**
 * struct ksz_port_info - Port information data structure
 * @state: Connection status of the port.
 * @tx_rate: Transmit rate divided by 10000 to get Mbit.
 * @duplex: Duplex mode.
 * @advertised: Advertised auto-negotiation setting. Used to determine link.
 * @partner: Auto-negotiation partner setting. Used to determine link.
 * @port_id: Port index to access actual hardware register.
 * @pdev: Pointer to OS dependent network device.
 */
struct ksz_port_info {
	uint state;
	uint tx_rate;
	u8 duplex;
	u8 advertised;
	u8 partner;
	u8 port_id;
	void *pdev;
};
#define MAX_TX_HELD_SIZE 52000

/* Hardware features and bug fixes. */
#define LINK_INT_WORKING (1 << 0)
#define SMALL_PACKET_TX_BUG (1 << 1)
#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
#define RX_HUGE_FRAME (1 << 4)
#define STP_SUPPORT (1 << 8)

/* Software overrides. */
#define PAUSE_FLOW_CTRL (1 << 0)
#define FAST_AGING (1 << 1)

/**
 * struct ksz_hw - KSZ884X hardware data structure
 * @io: Virtual address assigned.
 * @ksz_switch: Pointer to KSZ8842 switch.
 * @port_info: Port information.
 * @port_mib: Port MIB information.
 * @dev_count: Number of network devices this hardware supports.
 * @dst_ports: Destination ports in switch for transmission.
 * @id: Hardware ID. Used for display only.
 * @mib_cnt: Number of MIB counters this hardware has.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @tx_cfg: Cached transmit control settings.
 * @rx_cfg: Cached receive control settings.
 * @intr_mask: Current interrupt mask.
 * @intr_set: Current interrupt set.
 * @intr_blocked: Interrupt blocked.
 * @rx_desc_info: Receive descriptor information.
 * @tx_desc_info: Transmit descriptor information.
 * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
 * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
 * @tx_size: Transmit data size. Used for TX optimization.
 * The maximum is defined by MAX_TX_HELD_SIZE.
 * @perm_addr: Permanent MAC address.
 * @override_addr: Overridden MAC address.
 * @address: Additional MAC address entries.
 * @addr_list_size: Additional MAC address list size.
 * @mac_override: Indication of MAC address overridden.
 * @promiscuous: Counter to keep track of promiscuous mode set.
 * @all_multi: Counter to keep track of all multicast mode set.
 * @multi_list: Multicast address entries.
 * @multi_bits: Cached multicast hash table settings.
 * @multi_list_size: Multicast address list size.
 * @enabled: Indication of hardware enabled.
 * @rx_stop: Indication of receive process stop.
 * @features: Hardware features to enable.
 * @overrides: Hardware features to override.
 * @parent: Pointer to parent, network device private structure.
 */
struct ksz_hw {
	void __iomem *io;
	struct ksz_switch *ksz_switch;
	struct ksz_port_info port_info[SWITCH_PORT_NUM];
	struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
	int dev_count;
	int dst_ports;
	int id;
	int mib_cnt;
	int mib_port_cnt;
	u32 tx_cfg;
	u32 rx_cfg;
	u32 intr_mask;
	u32 intr_set;
	uint intr_blocked;
	struct ksz_desc_info rx_desc_info;
	struct ksz_desc_info tx_desc_info;
	int tx_int_cnt;
	int tx_int_mask;
	int tx_size;
	u8 perm_addr[MAC_ADDR_LEN];
	u8 override_addr[MAC_ADDR_LEN];
	u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
	u8 addr_list_size;
	u8 mac_override;
	u8 promiscuous;
	u8 all_multi;
	u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
	u8 multi_bits[HW_MULTICAST_SIZE];
	u8 multi_list_size;
	u8 enabled;
	u8 rx_stop;
	u8 reserved2[1];
	uint features;
	uint overrides;
	void *parent;
};

/* PHY flow control modes. */
enum {
	PHY_NO_FLOW_CTRL,
	PHY_FLOW_CTRL,
	PHY_TX_ONLY,
	PHY_RX_ONLY
};
/**
 * struct ksz_port - Virtual port data structure
 * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
 * duplex, and 0 for auto, which normally results in full
 * duplex.
 * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
 * 0 for auto, which normally results in 100 Mbit.
 * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
 * force.
 * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
 * control, and PHY_FLOW_CTRL for flow control.
 * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
 * Mbit PHY.
 * @first_port: Index of first port this port supports.
 * @mib_port_cnt: Number of ports with MIB counters.
 * @port_cnt: Number of ports this port supports.
 * @counter: Port statistics counter.
 * @hw: Pointer to hardware structure.
 * @linked: Pointer to port information linked to this port.
 */
struct ksz_port {
	u8 duplex;
	u8 speed;
	u8 force_link;
	u8 flow_ctrl;
	int first_port;
	int mib_port_cnt;
	int port_cnt;
	u64 counter[OID_COUNTER_LAST];
	struct ksz_hw *hw;
	struct ksz_port_info *linked;
};

/**
 * struct ksz_timer_info - Timer information data structure
 * @timer: Kernel timer.
 * @cnt: Running timer counter.
 * @max: Number of times to run timer; -1 for infinity.
 * @period: Timer period in jiffies.
 */
struct ksz_timer_info {
	struct timer_list timer;
	int cnt;
	int max;
	int period;
};

/**
 * struct ksz_shared_mem - OS dependent shared memory data structure
 * @dma_addr: Physical DMA address allocated.
 * @alloc_size: Allocation size.
 * @phys: Actual physical address used.
 * @alloc_virt: Virtual address allocated.
 * @virt: Actual virtual address used.
 */
struct ksz_shared_mem {
	dma_addr_t dma_addr;
	uint alloc_size;
	uint phys;
	u8 *alloc_virt;
	u8 *virt;
};

/**
 * struct ksz_counter_info - OS dependent counter information data structure
 * @counter: Wait queue to wakeup after counters are read.
 * @time: Next time in jiffies to read counter.
 * @read: Indication of counters read in full or not.
 */
struct ksz_counter_info {
	wait_queue_head_t counter;
	unsigned long time;
	int read;
};
/**
 * struct dev_info - Network device information data structure
 * @dev: Pointer to network device.
 * @pdev: Pointer to PCI device.
 * @hw: Hardware structure.
 * @desc_pool: Physical memory used for descriptor pool.
 * @hwlock: Spinlock to prevent hardware from accessing.
 * @lock: Mutex lock to prevent device from accessing.
 * @dev_rcv: Receive process function used.
 * @last_skb: Socket buffer allocated for descriptor rx fragments.
 * @skb_index: Buffer index for receiving fragments.
 * @skb_len: Buffer length for receiving fragments.
 * @mib_read: Workqueue to read MIB counters.
 * @mib_timer_info: Timer to read MIB counters.
 * @counter: Used for MIB reading.
 * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE;
 * the maximum is MAX_RX_BUF_SIZE.
 * @opened: Counter to keep track of device open.
 * @rx_tasklet: Receive processing tasklet.
 * @tx_tasklet: Transmit processing tasklet.
 * @wol_enable: Wake-on-LAN enable set by ethtool.
 * @wol_support: Wake-on-LAN support used by ethtool.
 * @pme_wait: Used for KSZ8841 power management.
 */
struct dev_info {
	struct net_device *dev;
	struct pci_dev *pdev;
	struct ksz_hw hw;
	struct ksz_shared_mem desc_pool;
	spinlock_t hwlock;
	struct mutex lock;
	int (*dev_rcv)(struct dev_info *);
	struct sk_buff *last_skb;
	int skb_index;
	int skb_len;
	struct work_struct mib_read;
	struct ksz_timer_info mib_timer_info;
	struct ksz_counter_info counter[TOTAL_PORT_NUM];
	int mtu;
	int opened;
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	int wol_enable;
	int wol_support;
	unsigned long pme_wait;
};

/**
 * struct dev_priv - Network device private data structure
 * @adapter: Adapter device information.
 * @port: Port information.
 * @monitor_timer_info: Timer to monitor ports.
 * @proc_sem: Semaphore for proc accessing.
 * @id: Device ID.
 * @mii_if: MII interface information.
 * @advertising: Temporary variable to store advertised settings.
 * @msg_enable: The message flags controlling driver output.
 * @media_state: The connection status of the device.
 * @multicast: The all multicast state of the device.
 * @promiscuous: The promiscuous state of the device.
 */
struct dev_priv {
	struct dev_info *adapter;
	struct ksz_port port;
	struct ksz_timer_info monitor_timer_info;
	struct semaphore proc_sem;
	int id;
	struct mii_if_info mii_if;
	u32 advertising;
	u32 msg_enable;
	int media_state;
	int multicast;
	int promiscuous;
};

/* Driver identification strings. */
#define DRV_NAME "KSZ884X PCI"
#define DEVICE_NAME "KSZ884x PCI"
#define DRV_VERSION "1.0.0"
#define DRV_RELDATE "Feb 8, 2010"

static char version[] __devinitdata =
	"Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";

/* Fallback MAC address when none is programmed into the hardware. */
static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
/*
 * Interrupt processing primary routines
 */

/* Acknowledge (clear) the given interrupt bits in the status register. */
static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
{
	writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
}
/*
 * Disable all hardware interrupts.  The current mask is saved in
 * @intr_blocked for hw_ena_intr() to restore; the enable register is
 * read back to refresh the cached @intr_set value.
 */
static inline void hw_dis_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = hw->intr_mask;
	writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
}
/* Program the interrupt enable register and cache the value in @intr_set. */
static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
{
	hw->intr_set = interrupt;
	writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
}
/* Unblock interrupts and re-enable everything in the cached mask. */
static inline void hw_ena_intr(struct ksz_hw *hw)
{
	hw->intr_blocked = 0;
	hw_set_intr(hw, hw->intr_mask);
}
/*
 * Remove @bit from the cached interrupt mask only; the hardware enable
 * register is not touched here.
 */
static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
{
	hw->intr_mask &= ~(bit);
}
/*
 * Turn off @interrupt both in the hardware enable register and in the
 * cached interrupt mask, so hw_ena_intr() will not re-enable it.
 */
static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
{
	u32 read_intr;

	read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = read_intr & ~interrupt;
	writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
	hw_dis_intr_bit(hw, interrupt);
}
/**
 * hw_turn_on_intr - turn on specified interrupts
 * @hw: The hardware instance.
 * @bit: The interrupt bits to be on.
 *
 * This routine turns on the specified interrupts in the interrupt mask so that
 * those interrupts will be enabled.  The hardware register is only written
 * when interrupts are not currently blocked; otherwise the new bits take
 * effect when hw_ena_intr() unblocks them.
 */
static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
{
	hw->intr_mask |= bit;
	if (!hw->intr_blocked)
		hw_set_intr(hw, hw->intr_mask);
}
/*
 * Enable @interrupt in the hardware enable register and the cached
 * @intr_set.  Note the cached @intr_mask is NOT updated here (unlike
 * hw_turn_on_intr()).
 */
static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
{
	u32 read_intr;

	read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
	hw->intr_set = read_intr | interrupt;
	writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
}
/* Read pending interrupt status, masked to the currently enabled set. */
static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
{
	*status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
	*status = *status & hw->intr_set;
}
/*
 * Re-enable interrupts previously blocked by hw_block_intr().  @interrupt
 * is the value hw_block_intr() returned; zero means interrupts were
 * already blocked by an outer caller, so nothing is restored here.
 */
static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
{
	if (interrupt)
		hw_ena_intr(hw);
}
/**
 * hw_block_intr - block hardware interrupts
 * @hw: The hardware instance.
 *
 * This function blocks all interrupts of the hardware and returns the current
 * interrupt enable mask so that interrupts can be restored later.
 *
 * Return the current interrupt enable mask, or 0 if interrupts were
 * already blocked (nested call).
 */
static uint hw_block_intr(struct ksz_hw *hw)
{
	uint interrupt = 0;

	if (!hw->intr_blocked) {
		hw_dis_intr(hw);
		interrupt = hw->intr_blocked;
	}
	return interrupt;
}
/*
 * Hardware descriptor routines
 */

/*
 * Write @status to the hardware descriptor control word with the
 * ownership bit cleared, returning the descriptor to software ownership.
 */
static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
{
	status.rx.hw_owned = 0;
	desc->phw->ctrl.data = cpu_to_le32(status.data);
}
/*
 * Hand the descriptor to the hardware.  The buffer word is rewritten only
 * when it changed since the last release; the control word (with the
 * ownership bit set) is always written last.
 */
static inline void release_desc(struct ksz_desc *desc)
{
	desc->sw.ctrl.tx.hw_owned = 1;
	if (desc->sw.buf_size != desc->sw.buf.data) {
		desc->sw.buf_size = desc->sw.buf.data;
		desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
	}
	desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
}
/*
 * Fetch the next receive descriptor from the ring, advance the ring index
 * (with wrap-around via the index mask), decrement the available count and
 * clear the software-owned receive bits for reuse.
 */
static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
{
	struct ksz_desc *cur = &info->ring[info->last];

	info->last = (info->last + 1) & info->mask;
	info->avail--;
	cur->sw.buf.data &= ~KS_DESC_RX_MASK;
	*desc = cur;
}
/* Store the DMA address of the receive buffer in the hardware descriptor. */
static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}
/* Set the receive buffer size in the cached descriptor buffer word. */
static inline void set_rx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.rx.buf_size = len;
}
/*
 * Fetch the next transmit descriptor from the ring, advance the ring index
 * (with wrap-around via the index mask), decrement the available count and
 * clear the software-owned transmit bits for reuse.
 */
static inline void get_tx_pkt(struct ksz_desc_info *info,
	struct ksz_desc **desc)
{
	struct ksz_desc *cur = &info->ring[info->next];

	info->next = (info->next + 1) & info->mask;
	info->avail--;
	cur->sw.buf.data &= ~KS_DESC_TX_MASK;
	*desc = cur;
}
/* Store the DMA address of the transmit buffer in the hardware descriptor. */
static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
{
	desc->phw->addr = cpu_to_le32(addr);
}
/* Set the transmit buffer size in the cached descriptor buffer word. */
static inline void set_tx_len(struct ksz_desc *desc, u32 len)
{
	desc->sw.buf.tx.buf_size = len;
}
/* Switch functions */

#define TABLE_READ 0x10
#define TABLE_SEL_SHIFT 2

/* Dummy register read used as a short delay after an indirect access. */
#define HW_DELAY(hw, reg) \
do { \
	u16 dummy; \
	dummy = readw(hw->io + reg); \
} while (0)
/**
 * sw_r_table - read 4 bytes of data from switch table
 * @hw: The hardware instance.
 * @table: The table selector.
 * @addr: The address of the table entry.
 * @data: Buffer to store the read data.
 *
 * This routine reads 4 bytes of data from the table of the switch.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
{
	u16 ctrl_addr;
	uint interrupt;

	/* Command word: table selector + read flag in the high byte. */
	ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;

	interrupt = hw_block_intr(hw);

	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);
	*data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

	hw_restore_intr(hw, interrupt);
}
/**
 * sw_w_table_64 - write 8 bytes of data to the switch table
 * @hw: The hardware instance.
 * @table: The table selector.
 * @addr: The address of the table entry.
 * @data_hi: The high part of data to be written (bit63 ~ bit32).
 * @data_lo: The low part of data to be written (bit31 ~ bit0).
 *
 * This routine writes 8 bytes of data to the table of the switch.
 * Hardware interrupts are disabled to minimize corruption of written data.
 */
static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
	u32 data_lo)
{
	u16 ctrl_addr;
	uint interrupt;

	ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;

	interrupt = hw_block_intr(hw);

	/* Data must be staged before the command word triggers the write. */
	writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
	writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);

	hw_restore_intr(hw, interrupt);
}
/**
 * sw_w_sta_mac_table - write to the static MAC table
 * @hw: The hardware instance.
 * @addr: The address of the table entry.
 * @mac_addr: The MAC address.
 * @ports: The port members.
 * @override: The flag to override the port receive/transmit settings.
 * @valid: The flag to indicate entry is valid.
 * @use_fid: The flag to indicate the FID is valid.
 * @fid: The FID value.
 *
 * This routine writes an entry of the static MAC table of the switch. It
 * calls sw_w_table_64() to write the data.
 */
static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
	u8 ports, int override, int valid, int use_fid, u8 fid)
{
	u32 data_hi;
	u32 data_lo;

	/* MAC bytes 2..5 go into the low word, bytes 0..1 into the high. */
	data_lo = ((u32) mac_addr[2] << 24) |
		((u32) mac_addr[3] << 16) |
		((u32) mac_addr[4] << 8) | mac_addr[5];
	data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
	data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;

	if (override)
		data_hi |= STATIC_MAC_TABLE_OVERRIDE;
	if (use_fid) {
		data_hi |= STATIC_MAC_TABLE_USE_FID;
		data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
	}
	if (valid)
		data_hi |= STATIC_MAC_TABLE_VALID;

	sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
}
/**
 * sw_r_vlan_table - read from the VLAN table
 * @hw: The hardware instance.
 * @addr: The address of the table entry.
 * @vid: Buffer to store the VID.
 * @fid: Buffer to store the FID.
 * @member: Buffer to store the port membership.
 *
 * This function reads an entry of the VLAN table of the switch. It calls
 * sw_r_table() to get the data.
 *
 * Return 0 if the entry is valid; otherwise -1.
 */
static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
	u8 *member)
{
	u32 data;

	sw_r_table(hw, TABLE_VLAN, addr, &data);
	if (data & VLAN_TABLE_VALID) {
		*vid = (u16)(data & VLAN_TABLE_VID);
		*fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
		*member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
			VLAN_TABLE_MEMBERSHIP_SHIFT);
		return 0;
	}
	return -1;
}
/**
 * port_r_mib_cnt - read MIB counter
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the counter.
 * @cnt: Buffer to store the counter.  The value read is accumulated into
 * the existing contents, not assigned.
 *
 * This routine reads a MIB counter of the port.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
{
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int timeout;

	ctrl_addr = addr + PORT_COUNTER_NUM * port;

	interrupt = hw_block_intr(hw);

	ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
	writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
	HW_DELAY(hw, KS884X_IACR_OFFSET);

	/* Poll up to 100 times for the valid bit; give up silently after. */
	for (timeout = 100; timeout > 0; timeout--) {
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

		if (data & MIB_COUNTER_VALID) {
			if (data & MIB_COUNTER_OVERFLOW)
				*cnt += MIB_COUNTER_VALUE + 1;
			*cnt += data & MIB_COUNTER_VALUE;
			break;
		}
	}

	hw_restore_intr(hw, interrupt);
}
/**
 * port_r_mib_pkt - read dropped packet counts
 * @hw: The hardware instance.
 * @port: The port index.
 * @last: Buffer holding the previously read rx/tx dropped register values,
 * used to compute the delta and detect hardware counter wrap-around.
 * @cnt: Buffer to store the receive and transmit dropped packet counts.
 *
 * This routine reads the dropped packet counts of the port.
 * Hardware interrupts are disabled to minimize corruption of read data.
 */
static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
{
	u32 cur;
	u32 data;
	u16 ctrl_addr;
	uint interrupt;
	int index;

	/* First iteration reads the rx dropped register, second the tx one. */
	index = KS_MIB_PACKET_DROPPED_RX_0 + port;
	do {
		interrupt = hw_block_intr(hw);

		ctrl_addr = (u16) index;
		ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
			<< 8);
		writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
		HW_DELAY(hw, KS884X_IACR_OFFSET);
		data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);

		hw_restore_intr(hw, interrupt);

		data &= MIB_PACKET_DROPPED;
		cur = *last;
		if (data != cur) {
			*last = data;
			/* Register wrapped; add its full range to the delta. */
			if (data < cur)
				data += MIB_PACKET_DROPPED + 1;
			data -= cur;
			*cnt += data;
		}
		++last;
		++cnt;
		index -= KS_MIB_PACKET_DROPPED_TX -
			KS_MIB_PACKET_DROPPED_TX_0 + 1;
	} while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
}
/**
 * port_r_cnt - read MIB counters periodically
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine is used to read the counters of the port periodically to avoid
 * counter overflow. The hardware should be acquired first before calling this
 * routine.
 *
 * Return 0; all remaining counters are read in a single pass.
 */
static int port_r_cnt(struct ksz_hw *hw, int port)
{
	struct ksz_port_mib *mib = &hw->port_mib[port];

	if (mib->mib_start < PORT_COUNTER_NUM)
		while (mib->cnt_ptr < PORT_COUNTER_NUM) {
			port_r_mib_cnt(hw, port, mib->cnt_ptr,
				&mib->counter[mib->cnt_ptr]);
			++mib->cnt_ptr;
		}
	/* Dropped packet counts live past the regular counter range. */
	if (hw->mib_cnt > PORT_COUNTER_NUM)
		port_r_mib_pkt(hw, port, mib->dropped,
			&mib->counter[PORT_COUNTER_NUM]);
	mib->cnt_ptr = 0;
	return 0;
}
/**
 * port_init_cnt - initialize MIB counter values
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine is used to initialize all counters to zero if the hardware
 * cannot do it after reset.  All counters are read once (which consumes the
 * hardware's accumulated values) and the software copies are then zeroed.
 */
static void port_init_cnt(struct ksz_hw *hw, int port)
{
	struct ksz_port_mib *mib = &hw->port_mib[port];

	mib->cnt_ptr = 0;
	if (mib->mib_start < PORT_COUNTER_NUM)
		do {
			port_r_mib_cnt(hw, port, mib->cnt_ptr,
				&mib->counter[mib->cnt_ptr]);
			++mib->cnt_ptr;
		} while (mib->cnt_ptr < PORT_COUNTER_NUM);
	if (hw->mib_cnt > PORT_COUNTER_NUM)
		port_r_mib_pkt(hw, port, mib->dropped,
			&mib->counter[PORT_COUNTER_NUM]);
	/* Discard everything just read so counting starts from zero. */
	memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
	mib->cnt_ptr = 0;
}
/*
 * Port functions
 */

/**
 * port_chk - check port register bits
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @bits: The data bits to check.
 *
 * This function checks whether the specified bits of the port register are set
 * or not.
 *
 * Return 0 if the bits are not set.
 */
static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
{
	u32 reg;
	u16 val;

	PORT_CTRL_ADDR(port, reg);
	reg += offset;
	val = readw(hw->io + reg);
	return (val & bits) == bits;
}
/**
 * port_cfg - set port register bits
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @bits: The data bits to set.
 * @set: The flag indicating whether the bits are to be set or not.
 *
 * This routine sets or resets the specified bits of the port register
 * with a read-modify-write sequence.
 */
static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
	int set)
{
	u32 reg;
	u16 val;

	PORT_CTRL_ADDR(port, reg);
	reg += offset;
	val = readw(hw->io + reg);
	val = set ? (val | bits) : (val & ~bits);
	writew(val, hw->io + reg);
}
/**
 * port_chk_shift - check port bit
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the register.
 * @shift: Number of bits to shift.
 *
 * This function checks whether the specified port is set in the register or
 * not.
 *
 * Return 0 if the port is not set.
 */
static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
{
	u16 bit = 1 << port;
	u16 reg = readw(hw->io + addr);

	return ((reg >> shift) & bit) == bit;
}
/**
 * port_cfg_shift - set port bit
 * @hw: The hardware instance.
 * @port: The port index.
 * @addr: The address of the register.
 * @shift: Number of bits to shift.
 * @set: The flag indicating whether the port is to be set or not.
 *
 * This routine sets or resets the specified port in the register with a
 * read-modify-write sequence.
 */
static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
	int set)
{
	u16 mask;
	u16 reg = readw(hw->io + addr);

	mask = 1 << port;
	mask <<= shift;
	if (set)
		reg |= mask;
	else
		reg &= ~mask;
	writew(reg, hw->io + addr);
}
/**
 * port_r8 - read byte from port register
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Buffer to store the data.
 *
 * This routine reads a byte from the port register.
 */
static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	*data = readb(hw->io + addr);
}
/**
 * port_r16 - read word from port register.
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Buffer to store the data.
 *
 * This routine reads a word from the port register.
 */
static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	*data = readw(hw->io + addr);
}
/**
 * port_w16 - write word to port register.
 * @hw: The hardware instance.
 * @port: The port index.
 * @offset: The offset of the port register.
 * @data: Data to write.
 *
 * This routine writes a word to the port register.
 */
static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
{
	u32 addr;

	PORT_CTRL_ADDR(port, addr);
	addr += offset;
	writew(data, hw->io + addr);
}
/**
 * sw_chk - check switch register bits
 * @hw: The hardware instance.
 * @addr: The address of the switch register.
 * @bits: The data bits to check.
 *
 * This function checks whether the specified bits of the switch register are
 * set or not.
 *
 * Return 0 if the bits are not set.
 */
static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
{
	u16 reg = readw(hw->io + addr);

	return (reg & bits) == bits;
}
/**
 * sw_cfg - set switch register bits
 * @hw: The hardware instance.
 * @addr: The address of the switch register.
 * @bits: The data bits to set.
 * @set: The flag indicating whether the bits are to be set or not.
 *
 * This function sets or resets the specified bits of the switch register
 * with a read-modify-write sequence.
 */
static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
{
	u16 reg = readw(hw->io + addr);

	reg = set ? (reg | bits) : (reg & ~bits);
	writew(reg, hw->io + addr);
}
/* Bandwidth */

/* Enable or disable broadcast storm protection on port @p. */
static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
}
/* Return non-zero if broadcast storm protection is enabled on port @p. */
static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
}
/* Driver set switch broadcast storm protection at 10% rate. */
#define BROADCAST_STORM_PROTECTION_RATE 10

/* 148,800 frames * 67 ms / 100 */
#define BROADCAST_STORM_VALUE 9969

/**
 * sw_cfg_broad_storm - configure broadcast storm threshold
 * @hw: The hardware instance.
 * @percent: Broadcast storm threshold in percent of transmit rate.
 *
 * This routine programs the broadcast storm rate register of the switch
 * from a percentage of the maximum frame rate, clamping at the register
 * maximum.
 */
static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
{
	u32 rate = (u32) BROADCAST_STORM_VALUE * (u32) percent / 100;
	u16 reg;

	if (rate > BROADCAST_STORM_RATE)
		rate = BROADCAST_STORM_RATE;
	reg = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	reg &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
	/* The rate value is stored byte-swapped in the register. */
	reg |= ((rate & 0x00FF) << 8) | ((rate & 0xFF00) >> 8);
	writew(reg, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
}
/**
 * sw_get_broad_storm - get broadcast storm threshold
 * @hw: The hardware instance.
 * @percent: Buffer to store the broadcast storm threshold percentage.
 *
 * This routine reads back the broadcast storm rate register and converts
 * it to a percentage, rounding to the nearest whole percent.
 */
static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
{
	u16 reg = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	int rate;

	/* Undo the byte swap applied when the rate was written. */
	rate = (reg & BROADCAST_STORM_RATE_HI) << 8;
	rate |= (reg & BROADCAST_STORM_RATE_LO) >> 8;
	rate = (rate * 100 + BROADCAST_STORM_VALUE / 2) / BROADCAST_STORM_VALUE;
	*percent = (u8) rate;
}
/**
 * sw_dis_broad_storm - disable broadcast storm
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the broadcast storm limit function of the switch.
 */
static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
{
	port_cfg_broad_storm(hw, port, 0);
}
/**
 * sw_ena_broad_storm - enable broadcast storm
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine enables the broadcast storm limit function of the switch,
 * first refreshing the global threshold from the saved percentage.
 */
static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
{
	sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
	port_cfg_broad_storm(hw, port, 1);
}
/**
 * sw_init_broad_storm - initialize broadcast storm
 * @hw: The hardware instance.
 *
 * This routine initializes the broadcast storm limit function of the switch:
 * threshold set to 1%, per-port protection off, and multicast packets
 * excluded from storm accounting.
 */
static void sw_init_broad_storm(struct ksz_hw *hw)
{
	int port;
	hw->ksz_switch->broad_per = 1;
	sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
	for (port = 0; port < TOTAL_PORT_NUM; port++)
		sw_dis_broad_storm(hw, port);
	sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
}
/**
 * hw_cfg_broad_storm - configure broadcast storm
 * @hw: The hardware instance.
 * @percent: Broadcast storm threshold in percent of transmit rate.
 *
 * This routine configures the broadcast storm threshold of the switch.
 * It is called by user functions. The hardware should be acquired first.
 * The value actually programmed is read back so the cached percentage
 * reflects hardware granularity.
 */
static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
{
	u8 per = (percent > 100) ? 100 : percent;

	sw_cfg_broad_storm(hw, per);
	sw_get_broad_storm(hw, &per);
	hw->ksz_switch->broad_per = per;
}
/**
 * sw_dis_prio_rate - disable switch priority rate
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the priority rate function of the switch by
 * clearing the port's ingress rate limit register.
 */
static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
{
	u32 addr;
	PORT_CTRL_ADDR(port, addr);
	addr += KS8842_PORT_IN_RATE_OFFSET;
	writel(0, hw->io + addr);
}
/**
 * sw_init_prio_rate - initialize switch priority rate
 * @hw: The hardware instance.
 *
 * This routine initializes the priority rate function of the switch:
 * all cached per-queue rates are cleared and rate limiting is disabled
 * on every port.
 */
static void sw_init_prio_rate(struct ksz_hw *hw)
{
	int port;
	int prio;
	struct ksz_switch *sw = hw->ksz_switch;
	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		for (prio = 0; prio < PRIO_QUEUES; prio++) {
			sw->port_cfg[port].rx_rate[prio] =
				sw->port_cfg[port].tx_rate[prio] = 0;
		}
		sw_dis_prio_rate(hw, port);
	}
}
/* Communication */
/* Enable/disable half-duplex back pressure on port @p. */
static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
}

/* Enable/disable forced flow control on port @p. */
static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
}

/* Return non-zero if back pressure is enabled on port @p. */
static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
}

/* Return non-zero if forced flow control is enabled on port @p. */
static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
}
/* Spanning Tree */
/* Disable (set) or re-enable MAC address learning on port @p. */
static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
}

/* Enable/disable packet reception on port @p. */
static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
}

/* Enable/disable packet transmission on port @p. */
static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
}

/* Enable/disable fast aging of the dynamic MAC table. */
static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
}

/*
 * Flush the dynamic MAC table by pulsing fast aging for 1 ms.
 * Skipped when the FAST_AGING override keeps fast aging permanently on.
 */
static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
{
	if (!(hw->overrides & FAST_AGING)) {
		sw_cfg_fast_aging(hw, 1);
		mdelay(1);
		sw_cfg_fast_aging(hw, 0);
	}
}
/* VLAN */
/* Enable/disable VLAN tag insertion on egress of port @p. */
static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
}

/* Enable/disable VLAN tag removal on egress of port @p. */
static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
}

/* Return non-zero if tag insertion is enabled on port @p. */
static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
}

/* Return non-zero if tag removal is enabled on port @p. */
static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
}

/* Enable/disable discarding of packets whose VID is not in the VLAN table. */
static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
}

/* Enable/disable ingress VLAN filtering on port @p. */
static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
}

/* Return non-zero if non-VID discarding is enabled on port @p. */
static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
}

/* Return non-zero if ingress VLAN filtering is enabled on port @p. */
static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
}
/* Mirroring */
/* Mark/unmark port @p as the mirror sniffer (monitoring) port. */
static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
}

/* Enable/disable mirroring of received packets on port @p. */
static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
}

/* Enable/disable mirroring of transmitted packets on port @p. */
static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
}

/* Select whether mirroring requires both RX and TX to match. */
static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
}

/* Turn off all port mirroring features of the switch. */
static void sw_init_mirror(struct ksz_hw *hw)
{
	int port;
	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		port_cfg_mirror_sniffer(hw, port, 0);
		port_cfg_mirror_rx(hw, port, 0);
		port_cfg_mirror_tx(hw, port, 0);
	}
	sw_cfg_mirror_rx_tx(hw, 0);
}
/* Enable/disable delivery of unknown unicast packets to the default port. */
static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
		SWITCH_UNK_DEF_PORT_ENABLE, set);
}

/* Return non-zero if unknown-unicast default delivery is enabled. */
static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
{
	return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
		SWITCH_UNK_DEF_PORT_ENABLE);
}

/* Include/exclude @port in the unknown-unicast default port map. */
static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
{
	port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
}

/* Return non-zero if @port is in the unknown-unicast default port map. */
static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
{
	return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
}
/* Priority */
/* Enable/disable DiffServ priority classification on port @p. */
static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
}

/* Enable/disable 802.1p priority classification on port @p. */
static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
}

/* Enable/disable the user priority ceiling (802.1p re-mapping) on port @p. */
static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
}

/* Enable/disable multiple transmit priority queues on port @p. */
static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
{
	port_cfg(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
}

/* Return non-zero if DiffServ classification is enabled on port @p. */
static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
}

/* Return non-zero if 802.1p classification is enabled on port @p. */
static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
}

/* Return non-zero if the user priority ceiling is enabled on port @p. */
static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
}

/* Return non-zero if multiple priority queues are enabled on port @p. */
static inline int port_chk_prio(struct ksz_hw *hw, int p)
{
	return port_chk(hw, p,
		KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
}
/**
 * sw_dis_diffserv - disable switch DiffServ priority
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the DiffServ priority function of the switch.
 */
static void sw_dis_diffserv(struct ksz_hw *hw, int port)
{
	port_cfg_diffserv(hw, port, 0);
}

/**
 * sw_dis_802_1p - disable switch 802.1p priority
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the 802.1p priority function of the switch.
 */
static void sw_dis_802_1p(struct ksz_hw *hw, int port)
{
	port_cfg_802_1p(hw, port, 0);
}
/**
 * sw_cfg_replace_null_vid - enable replacement of a null VID
 * @hw: The hardware instance.
 * @set: The flag to disable or enable.
 *
 * This routine controls the switch feature that replaces a null (zero) VID
 * in tagged frames.
 */
static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
{
	sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
}
/**
 * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
 * @hw: The hardware instance.
 * @port: The port index.
 * @set: The flag to disable or enable.
 *
 * This routine enables the 802.1p priority re-mapping function of the switch.
 * That allows 802.1p priority field to be replaced with the port's default
 * tag's priority value if the ingress packet's 802.1p priority has a higher
 * priority than port's default tag's priority.
 */
static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
{
	port_cfg_replace_vid(hw, port, set);
}
/**
 * sw_cfg_port_based - configure switch port based priority
 * @hw: The hardware instance.
 * @port: The port index.
 * @prio: The priority to set (clamped to PORT_BASED_PRIORITY_BASE).
 *
 * This routine configures the port based priority of the switch and
 * caches the value in the switch configuration.
 */
static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
{
	u16 ctrl;

	if (prio > PORT_BASED_PRIORITY_BASE)
		prio = PORT_BASED_PRIORITY_BASE;
	hw->ksz_switch->port_cfg[port].port_prio = prio;
	port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &ctrl);
	ctrl = (ctrl & ~PORT_BASED_PRIORITY_MASK) |
		(prio << PORT_BASED_PRIORITY_SHIFT);
	port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, ctrl);
}
/**
 * sw_dis_multi_queue - disable transmit multiple queues
 * @hw: The hardware instance.
 * @port: The port index.
 *
 * This routine disables the transmit multiple queues selection of the switch
 * port. Only single transmit queue on the port.
 */
static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
{
	port_cfg_prio(hw, port, 0);
}
/**
 * sw_init_prio - initialize switch priority
 * @hw: The hardware instance.
 *
 * This routine initializes the switch QoS priority functions: the 802.1p
 * tag-to-queue map, the DiffServ map, and all per-port QoS features
 * (disabled by default).
 */
static void sw_init_prio(struct ksz_hw *hw)
{
	struct ksz_switch *sw = hw->ksz_switch;
	int port;
	int tos;

	/*
	 * Map 802.1p tags onto priority queues in pairs:
	 * tags 0-1 -> queue 0, 2-3 -> 1, 4-5 -> 2, 6-7 -> 3.
	 */
	for (tos = 0; tos < 8; tos++)
		sw->p_802_1p[tos] = tos / 2;

	/* All DiffServ code points start out mapped to queue 0. */
	for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
		sw->diffserv[tos] = 0;

	/* All QoS functions disabled. */
	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		sw_dis_multi_queue(hw, port);
		sw_dis_diffserv(hw, port);
		sw_dis_802_1p(hw, port);
		sw_cfg_replace_vid(hw, port, 0);
		sw->port_cfg[port].port_prio = 0;
		sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
	}
	sw_cfg_replace_null_vid(hw, 0);
}
/**
 * port_get_def_vid - get port default VID.
 * @hw: The hardware instance.
 * @port: The port index.
 * @vid: Buffer to store the VID.
 *
 * This routine retrieves the default VID of the port.
 */
static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
{
	u32 addr;
	PORT_CTRL_ADDR(port, addr);
	addr += KS8842_PORT_CTRL_VID_OFFSET;
	*vid = readw(hw->io + addr);
}
/**
 * sw_init_vlan - initialize switch VLAN
 * @hw: The hardware instance.
 *
 * This routine initializes the VLAN function of the switch by caching the
 * hardware VLAN table and each port's default VID, and setting every port's
 * port-based VLAN membership to all ports.
 */
static void sw_init_vlan(struct ksz_hw *hw)
{
	int port;
	int entry;
	struct ksz_switch *sw = hw->ksz_switch;
	/* Read 16 VLAN entries from device's VLAN table. */
	for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
		sw_r_vlan_table(hw, entry,
			&sw->vlan_table[entry].vid,
			&sw->vlan_table[entry].fid,
			&sw->vlan_table[entry].member);
	}
	for (port = 0; port < TOTAL_PORT_NUM; port++) {
		port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
		sw->port_cfg[port].member = PORT_MASK;
	}
}
/**
 * sw_cfg_port_base_vlan - configure port-based VLAN membership
 * @hw: The hardware instance.
 * @port: The port index.
 * @member: The port-based VLAN membership.
 *
 * This routine configures the port-based VLAN membership of the port and
 * caches the value in the switch configuration.
 */
static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
{
	u32 addr;
	u8 ctrl;

	PORT_CTRL_ADDR(port, addr);
	addr += KS8842_PORT_CTRL_2_OFFSET;
	ctrl = readb(hw->io + addr);
	ctrl = (ctrl & ~PORT_VLAN_MEMBERSHIP) | (member & PORT_MASK);
	writeb(ctrl, hw->io + addr);
	hw->ksz_switch->port_cfg[port].member = member;
}
/**
 * sw_get_addr - get the switch MAC address.
 * @hw: The hardware instance.
 * @mac_addr: Buffer to store the MAC address.
 *
 * This function retrieves the MAC address of the switch.
 * NOTE(review): the address is spread across two register banks; even
 * bytes come from MAC_ADDR_0 and odd bytes from MAC_ADDR_1, matching
 * sw_set_addr below. Register layout assumed per KS8842 datasheet.
 */
static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int i;
	for (i = 0; i < 6; i += 2) {
		mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
		mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
	}
}
/**
 * sw_set_addr - configure switch MAC address
 * @hw: The hardware instance.
 * @mac_addr: The MAC address.
 *
 * This function configures the MAC address of the switch, writing the
 * bytes in the same two-bank layout used by sw_get_addr.
 */
static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int i;
	for (i = 0; i < 6; i += 2) {
		writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
		writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
	}
}
/**
 * sw_set_global_ctrl - set switch global control
 * @hw: The hardware instance.
 *
 * This routine sets the global control of the switch function.
 */
static void sw_set_global_ctrl(struct ksz_hw *hw)
{
	u16 data;
	/* Enable switch MII flow control. */
	data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	data |= SWITCH_FLOW_CTRL;
	writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
	data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
	/* Enable aggressive back off algorithm in half duplex mode. */
	data |= SWITCH_AGGR_BACKOFF;
	/* Enable automatic fast aging when link changed detected. */
	data |= SWITCH_AGING_ENABLE;
	data |= SWITCH_LINK_AUTO_AGING;
	/* Honor the driver-level FAST_AGING override in either direction. */
	if (hw->overrides & FAST_AGING)
		data |= SWITCH_FAST_AGING;
	else
		data &= ~SWITCH_FAST_AGING;
	writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
	data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
	/* Enable no excessive collision drop. */
	data |= NO_EXC_COLLISION_DROP;
	writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
}
/* Spanning tree states used by port_set_stp_state. */
enum {
	STP_STATE_DISABLED = 0,
	STP_STATE_LISTENING,
	STP_STATE_LEARNING,
	STP_STATE_FORWARDING,
	STP_STATE_BLOCKED,
	STP_STATE_SIMPLE
};

/**
 * port_set_stp_state - configure port spanning tree state
 * @hw: The hardware instance.
 * @port: The port index.
 * @state: The spanning tree state (STP_STATE_*).
 *
 * This routine configures the spanning tree state of the port by adjusting
 * the RX/TX enable and learning-disable bits, then caches the state.
 */
static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
{
	u16 data;
	port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
	switch (state) {
	case STP_STATE_DISABLED:
		/* No traffic, no learning. */
		data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
		data |= PORT_LEARN_DISABLE;
		break;
	case STP_STATE_LISTENING:
		/*
		 * No need to turn on transmit because of port direct mode.
		 * Turning on receive is required if static MAC table is not setup.
		 */
		data &= ~PORT_TX_ENABLE;
		data |= PORT_RX_ENABLE;
		data |= PORT_LEARN_DISABLE;
		break;
	case STP_STATE_LEARNING:
		/* Receive and learn, but do not forward. */
		data &= ~PORT_TX_ENABLE;
		data |= PORT_RX_ENABLE;
		data &= ~PORT_LEARN_DISABLE;
		break;
	case STP_STATE_FORWARDING:
		/* Fully operational. */
		data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
		data &= ~PORT_LEARN_DISABLE;
		break;
	case STP_STATE_BLOCKED:
		/*
		 * Need to setup static MAC table with override to keep receiving BPDU
		 * messages. See sw_init_stp routine.
		 */
		data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
		data |= PORT_LEARN_DISABLE;
		break;
	case STP_STATE_SIMPLE:
		/* Forwarding without learning. */
		data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
		data |= PORT_LEARN_DISABLE;
		break;
	}
	port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
	hw->ksz_switch->port_cfg[port].stp_state = state;
}
#define STP_ENTRY 0
#define BROADCAST_ENTRY 1
#define BRIDGE_ADDR_ENTRY 2
#define IPV6_ADDR_ENTRY 3
/**
 * sw_clr_sta_mac_table - clear static MAC table
 * @hw: The hardware instance.
 *
 * This routine clears the static MAC table by rewriting every cached entry
 * with its valid flag forced to 0.
 */
static void sw_clr_sta_mac_table(struct ksz_hw *hw)
{
	struct ksz_mac_table *entry;
	int i;
	for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
		entry = &hw->ksz_switch->mac_table[i];
		/* The hard-coded 0 is the "valid" argument: invalidate entry. */
		sw_w_sta_mac_table(hw, i,
			entry->mac_addr, entry->ports,
			entry->override, 0,
			entry->use_fid, entry->fid);
	}
}
/**
 * sw_init_stp - initialize switch spanning tree support
 * @hw: The hardware instance.
 *
 * This routine initializes the spanning tree support of the switch by
 * installing a static MAC entry for the STP multicast address
 * 01:80:C2:00:00:00 with override, so BPDUs still reach the host port
 * while a port is blocked.
 */
static void sw_init_stp(struct ksz_hw *hw)
{
	struct ksz_mac_table *entry;
	entry = &hw->ksz_switch->mac_table[STP_ENTRY];
	entry->mac_addr[0] = 0x01;
	entry->mac_addr[1] = 0x80;
	entry->mac_addr[2] = 0xC2;
	entry->mac_addr[3] = 0x00;
	entry->mac_addr[4] = 0x00;
	entry->mac_addr[5] = 0x00;
	entry->ports = HOST_MASK;
	entry->override = 1;
	entry->valid = 1;
	sw_w_sta_mac_table(hw, STP_ENTRY,
		entry->mac_addr, entry->ports,
		entry->override, entry->valid,
		entry->use_fid, entry->fid);
}
/**
 * sw_block_addr - block certain packets from the host port
 * @hw: The hardware instance.
 *
 * This routine blocks certain packets from reaching to the host port by
 * invalidating the broadcast/bridge/IPv6 static MAC table entries.
 */
static void sw_block_addr(struct ksz_hw *hw)
{
	struct ksz_mac_table *entry;
	int i;
	for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
		entry = &hw->ksz_switch->mac_table[i];
		entry->valid = 0;
		sw_w_sta_mac_table(hw, i,
			entry->mac_addr, entry->ports,
			entry->override, entry->valid,
			entry->use_fid, entry->fid);
	}
}
#define PHY_LINK_SUPPORT \
(PHY_AUTO_NEG_ASYM_PAUSE | \
PHY_AUTO_NEG_SYM_PAUSE | \
PHY_AUTO_NEG_100BT4 | \
PHY_AUTO_NEG_100BTX_FD | \
PHY_AUTO_NEG_100BTX | \
PHY_AUTO_NEG_10BT_FD | \
PHY_AUTO_NEG_10BT)
/* Read the PHY control register; @phy is the PHY's base register offset. */
static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

/* Write the PHY control register. */
static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

/* Read the PHY link status register. */
static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
}

/* Read the PHY auto-negotiation advertisement register. */
static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
}

/* Write the PHY auto-negotiation advertisement register. */
static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
}

/* Read the PHY remote (link partner) capability register. */
static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
}

/* Read the crossover setting (shares the PHY control register). */
static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

/* Write the crossover setting (shares the PHY control register). */
static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
}

/* Read the PHY polarity (special control) register. */
static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
}

/* Write the PHY polarity (special control) register. */
static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
}

/* Read the PHY LinkMD (cable diagnostics) register. */
static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
{
	*data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
}

/* Write the PHY LinkMD (cable diagnostics) register. */
static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
{
	writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
}
/**
 * hw_r_phy - read data from PHY register
 * @hw: The hardware instance.
 * @port: Port to read.
 * @reg: PHY register to read.
 * @val: Buffer to store the read data.
 *
 * This routine reads data from the PHY register of the given port.
 */
static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
{
	int phy;
	phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
	*val = readw(hw->io + phy);
}
/**
 * hw_w_phy - write data to PHY register
 * @hw: The hardware instance.
 * @port: Port to write.
 * @reg: PHY register to write.
 * @val: Word data to write.
 *
 * This routine writes data to the PHY register of the given port.
 */
static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
{
	int phy;
	phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
	writew(val, hw->io + phy);
}
/*
* EEPROM access functions
*/
#define AT93C_CODE 0
#define AT93C_WR_OFF 0x00
#define AT93C_WR_ALL 0x10
#define AT93C_ER_ALL 0x20
#define AT93C_WR_ON 0x30
#define AT93C_WRITE 1
#define AT93C_READ 2
#define AT93C_ERASE 3
#define EEPROM_DELAY 4
/* Drive the given EEPROM control line(s) low. */
static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;
	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data &= ~gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}

/* Drive the given EEPROM control line(s) high. */
static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;
	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	data |= gpio;
	writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
}

/* Sample the given EEPROM control line(s); non-zero when any is high. */
static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
{
	u16 data;
	data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
	return (u8)(data & gpio);
}
/* Generate one bit-banged clock pulse on the EEPROM serial clock line. */
static void eeprom_clk(struct ksz_hw *hw)
{
	raise_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
	drop_gpio(hw, EEPROM_SERIAL_CLOCK);
	udelay(EEPROM_DELAY);
}
/* Clock a 16-bit word out of the EEPROM, MSB first. */
static u16 spi_r(struct ksz_hw *hw)
{
	int bit;
	u16 word = 0;

	for (bit = 0; bit < 16; bit++) {
		raise_gpio(hw, EEPROM_SERIAL_CLOCK);
		udelay(EEPROM_DELAY);
		/* Sample on the rising edge, shifting in from the right. */
		word <<= 1;
		if (state_gpio(hw, EEPROM_DATA_IN))
			word |= 1;
		drop_gpio(hw, EEPROM_SERIAL_CLOCK);
		udelay(EEPROM_DELAY);
	}
	return word;
}
/* Shift a 16-bit word into the EEPROM, MSB first. */
static void spi_w(struct ksz_hw *hw, u16 data)
{
	int bit;

	for (bit = 15; bit >= 0; bit--) {
		if (data & (1 << bit))
			raise_gpio(hw, EEPROM_DATA_OUT);
		else
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}
}
/*
 * Send an AT93C46 command header: start bit, 2-bit opcode @data,
 * then the 6-bit register address @reg, all MSB first.
 */
static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
{
	int bit;

	/* Initial start bit. */
	raise_gpio(hw, EEPROM_DATA_OUT);
	eeprom_clk(hw);

	/* AT93C operation code. */
	for (bit = 1; bit >= 0; bit--) {
		if (data & (1 << bit))
			raise_gpio(hw, EEPROM_DATA_OUT);
		else
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}

	/* Address location. */
	for (bit = 5; bit >= 0; bit--) {
		if (reg & (1 << bit))
			raise_gpio(hw, EEPROM_DATA_OUT);
		else
			drop_gpio(hw, EEPROM_DATA_OUT);
		eeprom_clk(hw);
	}
}
#define EEPROM_DATA_RESERVED 0
#define EEPROM_DATA_MAC_ADDR_0 1
#define EEPROM_DATA_MAC_ADDR_1 2
#define EEPROM_DATA_MAC_ADDR_2 3
#define EEPROM_DATA_SUBSYS_ID 4
#define EEPROM_DATA_SUBSYS_VEN_ID 5
#define EEPROM_DATA_PM_CAP 6
/* User defined EEPROM data */
#define EEPROM_DATA_OTHER_MAC_ADDR 9
/**
 * eeprom_read - read from AT93C46 EEPROM
 * @hw: The hardware instance.
 * @reg: The register offset.
 *
 * This function reads a word from the AT93C46 EEPROM.
 *
 * Return the data value.
 */
static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
{
	u16 data;
	/* Select the chip, issue the READ command, clock the word in. */
	raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
	spi_reg(hw, AT93C_READ, reg);
	data = spi_r(hw);
	drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
	return data;
}
/**
* eeprom_write - write to AT93C46 EEPROM
* @hw: The hardware instance.
* @reg: The register offset.
* @data: The data value.
*
* This procedure writes a word to the AT93C46 EEPROM.
*/
static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
{
int timeout;
raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
/* Enable write. */
spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
drop_gpio(hw, EEPROM_CHIP_SELECT);
udelay(1);
/* Erase the register. */
raise_gpio(hw, EEPROM_CHIP_SELECT);
spi_reg(hw, AT93C_ERASE, reg);
drop_gpio(hw, EEPROM_CHIP_SELECT);
udelay(1);
/* Check operation complete. */
raise_gpio(hw, EEPROM_CHIP_SELECT);
timeout = 8;
mdelay(2);
do {
mdelay(1);
} while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
drop_gpio(hw, EEPROM_CHIP_SELECT);
udelay(1);
/* Write the register. */
raise_gpio(hw, EEPROM_CHIP_SELECT);
spi_reg(hw, AT93C_WRITE, reg);
spi_w(hw, data);
drop_gpio(hw, EEPROM_CHIP_SELECT);
udelay(1);
/* Check operation complete. */
raise_gpio(hw, EEPROM_CHIP_SELECT);
timeout = 8;
mdelay(2);
do {
mdelay(1);
} while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
drop_gpio(hw, EEPROM_CHIP_SELECT);
udelay(1);
/* Disable write. */
raise_gpio(hw, EEPROM_CHIP_SELECT);
spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);
drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
}
/*
* Link detection routines
*/
/*
 * advertised_flow_ctrl - adjust the pause advertisement bit.
 * Returns @ctrl with PORT_AUTO_NEG_SYM_PAUSE set only when the port is
 * configured for symmetric PAUSE; TX-only and RX-only modes are not
 * supported and advertise no pause capability.
 */
static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
{
	ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
	if (PHY_FLOW_CTRL == port->flow_ctrl)
		ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
	return ctrl;
}
/*
 * set_flow_ctrl - program RX/TX DMA flow control enables.
 * Updates the cached rx_cfg/tx_cfg and, when the hardware is enabled,
 * writes only the registers whose value actually changed.
 */
static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
{
	u32 rx_cfg;
	u32 tx_cfg;
	rx_cfg = hw->rx_cfg;
	tx_cfg = hw->tx_cfg;
	if (rx)
		hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
	else
		hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
	if (tx)
		hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
	else
		hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
	if (hw->enabled) {
		if (rx_cfg != hw->rx_cfg)
			writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
		if (tx_cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}
/*
 * determine_flow_ctrl - resolve pause usage from auto-negotiation results.
 * @local/@remote are the local advertisement and link partner ability
 * words; the decision table follows IEEE 802.3 Annex 28B pause
 * resolution. Does nothing when the PAUSE_FLOW_CTRL override is set,
 * and only applies to the DMA when no switch is present.
 */
static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
	u16 local, u16 remote)
{
	int rx;
	int tx;
	if (hw->overrides & PAUSE_FLOW_CTRL)
		return;
	rx = tx = 0;
	if (port->force_link)
		rx = tx = 1;
	if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
		if (local & PHY_AUTO_NEG_SYM_PAUSE) {
			/* Both sides symmetric: pause both directions. */
			rx = tx = 1;
		} else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
				(local & PHY_AUTO_NEG_PAUSE) ==
				PHY_AUTO_NEG_ASYM_PAUSE) {
			/* Partner can receive pause; we only send. */
			tx = 1;
		}
	} else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
		/* Partner only sends pause; honor it when we do both. */
		if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
			rx = 1;
	}
	if (!hw->ksz_switch)
		set_flow_ctrl(hw, rx, tx);
}
/*
 * port_cfg_change - apply a half-duplex hardware workaround.
 * On chips with HALF_DUPLEX_SIGNAL_BUG, transmit flow control must be
 * turned off in half duplex (info->duplex == 1) unless the
 * PAUSE_FLOW_CTRL override is in effect.
 */
static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
	struct ksz_port_info *info, u16 link_status)
{
	if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
			!(hw->overrides & PAUSE_FLOW_CTRL)) {
		u32 cfg = hw->tx_cfg;
		/* Disable flow control in the half duplex mode. */
		if (1 == info->duplex)
			hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
		if (hw->enabled && cfg != hw->tx_cfg)
			writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
	}
}
/**
 * port_get_link_speed - get current link status
 * @port: The port instance.
 *
 * This routine reads PHY registers to determine the current link status of
 * the switch ports. For each port it updates the cached speed, duplex and
 * connection state, re-resolves flow control on a new link, and records
 * link-down events in the MIB. Runs with interrupts blocked.
 */
static void port_get_link_speed(struct ksz_port *port)
{
	uint interrupt;
	struct ksz_port_info *info;
	struct ksz_port_info *linked = NULL;
	struct ksz_hw *hw = port->hw;
	u16 data;
	u16 status;
	u8 local;
	u8 remote;
	int i;
	int p;
	int change = 0;
	interrupt = hw_block_intr(hw);
	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		info = &hw->port_info[p];
		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
		port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
		/*
		 * Link status is changing all the time even when there is no
		 * cable connection!
		 */
		remote = status & (PORT_AUTO_NEG_COMPLETE |
			PORT_STATUS_LINK_GOOD);
		local = (u8) data;
		/* No change to status. */
		if (local == info->advertised && remote == info->partner)
			continue;
		info->advertised = local;
		info->partner = remote;
		if (status & PORT_STATUS_LINK_GOOD) {
			/* Remember the first linked port. */
			if (!linked)
				linked = info;
			/* Decode speed: default 10 Mbit, duplex 1 = half. */
			info->tx_rate = 10 * TX_RATE_UNIT;
			if (status & PORT_STATUS_SPEED_100MBIT)
				info->tx_rate = 100 * TX_RATE_UNIT;
			info->duplex = 1;
			if (status & PORT_STATUS_FULL_DUPLEX)
				info->duplex = 2;
			if (media_connected != info->state) {
				/* New link: resolve pause from negotiation. */
				hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
					&data);
				hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
					&status);
				determine_flow_ctrl(hw, port, data, status);
				if (hw->ksz_switch) {
					/* Back pressure only in half duplex. */
					port_cfg_back_pressure(hw, p,
						(1 == info->duplex));
				}
				change |= 1 << i;
				port_cfg_change(hw, port, info, status);
			}
			info->state = media_connected;
		} else {
			if (media_disconnected != info->state) {
				change |= 1 << i;
				/* Indicate the link just goes down. */
				hw->port_mib[p].link_down = 1;
			}
			info->state = media_disconnected;
		}
		hw->port_mib[p].state = (u8) info->state;
	}
	/* Re-point the trunk's linked port if the current one went down. */
	if (linked && media_disconnected == port->linked->state)
		port->linked = linked;
	hw_restore_intr(hw, interrupt);
}
#define PHY_RESET_TIMEOUT 10
/**
 * port_set_link_speed - set port speed
 * @port: The port instance.
 *
 * This routine sets the link speed of the switch ports via
 * auto-negotiation, masking the advertised capabilities according to any
 * user-requested speed/duplex, and restarting negotiation only when the
 * advertisement actually changes on a linked port.
 */
static void port_set_link_speed(struct ksz_port *port)
{
	struct ksz_port_info *info;
	struct ksz_hw *hw = port->hw;
	u16 data;
	u16 cfg;
	u8 status;
	int i;
	int p;
	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		info = &hw->port_info[p];
		port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
		port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
		/* cfg keeps the old advertisement only when link is up. */
		cfg = 0;
		if (status & PORT_STATUS_LINK_GOOD)
			cfg = data;
		/* Start from advertising everything we support. */
		data |= PORT_AUTO_NEG_ENABLE;
		data = advertised_flow_ctrl(port, data);
		data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
			PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;
		/* Check if manual configuration is specified by the user. */
		if (port->speed || port->duplex) {
			if (10 == port->speed)
				data &= ~(PORT_AUTO_NEG_100BTX_FD |
					PORT_AUTO_NEG_100BTX);
			else if (100 == port->speed)
				data &= ~(PORT_AUTO_NEG_10BT_FD |
					PORT_AUTO_NEG_10BT);
			/* duplex 1 = half, 2 = full. */
			if (1 == port->duplex)
				data &= ~(PORT_AUTO_NEG_100BTX_FD |
					PORT_AUTO_NEG_10BT_FD);
			else if (2 == port->duplex)
				data &= ~(PORT_AUTO_NEG_100BTX |
					PORT_AUTO_NEG_10BT);
		}
		if (data != cfg) {
			data |= PORT_AUTO_NEG_RESTART;
			port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
		}
	}
}
/**
 * port_force_link_speed - force port speed
 * @port: The port instance.
 *
 * This routine forces the link speed of the switch ports by disabling
 * auto-negotiation and setting the speed and duplex bits directly in the
 * PHY control register. duplex 1 = half, 2 = full; a value of 0 leaves
 * the corresponding bit untouched.
 */
static void port_force_link_speed(struct ksz_port *port)
{
	struct ksz_hw *hw = port->hw;
	u16 data;
	int i;
	int phy;
	int p;
	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
		hw_r_phy_ctrl(hw, phy, &data);
		data &= ~PHY_AUTO_NEG_ENABLE;
		if (10 == port->speed)
			data &= ~PHY_SPEED_100MBIT;
		else if (100 == port->speed)
			data |= PHY_SPEED_100MBIT;
		if (1 == port->duplex)
			data &= ~PHY_FULL_DUPLEX;
		else if (2 == port->duplex)
			data |= PHY_FULL_DUPLEX;
		hw_w_phy_ctrl(hw, phy, data);
	}
}
/* Power the PHY of every port in the trunk down (@enable) or up. */
static void port_set_power_saving(struct ksz_port *port, int enable)
{
	struct ksz_hw *hw = port->hw;
	int i;
	int p;
	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
		port_cfg(hw, p,
			KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
}
/*
* KSZ8841 power management functions
*/
/**
 * hw_chk_wol_pme_status - check PMEN pin
 * @hw: The hardware instance.
 *
 * This function is used to check PMEN pin is asserted, by reading the
 * PME_Status bit in the PCI power management control register.
 *
 * Return 1 if PMEN pin is asserted; otherwise, 0.
 */
static int hw_chk_wol_pme_status(struct ksz_hw *hw)
{
	struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
	struct pci_dev *pdev = hw_priv->pdev;
	u16 data;
	/* No PM capability means PME can never be asserted. */
	if (!pdev->pm_cap)
		return 0;
	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
	return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
}
/**
 * hw_clr_wol_pme_status - clear PMEN pin
 * @hw: The hardware instance.
 *
 * This routine is used to clear PME_Status to deassert PMEN pin.
 */
static void hw_clr_wol_pme_status(struct ksz_hw *hw)
{
	struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
	struct pci_dev *pdev = hw_priv->pdev;
	u16 data;
	if (!pdev->pm_cap)
		return;
	/* Clear PME_Status to deassert PMEN pin (write-1-to-clear). */
	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
	data |= PCI_PM_CTRL_PME_STATUS;
	pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
}
/**
 * hw_cfg_wol_pme - enable or disable Wake-on-LAN
 * @hw: The hardware instance.
 * @set: The flag indicating whether to enable or disable.
 *
 * This routine is used to enable or disable Wake-on-LAN. Enabling also
 * selects the D3hot power state together with PME enable.
 */
static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
{
	struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
	struct pci_dev *pdev = hw_priv->pdev;
	u16 data;
	if (!pdev->pm_cap)
		return;
	pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
	data &= ~PCI_PM_CTRL_STATE_MASK;
	if (set)
		data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
	else
		data &= ~PCI_PM_CTRL_PME_ENABLE;
	pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
}
/**
 * hw_cfg_wol - configure Wake-on-LAN features
 * @hw: The hardware instance.
 * @frame: The pattern frame bit.
 * @set: The flag indicating whether to enable or disable.
 *
 * This routine is used to enable or disable certain Wake-on-LAN features.
 */
static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
{
	u16 ctrl = readw(hw->io + KS8841_WOL_CTRL_OFFSET);

	if (set)
		ctrl |= frame;
	else
		ctrl &= ~frame;
	writew(ctrl, hw->io + KS8841_WOL_CTRL_OFFSET);
}
/**
 * hw_set_wol_frame - program Wake-on-LAN pattern
 * @hw: The hardware instance.
 * @i: The frame index (0-3).
 * @mask_size: The size of the mask in bytes; each mask bit covers one
 *	pattern byte.
 * @mask: Mask to ignore certain bytes in the pattern.
 * @frame_size: The size of the frame.
 * @pattern: The frame data.
 *
 * This routine programs a Wake-on-LAN pattern: it writes the mask bytes to
 * the hardware, collects the pattern bytes selected by the mask, and writes
 * the CRC of those bytes so the hardware can match incoming frames.
 */
static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
	const u8 *mask, uint frame_size, const u8 *pattern)
{
	int bits;
	int from;
	int len;
	int to;
	u32 crc;
	u8 data[64];
	u8 val = 0;

	/* Clamp the frame to what the mask can describe and to the 64-byte
	 * hardware limit.
	 */
	if (frame_size > mask_size * 8)
		frame_size = mask_size * 8;
	if (frame_size > 64)
		frame_size = 64;

	/* Each frame register block is 0x10 bytes apart. */
	i *= 0x10;
	writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
	writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);

	/* Walk the mask bit by bit, copying every pattern byte whose mask
	 * bit is set into data[] for the CRC computation.  A zero mask byte
	 * lets us skip 8 pattern bytes at once.
	 */
	bits = len = from = to = 0;
	do {
		if (bits) {
			if ((val & 1))
				data[to++] = pattern[from];
			val >>= 1;
			++from;
			--bits;
		} else {
			val = mask[len];
			writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
				+ len);
			++len;
			if (val)
				bits = 8;
			else
				from += 8;
		}
	} while (from < (int) frame_size);

	/* Trim mask bits that extend past the end of the frame so the
	 * hardware does not match on bytes beyond the pattern.
	 */
	if (val) {
		bits = mask[len - 1];
		val <<= (from % 8);
		bits &= ~val;
		writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
			1);
	}

	/* CRC over only the masked-in bytes identifies the pattern. */
	crc = ether_crc(to, data);
	writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
}
/**
 * hw_add_wol_arp - add ARP pattern
 * @hw: The hardware instance.
 * @ip_addr: The IPv4 address assigned to the device.
 *
 * This routine programs frame slot 3 with an ARP request pattern so an ARP
 * query for @ip_addr wakes up the host.  The mask selects the broadcast
 * destination, the EtherType/ARP header fields, and the target IP address.
 */
static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr)
{
	static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
	u8 pattern[42] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,	/* broadcast dest */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* source (ignored) */
		0x08, 0x06,				/* EtherType = ARP */
		0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,	/* ARP request */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* sender MAC */
		0x00, 0x00, 0x00, 0x00,			/* sender IP */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00,	/* target MAC */
		0x00, 0x00, 0x00, 0x00 };		/* target IP */

	/* Fill in the target IP address at offset 38. */
	memcpy(&pattern[38], ip_addr, 4);
	hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
}
/**
 * hw_add_wol_bcast - add broadcast pattern
 * @hw: The hardware instance.
 *
 * This routine programs frame slot 2 with the broadcast destination MAC so
 * any broadcast frame can wake up the host.
 */
static void hw_add_wol_bcast(struct ksz_hw *hw)
{
	static const u8 mask[] = { 0x3F };
	static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

	hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
}
/**
 * hw_add_wol_mcast - add multicast pattern
 * @hw: The hardware instance.
 *
 * This routine programs frame slot 1 with a multicast pattern for waking up
 * the host.
 *
 * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
 * by the IPv6 ping command.  Note that multicast packets are filtered through
 * the multicast hash table, so not all multicast packets can wake up the
 * host.
 */
static void hw_add_wol_mcast(struct ksz_hw *hw)
{
	static const u8 mask[] = { 0x3F };
	u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };

	/* Solicited-node multicast address: 33:33:FF followed by the last
	 * three bytes of the device MAC address.
	 */
	memcpy(&pattern[3], &hw->override_addr[3], 3);
	hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
}
/**
 * hw_add_wol_ucast - add unicast pattern
 * @hw: The hardware instance.
 *
 * This routine programs frame slot 0 with the device's own MAC address so a
 * unicast frame can wake up the host.
 *
 * It is assumed the unicast packet is directed to the device, as the hardware
 * can only receive them in the normal case.
 */
static void hw_add_wol_ucast(struct ksz_hw *hw)
{
	static const u8 mask[] = { 0x3F };

	hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
}
/**
 * hw_enable_wol - enable Wake-on-LAN
 * @hw: The hardware instance.
 * @wol_enable: The Wake-on-LAN settings (mask of WAKE_* flags).
 * @net_addr: The IPv4 address assigned to the device.
 *
 * This routine enables the individual Wake-on-LAN features selected in
 * @wol_enable and (re)programs the corresponding match patterns.
 *
 * NOTE(review): the broadcast pattern (frame 2) is enabled here but its
 * pattern is not programmed in this routine -- presumably hw_add_wol_bcast()
 * is called elsewhere; confirm against the callers.
 */
static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr)
{
	hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
	hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
	hw_add_wol_ucast(hw);
	hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
	hw_add_wol_mcast(hw);
	hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
	hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
	hw_add_wol_arp(hw, net_addr);
}
/**
 * hw_init - check driver is correct for the hardware
 * @hw: The hardware instance.
 *
 * This function verifies the chip is a supported KSZ884x device, applies
 * revision-specific bug workarounds, and sets the bus speed.
 *
 * Return number of ports or 0 if not right.
 */
static int hw_init(struct ksz_hw *hw)
{
	int ports;
	u16 chip_id;
	u16 revision;

	/* Set bus speed to 125MHz. */
	writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);

	/* Check KSZ884x chip ID. */
	chip_id = readw(hw->io + KS884X_CHIP_ID_OFFSET);
	revision = (chip_id & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
	chip_id &= KS884X_CHIP_ID_MASK_41;
	switch (chip_id) {
	case REG_CHIP_ID_41:
		ports = 1;
		break;
	case REG_CHIP_ID_42:
		ports = 2;
		break;
	default:
		/* Not a chip this driver supports. */
		return 0;
	}

	/* Setup hardware features or bug workarounds. */
	if (revision <= 1) {
		hw->features |= SMALL_PACKET_TX_BUG;
		if (ports == 1)
			hw->features |= HALF_DUPLEX_SIGNAL_BUG;
	}
	return ports;
}
/**
 * hw_reset - reset the hardware
 * @hw: The hardware instance.
 *
 * This routine issues a global software reset, waits for it to take effect,
 * and then releases the reset.
 */
static void hw_reset(struct ksz_hw *hw)
{
	writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);

	/* Wait for device to reset. */
	mdelay(10);

	/* Write 0 to clear device reset. */
	writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
}
/**
 * hw_setup - setup the hardware
 * @hw: The hardware instance.
 *
 * This routine prepares the transmit and receive DMA configuration words
 * that are programmed into the hardware when it is later enabled.
 */
static void hw_setup(struct ksz_hw *hw)
{
#if SET_DEFAULT_LED
	u16 data;

	/* Change default LED mode. */
	data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
	data &= ~LED_MODE;
	data |= SET_DEFAULT_LED;
	writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
#endif

	/* Setup transmit control. */
	hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);

	/* Setup receive control. */
	hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
		(DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
	hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;

	/* Hardware cannot handle UDP packet in IP fragments. */
	/* (presumably why only TCP/IP receive checksum offload is enabled
	 * here and UDP is left to software -- confirm against the errata)
	 */
	hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);

	if (hw->all_multi)
		hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
	if (hw->promiscuous)
		hw->rx_cfg |= DMA_RX_PROMISCUOUS;
}
/**
 * hw_setup_intr - setup interrupt mask
 * @hw: The hardware instance.
 *
 * This routine initializes the interrupt mask used for normal operation;
 * receive overrun is included in addition to the default mask.
 */
static void hw_setup_intr(struct ksz_hw *hw)
{
	hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
}
/*
 * ksz_check_desc_num - validate the configured descriptor count
 * @info: Descriptor information structure.
 *
 * The hardware requires the descriptor count to be a power of two and at
 * least (1 << MIN_DESC_SHIFT).  If the configured count is invalid it is
 * rounded up to the next acceptable power of two.  The index mask is
 * derived from the final count.
 */
static void ksz_check_desc_num(struct ksz_desc_info *info)
{
#define MIN_DESC_SHIFT  2

	int num = info->alloc;
	int order = 0;

	/* Strip trailing zero bits to locate the lowest set bit. */
	while (!(num & 1)) {
		order++;
		num >>= 1;
	}

	/* num != 1 means more than one bit was set (not a power of two). */
	if (num != 1 || order < MIN_DESC_SHIFT) {
		pr_alert("Hardware descriptor numbers not right!\n");
		/* Count the remaining bits to round up to the next power. */
		while (num) {
			order++;
			num >>= 1;
		}
		if (order < MIN_DESC_SHIFT)
			order = MIN_DESC_SHIFT;
		info->alloc = 1 << order;
	}
	info->mask = info->alloc - 1;
}
/*
 * hw_init_desc - initialize a descriptor ring
 * @desc_info: Descriptor information structure.
 * @transmit: Indication that descriptors are for transmit.
 *
 * Links each software descriptor to its hardware descriptor, chains the
 * hardware descriptors into a ring via their physical addresses, and resets
 * the bookkeeping indices.
 *
 * NOTE(review): @transmit is currently unused in this routine.
 */
static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
{
	int i;
	u32 phys = desc_info->ring_phys;
	struct ksz_hw_desc *desc = desc_info->ring_virt;
	struct ksz_desc *cur = desc_info->ring;
	struct ksz_desc *previous = NULL;

	/* Point each descriptor's "next" at the physical address of the
	 * following hardware descriptor.
	 */
	for (i = 0; i < desc_info->alloc; i++) {
		cur->phw = desc++;
		phys += desc_info->size;
		previous = cur++;
		previous->phw->next = cpu_to_le32(phys);
	}

	/* Close the ring: the last descriptor points back to the first. */
	previous->phw->next = cpu_to_le32(desc_info->ring_phys);
	previous->sw.buf.rx.end_of_ring = 1;
	previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);

	desc_info->avail = desc_info->alloc;
	desc_info->last = desc_info->next = 0;
	desc_info->cur = desc_info->ring;
}
/**
 * hw_set_desc_base - set descriptor base addresses
 * @hw: The hardware instance.
 * @tx_addr: The transmit descriptor base (physical address).
 * @rx_addr: The receive descriptor base (physical address).
 *
 * This routine programs the descriptor base addresses after reset.
 */
static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
{
	/* Set base address of Tx/Rx descriptors. */
	writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
	writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
}
/* Reset a descriptor ring's bookkeeping back to the empty state. */
static void hw_reset_pkts(struct ksz_desc_info *info)
{
	info->cur = info->ring;
	info->avail = info->alloc;
	info->last = info->next = 0;
}
/* Kick the receive DMA engine to resume processing descriptors. */
static inline void hw_resume_rx(struct ksz_hw *hw)
{
	writel(DMA_START, hw->io + KS_DMA_RX_START);
}
/**
 * hw_start_rx - start receiving
 * @hw: The hardware instance.
 *
 * This routine starts the receive function of the hardware.
 */
static void hw_start_rx(struct ksz_hw *hw)
{
	writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);

	/* Notify when the receive stops. */
	hw->intr_mask |= KS884X_INT_RX_STOPPED;

	writel(DMA_START, hw->io + KS_DMA_RX_START);
	hw_ack_intr(hw, KS884X_INT_RX_STOPPED);

	/* Non-zero rx_stop marks receive as running; see hw_stop_rx(). */
	hw->rx_stop++;

	/* Variable overflows. */
	if (0 == hw->rx_stop)
		hw->rx_stop = 2;
}
/**
 * hw_stop_rx - stop receiving
 * @hw: The hardware instance.
 *
 * This routine stops the receive function of the hardware.
 */
static void hw_stop_rx(struct ksz_hw *hw)
{
	hw->rx_stop = 0;
	hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
	writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
}
/**
 * hw_start_tx - start transmitting
 * @hw: The hardware instance.
 *
 * This routine starts the transmit function of the hardware.
 */
static void hw_start_tx(struct ksz_hw *hw)
{
	writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
}
/**
 * hw_stop_tx - stop transmitting
 * @hw: The hardware instance.
 *
 * This routine stops the transmit function of the hardware.
 */
static void hw_stop_tx(struct ksz_hw *hw)
{
	writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
}
/**
 * hw_disable - disable hardware
 * @hw: The hardware instance.
 *
 * This routine disables both receive and transmit and clears the enabled
 * flag.
 */
static void hw_disable(struct ksz_hw *hw)
{
	hw_stop_rx(hw);
	hw_stop_tx(hw);
	hw->enabled = 0;
}
/**
 * hw_enable - enable hardware
 * @hw: The hardware instance.
 *
 * This routine enables both transmit and receive and sets the enabled flag.
 */
static void hw_enable(struct ksz_hw *hw)
{
	hw_start_tx(hw);
	hw_start_rx(hw);
	hw->enabled = 1;
}
/**
* hw_alloc_pkt - allocate enough descriptors for transmission
* @hw: The hardware instance.
* @length: The length of the packet.
* @physical: Number of descriptors required.
*
* This function allocates descriptors for transmission.
*
* Return 0 if not successful; 1 for buffer copy; or number of descriptors.
*/
static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
{
/* Always leave one descriptor free. */
if (hw->tx_desc_info.avail <= 1)
return 0;
/* Allocate a descriptor for transmission and mark it current. */
get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;
/* Keep track of number of transmit descriptors used so far. */
++hw->tx_int_cnt;
hw->tx_size += length;
/* Cannot hold on too much data. */
if (hw->tx_size >= MAX_TX_HELD_SIZE)
hw->tx_int_cnt = hw->tx_int_mask + 1;
if (physical > hw->tx_desc_info.avail)
return 1;
return hw->tx_desc_info.avail;
}
/**
 * hw_send_pkt - mark packet for transmission
 * @hw: The hardware instance.
 *
 * This routine marks the packet for transmission in PCI version: it closes
 * the descriptor chain, hands ownership to the hardware, and kicks the
 * transmit DMA engine.
 */
static void hw_send_pkt(struct ksz_hw *hw)
{
	struct ksz_desc *cur = hw->tx_desc_info.cur;

	cur->sw.buf.tx.last_seg = 1;

	/* Interrupt only after specified number of descriptors used. */
	if (hw->tx_int_cnt > hw->tx_int_mask) {
		cur->sw.buf.tx.intr = 1;
		hw->tx_int_cnt = 0;
		hw->tx_size = 0;
	}

	/* KSZ8842 supports port directed transmission. */
	cur->sw.buf.tx.dest_port = hw->dst_ports;

	/* Hand the descriptor to the hardware before starting the DMA. */
	release_desc(cur);

	writel(0, hw->io + KS_DMA_TX_START);
}
/**
 * empty_addr - check whether a MAC address is all zero
 * @addr: The 6-byte MAC address.
 *
 * Return 1 if all six bytes of the address are zero; otherwise 0.
 */
static int empty_addr(u8 *addr)
{
	int i;

	/*
	 * Compare byte by byte instead of casting to u32/u16 pointers:
	 * the old casts assumed suitable alignment of the address array
	 * and violated strict-aliasing rules.
	 */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		if (addr[i])
			return 0;
	return 1;
}
/**
 * hw_set_addr - set MAC address
 * @hw: The hardware instance.
 *
 * This routine programs the MAC address of the hardware when the address is
 * overridden.
 */
static void hw_set_addr(struct ksz_hw *hw)
{
	int i;

	/* MAC_ADDR_ORDER maps the byte index to the hardware's byte order. */
	for (i = 0; i < MAC_ADDR_LEN; i++)
		writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
			hw->io + KS884X_ADDR_0_OFFSET + i);

	sw_set_addr(hw, hw->override_addr);
}
/**
 * hw_read_addr - read MAC address
 * @hw: The hardware instance.
 *
 * This routine retrieves the permanent MAC address of the hardware.  If no
 * override address was supplied and the hardware address is all zero, a
 * default address (adjusted by the device id) is programmed instead.
 */
static void hw_read_addr(struct ksz_hw *hw)
{
	int i;

	for (i = 0; i < MAC_ADDR_LEN; i++)
		hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
			KS884X_ADDR_0_OFFSET + i);

	if (!hw->mac_override) {
		memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
		if (empty_addr(hw->override_addr)) {
			/* No valid address in hardware; fall back to the
			 * driver default, made unique per device by id.
			 */
			memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
				MAC_ADDR_LEN);
			memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
				MAC_ADDR_LEN);
			hw->override_addr[5] += hw->id;
			hw_set_addr(hw);
		}
	}
}
/*
 * hw_ena_add_addr - program and enable an additional MAC address entry
 * @hw: The hardware instance.
 * @index: The additional address table slot.
 * @mac_addr: The 6-byte MAC address to program.
 *
 * The first two address bytes (plus the enable bit) go into the high
 * register; the remaining four bytes go into the low register.
 */
static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
{
	u32 hi = 0;
	u32 lo = 0;
	int i;

	for (i = 0; i < 2; i++)
		hi = (hi << 8) | mac_addr[i];
	hi |= ADD_ADDR_ENABLE;

	for (i = 2; i < 6; i++)
		lo = (lo << 8) | mac_addr[i];

	/* Each table entry occupies ADD_ADDR_INCR bytes of register space. */
	index *= ADD_ADDR_INCR;
	writel(lo, hw->io + index + KS_ADD_ADDR_0_LO);
	writel(hi, hw->io + index + KS_ADD_ADDR_0_HI);
}
/*
 * hw_set_add_addr - reprogram the whole additional address table
 * @hw: The hardware instance.
 *
 * Writes every software address entry to the hardware; empty entries are
 * disabled by clearing their high (enable) register.
 */
static void hw_set_add_addr(struct ksz_hw *hw)
{
	int entry;

	for (entry = 0; entry < ADDITIONAL_ENTRIES; entry++) {
		if (!empty_addr(hw->address[entry])) {
			hw_ena_add_addr(hw, entry, hw->address[entry]);
			continue;
		}
		/* Disable the slot by clearing its enable/high word. */
		writel(0, hw->io + ADD_ADDR_INCR * entry + KS_ADD_ADDR_0_HI);
	}
}
/*
 * hw_add_addr - add a MAC address to the additional address table
 * @hw: The hardware instance.
 * @mac_addr: The 6-byte MAC address to add.
 *
 * Return 0 if the address is already handled or was added; -1 if the table
 * is full.
 */
static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int free_slot = ADDITIONAL_ENTRIES;
	int i;

	/* The main device address already covers this one. */
	if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
		return 0;

	for (i = 0; i < hw->addr_list_size; i++) {
		if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
			return 0;
		/* Remember the first empty slot seen. */
		if (free_slot == ADDITIONAL_ENTRIES &&
		    empty_addr(hw->address[i]))
			free_slot = i;
	}

	if (free_slot >= ADDITIONAL_ENTRIES)
		return -1;

	memcpy(hw->address[free_slot], mac_addr, MAC_ADDR_LEN);
	hw_ena_add_addr(hw, free_slot, hw->address[free_slot]);
	return 0;
}
/*
 * hw_del_addr - remove a MAC address from the additional address table
 * @hw: The hardware instance.
 * @mac_addr: The 6-byte MAC address to remove.
 *
 * Return 0 if the address was found and disabled; -1 otherwise.
 */
static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
{
	int i;

	for (i = 0; i < hw->addr_list_size; i++) {
		if (memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
			continue;
		memset(hw->address[i], 0, MAC_ADDR_LEN);
		/* Clearing the high word disables the hardware entry. */
		writel(0, hw->io + ADD_ADDR_INCR * i + KS_ADD_ADDR_0_HI);
		return 0;
	}
	return -1;
}
/**
 * hw_clr_multicast - clear multicast addresses
 * @hw: The hardware instance.
 *
 * This routine removes all multicast addresses set in the hardware by
 * clearing both the cached hash bits and the hardware hash table registers.
 */
static void hw_clr_multicast(struct ksz_hw *hw)
{
	int i;

	for (i = 0; i < HW_MULTICAST_SIZE; i++) {
		hw->multi_bits[i] = 0;
		writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
	}
}
/**
 * hw_set_grp_addr - set multicast addresses
 * @hw: The hardware instance.
 *
 * This routine programs the 64-bit multicast hash filter so the hardware
 * accepts the addresses in the driver's multicast list.  Each address is
 * hashed via the top 6 bits of its CRC.
 */
static void hw_set_grp_addr(struct ksz_hw *hw)
{
	int i;

	memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);

	/* Hash every list entry into one of 64 filter bits. */
	for (i = 0; i < hw->multi_list_size; i++) {
		int bit = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;

		hw->multi_bits[bit >> 3] |= (u8) (1 << (bit & 7));
	}

	/* Push the assembled bitmap into the hardware registers. */
	for (i = 0; i < HW_MULTICAST_SIZE; i++)
		writeb(hw->multi_bits[i],
			hw->io + KS884X_MULTICAST_0_OFFSET + i);
}
/**
 * hw_set_multicast - enable or disable all multicast receiving
 * @hw: The hardware instance.
 * @multicast: To turn on or off the all multicast feature.
 *
 * This routine enables/disables the hardware to accept all multicast
 * packets.  Receive is stopped during reconfiguration and only restarted if
 * the hardware was enabled.
 */
static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
{
	/* Stop receiving for reconfiguration. */
	hw_stop_rx(hw);

	if (multicast)
		hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
	else
		hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;

	if (hw->enabled)
		hw_start_rx(hw);
}
/**
 * hw_set_promiscuous - enable or disable promiscuous receiving
 * @hw: The hardware instance.
 * @prom: To turn on or off the promiscuous feature.
 *
 * This routine enables/disables the hardware to accept all packets.
 * Receive is stopped during reconfiguration and only restarted if the
 * hardware was enabled.
 */
static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
{
	/* Stop receiving for reconfiguration. */
	hw_stop_rx(hw);

	if (prom)
		hw->rx_cfg |= DMA_RX_PROMISCUOUS;
	else
		hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;

	if (hw->enabled)
		hw_start_rx(hw);
}
/**
 * sw_enable - enable the switch
 * @hw: The hardware instance.
 * @enable: The flag to enable or disable the switch.
 *
 * This routine is used to enable/disable the switch in KSZ8842.  With
 * multiple device interfaces each port is isolated (host-port VLAN, STP
 * disabled); with a single interface all ports forward normally.
 */
static void sw_enable(struct ksz_hw *hw, int enable)
{
	int port;

	for (port = 0; port < SWITCH_PORT_NUM; port++) {
		if (hw->dev_count > 1) {
			/* Set port-base vlan membership with host port. */
			sw_cfg_port_base_vlan(hw, port,
				HOST_MASK | (1 << port));
			port_set_stp_state(hw, port, STP_STATE_DISABLED);
		} else {
			sw_cfg_port_base_vlan(hw, port, PORT_MASK);
			port_set_stp_state(hw, port, STP_STATE_FORWARDING);
		}
	}
	if (hw->dev_count > 1)
		port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
	else
		port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);

	/* Any non-zero @enable becomes the hardware start command. */
	if (enable)
		enable = KS8842_START;
	writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
}
/**
 * sw_setup - setup the switch
 * @hw: The hardware instance.
 *
 * This routine sets up the hardware switch engine for default operation:
 * broadcast storm protection, priorities, mirroring, VLAN, optional STP,
 * and finally enables the switch.
 */
static void sw_setup(struct ksz_hw *hw)
{
	int port;

	sw_set_global_ctrl(hw);

	/* Enable switch broadcast storm protection at 10% percent rate. */
	sw_init_broad_storm(hw);
	hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
	for (port = 0; port < SWITCH_PORT_NUM; port++)
		sw_ena_broad_storm(hw, port);

	sw_init_prio(hw);
	sw_init_mirror(hw);
	sw_init_prio_rate(hw);
	sw_init_vlan(hw);

	if (hw->features & STP_SUPPORT)
		sw_init_stp(hw);

	/* Without hardware flow control, fall back to PAUSE flow control. */
	if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
			SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
		hw->overrides |= PAUSE_FLOW_CTRL;
	sw_enable(hw, 1);
}
/**
 * ksz_start_timer - start kernel timer
 * @info: Kernel timer information.
 * @time: The time tick.
 *
 * This routine starts the kernel timer after the specified time tick and
 * marks it as repeating indefinitely (max = -1).
 */
static void ksz_start_timer(struct ksz_timer_info *info, int time)
{
	info->cnt = 0;
	info->timer.expires = jiffies + time;
	add_timer(&info->timer);

	/* infinity */
	info->max = -1;
}
/**
 * ksz_stop_timer - stop kernel timer
 * @info: Kernel timer information.
 *
 * This routine stops the kernel timer.  A zero max means the timer is
 * already stopped, so nothing is done.
 */
static void ksz_stop_timer(struct ksz_timer_info *info)
{
	if (info->max) {
		info->max = 0;
		del_timer_sync(&info->timer);
	}
}
/* Initialize a driver timer with its period, callback, and callback data. */
static void ksz_init_timer(struct ksz_timer_info *info, int period,
	void (*function)(unsigned long), void *data)
{
	info->max = 0;
	info->period = period;
	init_timer(&info->timer);
	info->timer.function = function;
	info->timer.data = (unsigned long) data;
}
/*
 * ksz_update_timer - re-arm a driver timer from its callback
 * @info: Kernel timer information.
 *
 * Called from the timer callback.  A negative max re-arms the timer
 * forever; a positive max re-arms it until cnt reaches max, then stops.
 */
static void ksz_update_timer(struct ksz_timer_info *info)
{
	info->cnt++;

	if (info->max < 0) {
		/* Infinite mode: always re-arm. */
		info->timer.expires = jiffies + info->period;
		add_timer(&info->timer);
		return;
	}
	if (info->max > 0) {
		if (info->cnt >= info->max) {
			/* Finished the requested number of ticks. */
			info->max = 0;
			return;
		}
		info->timer.expires = jiffies + info->period;
		add_timer(&info->timer);
	}
}
/**
 * ksz_alloc_soft_desc - allocate software descriptors
 * @desc_info: Descriptor information structure.
 * @transmit: Indication that descriptors are for transmit.
 *
 * This local function allocates software descriptors for manipulation in
 * memory and initializes the descriptor ring.
 *
 * Return 0 if successful.
 */
static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
{
	/*
	 * kcalloc zeroes the memory and checks the count * size
	 * multiplication for overflow, replacing the previous
	 * kmalloc + memset pair.
	 */
	desc_info->ring = kcalloc(desc_info->alloc, sizeof(struct ksz_desc),
		GFP_KERNEL);
	if (!desc_info->ring)
		return 1;
	hw_init_desc(desc_info, transmit);
	return 0;
}
/**
 * ksz_alloc_desc - allocate hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This local function allocates one DMA-coherent region for both the
 * receive and transmit hardware descriptor rings (plus alignment slack),
 * carves it up, and allocates the matching software descriptor rings.
 *
 * Return 0 if successful.
 */
static int ksz_alloc_desc(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;
	int offset;

	/* Allocate memory for RX & TX descriptors. */
	adapter->desc_pool.alloc_size =
		hw->rx_desc_info.size * hw->rx_desc_info.alloc +
		hw->tx_desc_info.size * hw->tx_desc_info.alloc +
		DESC_ALIGNMENT;

	adapter->desc_pool.alloc_virt =
		pci_alloc_consistent(
			adapter->pdev, adapter->desc_pool.alloc_size,
			&adapter->desc_pool.dma_addr);
	if (adapter->desc_pool.alloc_virt == NULL) {
		adapter->desc_pool.alloc_size = 0;
		return 1;
	}
	memset(adapter->desc_pool.alloc_virt, 0, adapter->desc_pool.alloc_size);

	/* Align to the next cache line boundary. */
	offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
		(DESC_ALIGNMENT -
		((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
	adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
	adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;

	/* Allocate receive/transmit descriptors. */
	/* RX ring at the start of the pool, TX ring right after it. */
	hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
		adapter->desc_pool.virt;
	hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
	offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
	hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
		(adapter->desc_pool.virt + offset);
	hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;

	if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
		return 1;
	if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
		return 1;

	return 0;
}
/**
 * free_dma_buf - release DMA buffer resources
 * @adapter: Adapter information structure.
 * @dma_buf: The DMA buffer to release.
 * @direction: The DMA direction the buffer was mapped with.
 *
 * This routine is just a helper function to release the DMA buffer
 * resources: it unmaps the DMA mapping, frees the socket buffer, and clears
 * the bookkeeping fields.
 */
static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
	int direction)
{
	pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction);
	dev_kfree_skb(dma_buf->skb);
	dma_buf->skb = NULL;
	dma_buf->dma = 0;
}
/**
 * ksz_init_rx_buffers - initialize receive descriptors
 * @adapter: Adapter information structure.
 *
 * This routine initializes DMA buffers for receiving: every receive
 * descriptor gets an MTU-sized socket buffer mapped for DMA.
 *
 * NOTE(review): an alloc_skb() failure is not handled here -- the
 * descriptor is still programmed with the (then stale/zero) dma/len
 * values; confirm whether callers guarantee allocation succeeds.
 */
static void ksz_init_rx_buffers(struct dev_info *adapter)
{
	int i;
	struct ksz_desc *desc;
	struct ksz_dma_buf *dma_buf;
	struct ksz_hw *hw = &adapter->hw;
	struct ksz_desc_info *info = &hw->rx_desc_info;

	for (i = 0; i < hw->rx_desc_info.alloc; i++) {
		get_rx_pkt(info, &desc);

		dma_buf = DMA_BUFFER(desc);
		/* Re-allocate the buffer if the MTU has changed. */
		if (dma_buf->skb && dma_buf->len != adapter->mtu)
			free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE);
		dma_buf->len = adapter->mtu;
		if (!dma_buf->skb)
			dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
		if (dma_buf->skb && !dma_buf->dma) {
			dma_buf->skb->dev = adapter->dev;
			dma_buf->dma = pci_map_single(
				adapter->pdev,
				skb_tail_pointer(dma_buf->skb),
				dma_buf->len,
				PCI_DMA_FROMDEVICE);
		}

		/* Set descriptor. */
		set_rx_buf(desc, dma_buf->dma);
		set_rx_len(desc, dma_buf->len);
		release_desc(desc);
	}
}
/**
 * ksz_alloc_mem - allocate memory for hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This function allocates memory for use by hardware descriptors for
 * receiving and transmitting, and derives the transmit interrupt throttling
 * mask.
 *
 * Return 0 if successful.
 */
static int ksz_alloc_mem(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;

	/* Determine the number of receive and transmit descriptors. */
	hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
	hw->tx_desc_info.alloc = NUM_OF_TX_DESC;

	/* Determine how many descriptors to skip transmit interrupt. */
	hw->tx_int_cnt = 0;
	hw->tx_int_mask = NUM_OF_TX_DESC / 4;
	if (hw->tx_int_mask > 8)
		hw->tx_int_mask = 8;
	/* tx_int_cnt is reused as a temporary here to count the bits of
	 * tx_int_mask; the final mask is 2^(bits-1) - 1 and tx_int_cnt is
	 * reset to zero afterwards.
	 */
	while (hw->tx_int_mask) {
		hw->tx_int_cnt++;
		hw->tx_int_mask >>= 1;
	}
	if (hw->tx_int_cnt) {
		hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
		hw->tx_int_cnt = 0;
	}

	/* Determine the descriptor size, rounded up to DESC_ALIGNMENT. */
	hw->rx_desc_info.size =
		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
		DESC_ALIGNMENT) * DESC_ALIGNMENT);
	hw->tx_desc_info.size =
		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
		DESC_ALIGNMENT) * DESC_ALIGNMENT);
	if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
		pr_alert("Hardware descriptor size not right!\n");
	ksz_check_desc_num(&hw->rx_desc_info);
	ksz_check_desc_num(&hw->tx_desc_info);

	/* Allocate descriptors. */
	if (ksz_alloc_desc(adapter))
		return 1;

	return 0;
}
/**
 * ksz_free_desc - free software and hardware descriptors
 * @adapter: Adapter information structure.
 *
 * This local routine frees the software and hardware descriptors allocated
 * by ksz_alloc_desc().
 */
static void ksz_free_desc(struct dev_info *adapter)
{
	struct ksz_hw *hw = &adapter->hw;

	/* Reset descriptor. */
	hw->rx_desc_info.ring_virt = NULL;
	hw->tx_desc_info.ring_virt = NULL;
	hw->rx_desc_info.ring_phys = 0;
	hw->tx_desc_info.ring_phys = 0;

	/* Free memory. */
	if (adapter->desc_pool.alloc_virt)
		pci_free_consistent(
			adapter->pdev,
			adapter->desc_pool.alloc_size,
			adapter->desc_pool.alloc_virt,
			adapter->desc_pool.dma_addr);

	/* Reset resource pool. */
	adapter->desc_pool.alloc_size = 0;
	adapter->desc_pool.alloc_virt = NULL;

	/* Free the software rings (kfree(NULL) is a no-op). */
	kfree(hw->rx_desc_info.ring);
	hw->rx_desc_info.ring = NULL;
	kfree(hw->tx_desc_info.ring);
	hw->tx_desc_info.ring = NULL;
}
/**
 * ksz_free_buffers - free buffers used in the descriptors
 * @adapter: Adapter information structure.
 * @desc_info: Descriptor information structure.
 * @direction: The DMA direction the buffers were mapped with.
 *
 * This local routine frees every DMA buffer still attached to a descriptor
 * in the given ring.
 */
static void ksz_free_buffers(struct dev_info *adapter,
	struct ksz_desc_info *desc_info, int direction)
{
	int i;

	for (i = 0; i < desc_info->alloc; i++) {
		struct ksz_dma_buf *dma_buf =
			DMA_BUFFER(&desc_info->ring[i]);

		if (dma_buf->skb)
			free_dma_buf(adapter, dma_buf, direction);
	}
}
/**
 * ksz_free_mem - free all resources used by descriptors
 * @adapter: Adapter information structure.
 *
 * This local routine frees all the resources allocated by ksz_alloc_mem():
 * transmit buffers, receive buffers, and finally the descriptor rings.
 */
static void ksz_free_mem(struct dev_info *adapter)
{
	/* Free transmit buffers. */
	ksz_free_buffers(adapter, &adapter->hw.tx_desc_info,
		PCI_DMA_TODEVICE);

	/* Free receive buffers. */
	ksz_free_buffers(adapter, &adapter->hw.rx_desc_info,
		PCI_DMA_FROMDEVICE);

	/* Free descriptors. */
	ksz_free_desc(adapter);
}
/*
 * get_mib_counters - sum MIB counters over a range of ports
 * @hw: The hardware instance.
 * @first: The first port in the range.
 * @cnt: The number of ports to sum.
 * @counter: Output array of TOTAL_PORT_COUNTER_NUM accumulated counters.
 */
static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
	u64 *counter)
{
	int port;

	memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);

	/* Accumulate each port's counters into the shared totals. */
	for (port = first; port < first + cnt; port++) {
		struct ksz_port_mib *port_mib = &hw->port_mib[port];
		int mib;

		for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
			counter[mib] += port_mib->counter[mib];
	}
}
/**
 * send_packet - send packet
 * @skb: Socket buffer.
 * @dev: Network device.
 *
 * This routine is used to send a packet out to the network: it maps the
 * packet (and any page fragments) for DMA, chains the transmit descriptors,
 * and hands them to the hardware.  The caller must have reserved enough
 * descriptors via hw_alloc_pkt().
 */
static void send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct ksz_desc *desc;
	struct ksz_desc *first;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->tx_desc_info;
	struct ksz_dma_buf *dma_buf;
	int len;
	int last_frag = skb_shinfo(skb)->nr_frags;

	/*
	 * KSZ8842 with multiple device interfaces needs to be told which port
	 * to send.
	 */
	if (hw->dev_count > 1)
		hw->dst_ports = 1 << priv->port.first_port;

	/* Hardware will pad the length to 60. */
	len = skb->len;

	/* Remember the very first descriptor. */
	first = info->cur;
	desc = first;

	dma_buf = DMA_BUFFER(desc);
	if (last_frag) {
		int frag;
		skb_frag_t *this_frag;

		/* First descriptor carries the linear (header) part. */
		dma_buf->len = skb_headlen(skb);

		dma_buf->dma = pci_map_single(
			hw_priv->pdev, skb->data, dma_buf->len,
			PCI_DMA_TODEVICE);
		set_tx_buf(desc, dma_buf->dma);
		set_tx_len(desc, dma_buf->len);

		/* One additional descriptor per page fragment. */
		frag = 0;
		do {
			this_frag = &skb_shinfo(skb)->frags[frag];

			/* Get a new descriptor. */
			get_tx_pkt(info, &desc);

			/* Keep track of descriptors used so far. */
			++hw->tx_int_cnt;

			dma_buf = DMA_BUFFER(desc);
			dma_buf->len = this_frag->size;

			dma_buf->dma = pci_map_single(
				hw_priv->pdev,
				page_address(this_frag->page) +
				this_frag->page_offset,
				dma_buf->len,
				PCI_DMA_TODEVICE);
			set_tx_buf(desc, dma_buf->dma);
			set_tx_len(desc, dma_buf->len);

			frag++;
			if (frag == last_frag)
				break;

			/* Do not release the last descriptor here. */
			release_desc(desc);
		} while (1);

		/* current points to the last descriptor. */
		info->cur = desc;

		/* Release the first descriptor. */
		/* Released last so the hardware only sees a complete chain. */
		release_desc(first);
	} else {
		/* Single descriptor covers the whole packet. */
		dma_buf->len = len;

		dma_buf->dma = pci_map_single(
			hw_priv->pdev, skb->data, dma_buf->len,
			PCI_DMA_TODEVICE);
		set_tx_buf(desc, dma_buf->dma);
		set_tx_len(desc, dma_buf->len);
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		(desc)->sw.buf.tx.csum_gen_tcp = 1;
		(desc)->sw.buf.tx.csum_gen_udp = 1;
	}

	/*
	 * The last descriptor holds the packet so that it can be returned to
	 * network subsystem after all descriptors are transmitted.
	 */
	dma_buf->skb = skb;

	hw_send_pkt(hw);

	/* Update transmit statistics. */
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;
}
/**
 * transmit_cleanup - clean up transmit descriptors
 * @hw_priv: Network device information.
 * @normal: Non-zero for normal cleanup (stop at hardware-owned
 *	descriptors); zero to forcibly reclaim everything.
 *
 * This routine is called to clean up the transmitted buffers: it unmaps the
 * DMA mappings, frees the socket buffers, and returns the descriptors to the
 * available pool.
 */
static void transmit_cleanup(struct dev_info *hw_priv, int normal)
{
	int last;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_desc_info *info = &hw->tx_desc_info;
	struct ksz_desc *desc;
	struct ksz_dma_buf *dma_buf;
	struct net_device *dev = NULL;

	spin_lock(&hw_priv->hwlock);
	last = info->last;

	while (info->avail < info->alloc) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[last];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.tx.hw_owned) {
			if (normal)
				break;	/* still in flight; stop here */
			else
				reset_desc(desc, status);
		}

		dma_buf = DMA_BUFFER(desc);
		pci_unmap_single(
			hw_priv->pdev, dma_buf->dma, dma_buf->len,
			PCI_DMA_TODEVICE);

		/* This descriptor contains the last buffer in the packet. */
		if (dma_buf->skb) {
			dev = dma_buf->skb->dev;

			/* Release the packet back to network subsystem. */
			dev_kfree_skb_irq(dma_buf->skb);
			dma_buf->skb = NULL;
		}

		/* Free the transmitted descriptor. */
		last++;
		last &= info->mask;
		info->avail++;
	}
	info->last = last;
	spin_unlock(&hw_priv->hwlock);

	/* Notify the network subsystem that the packet has been sent. */
	if (dev)
		dev->trans_start = jiffies;
}
/**
 * tx_done - transmit done processing
 * @hw_priv: Network device information.
 *
 * This routine is called when the transmit interrupt is triggered,
 * indicating either a packet is sent successfully or there are transmit
 * errors.  It reclaims finished descriptors and wakes any stopped queues.
 */
static void tx_done(struct dev_info *hw_priv)
{
	struct ksz_hw *hw = &hw_priv->hw;
	int port = 0;

	transmit_cleanup(hw_priv, 1);

	/* Wake every queue that was stopped while descriptors were full. */
	while (port < hw->dev_count) {
		struct net_device *dev = hw->port_info[port].pdev;

		if (netif_running(dev) && netif_queue_stopped(dev))
			netif_wake_queue(dev);
		port++;
	}
}
/* Carry metadata from a replaced skb over to its replacement and free the
 * original.
 */
static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
{
	skb->dev = old->dev;
	skb->protocol = old->protocol;
	skb->ip_summed = old->ip_summed;
	skb->csum = old->csum;
	skb_set_network_header(skb, ETH_HLEN);

	dev_kfree_skb(old);
}
/**
 * netdev_tx - send out packet
 * @skb: Socket buffer.
 * @dev: Network device.
 *
 * This function is used by the upper network layer to send out a packet.
 *
 * Return NETDEV_TX_OK if successful; NETDEV_TX_BUSY when no descriptor or
 * buffer could be obtained.
 */
static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int left;
	int num = 1;
	int rc = 0;

	/* Hardware bug workaround: pad short frames to 50 bytes in
	 * software before handing them to the chip.
	 */
	if (hw->features & SMALL_PACKET_TX_BUG) {
		struct sk_buff *org_skb = skb;

		if (skb->len <= 48) {
			if (skb_end_pointer(skb) - skb->data >= 50) {
				/* Enough tailroom: pad in place. */
				memset(&skb->data[skb->len], 0, 50 - skb->len);
				skb->len = 50;
			} else {
				/* Copy into a fresh, large enough buffer. */
				skb = dev_alloc_skb(50);
				if (!skb)
					return NETDEV_TX_BUSY;
				memcpy(skb->data, org_skb->data, org_skb->len);
				memset(&skb->data[org_skb->len], 0,
					50 - org_skb->len);
				skb->len = 50;
				copy_old_skb(org_skb, skb);
			}
		}
	}

	spin_lock_irq(&hw_priv->hwlock);

	num = skb_shinfo(skb)->nr_frags + 1;
	left = hw_alloc_pkt(hw, skb->len, num);
	if (left) {
		/*
		 * Linearize the packet when there are not enough descriptors
		 * for all fragments, or when IPv6 checksum offload would be
		 * needed (the hardware cannot generate IPv6 checksums).
		 *
		 * skb->protocol is big endian (__be16), so the host-order
		 * constant is converted with htons() for the comparison.
		 */
		if (left < num ||
				((CHECKSUM_PARTIAL == skb->ip_summed) &&
				(htons(ETH_P_IPV6) == skb->protocol))) {
			struct sk_buff *org_skb = skb;

			skb = dev_alloc_skb(org_skb->len);
			if (!skb) {
				rc = NETDEV_TX_BUSY;
				goto unlock;
			}
			skb_copy_and_csum_dev(org_skb, skb->data);
			org_skb->ip_summed = CHECKSUM_NONE;
			skb->len = org_skb->len;
			copy_old_skb(org_skb, skb);
		}
		send_packet(skb, dev);
		if (left <= num)
			netif_stop_queue(dev);
	} else {
		/* Stop the transmit queue until packet is allocated. */
		netif_stop_queue(dev);
		rc = NETDEV_TX_BUSY;
	}
unlock:
	spin_unlock_irq(&hw_priv->hwlock);

	return rc;
}
/**
* netdev_tx_timeout - transmit timeout processing
* @dev: Network device.
*
* This routine is called when the transmit timer expires. That indicates the
* hardware is not running correctly because transmit interrupts are not
* triggered to free up resources so that the transmit routine can continue
* sending out packets. The hardware is reset to correct the problem.
*/
static void netdev_tx_timeout(struct net_device *dev)
{
	static unsigned long last_reset;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int port;
	if (hw->dev_count > 1) {
		/*
		 * Only reset the hardware if time between calls is long
		 * enough.
		 */
		if (jiffies - last_reset <= dev->watchdog_timeo)
			hw_priv = NULL;	/* skip the reset block below */
	}
	last_reset = jiffies;
	if (hw_priv) {
		/* Full restart: quiesce the chip and flush both rings... */
		hw_dis_intr(hw);
		hw_disable(hw);
		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);
		ksz_init_rx_buffers(hw_priv);
		hw_reset(hw);
		/* ...reprogram descriptor bases, MAC and multicast state... */
		hw_set_desc_base(hw,
			hw->tx_desc_info.ring_phys,
			hw->rx_desc_info.ring_phys);
		hw_set_addr(hw);
		if (hw->all_multi)
			hw_set_multicast(hw, hw->all_multi);
		else if (hw->multi_list_size)
			hw_set_grp_addr(hw);
		if (hw->dev_count > 1) {
			/* ...restore per-port STP state for running ports... */
			hw_set_add_addr(hw);
			for (port = 0; port < SWITCH_PORT_NUM; port++) {
				struct net_device *port_dev;
				port_set_stp_state(hw, port,
					STP_STATE_DISABLED);
				port_dev = hw->port_info[port].pdev;
				if (netif_running(port_dev))
					port_set_stp_state(hw, port,
						STP_STATE_SIMPLE);
			}
		}
		/* ...and finally re-enable DMA and interrupts. */
		hw_enable(hw);
		hw_ena_intr(hw);
	}
	dev->trans_start = jiffies;	/* avoid an immediate re-timeout */
	netif_wake_queue(dev);
}
/*
 * Mark the TCP checksum of an IPv4 frame as already verified by the
 * hardware.  Called only when hardware RX checksumming is enabled.
 */
static inline void csum_verified(struct sk_buff *skb)
{
	unsigned short protocol;
	struct iphdr *iph;
	protocol = skb->protocol;
	skb_reset_network_header(skb);
	iph = (struct iphdr *) skb_network_header(skb);
	if (protocol == htons(ETH_P_8021Q)) {
		/*
		 * VLAN-tagged frame: the encapsulated ethertype happens to
		 * line up with iphdr::tot_len in this overlay (byte offset
		 * 2); read it, then skip the 4-byte tag to reach the real
		 * IP header.
		 */
		protocol = iph->tot_len;
		skb_set_network_header(skb, VLAN_HLEN);
		iph = (struct iphdr *) skb_network_header(skb);
	}
	if (protocol == htons(ETH_P_IP)) {
		if (iph->protocol == IPPROTO_TCP)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
/*
 * Copy one received frame out of its DMA buffer into a fresh skb and hand
 * it to the stack.  Returns 0 on success, -ENOMEM if no skb could be
 * allocated (the frame is then dropped and counted).
 */
static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
	struct ksz_desc *desc, union desc_stat status)
{
	int packet_len;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_dma_buf *dma_buf;
	struct sk_buff *skb;
	int rx_status;
	/* Received length includes 4-byte CRC. */
	packet_len = status.rx.frame_len - 4;
	dma_buf = DMA_BUFFER(desc);
	/* Make the DMA buffer contents visible to the CPU before copying. */
	pci_dma_sync_single_for_cpu(
		hw_priv->pdev, dma_buf->dma, packet_len + 4,
		PCI_DMA_FROMDEVICE);
	do {
		/* skb->data != skb->head */
		skb = dev_alloc_skb(packet_len + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return -ENOMEM;
		}
		/*
		 * Align socket buffer in 4-byte boundary for better
		 * performance.
		 */
		skb_reserve(skb, 2);
		/* Copy out of the DMA buffer; the descriptor gets reused. */
		memcpy(skb_put(skb, packet_len),
			dma_buf->skb->data, packet_len);
	} while (0);
	skb->protocol = eth_type_trans(skb, dev);
	if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
		csum_verified(skb);
	/* Update receive statistics. */
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += packet_len;
	/* Notify upper layer for received packet. */
	rx_status = netif_rx(skb);
	return 0;
}
/*
 * Drain completed receive descriptors for the single-device case.
 * Returns the number of frames delivered to the stack.
 */
static int dev_rcv_packets(struct dev_info *hw_priv)
{
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	struct ksz_desc *desc;
	union desc_stat status;
	int budget = info->alloc;
	int index = info->next;
	int received = 0;
	/* Walk the ring until a hardware-owned descriptor is reached. */
	while (budget--) {
		desc = &info->ring[index];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;
		/* Status is only valid for an unfragmented frame. */
		if (status.rx.first_desc && status.rx.last_desc &&
				!rx_proc(dev, hw, desc, status))
			received++;
		release_desc(desc);
		index = (index + 1) & info->mask;
	}
	info->next = index;
	return received;
}
/*
 * Drain completed receive descriptors, routing each frame to the
 * net_device of the switch port it arrived on.  Returns the number of
 * frames delivered to the stack.
 */
static int port_rcv_packets(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;	/* at most one full ring per call */
	struct ksz_desc *desc;
	int received = 0;
	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;
		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);
			dev = hw->port_info[p].pdev;
			/* Drop frames for interfaces that are down. */
			if (!netif_running(dev))
				goto release_packet;
		}
		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			if (rx_proc(dev, hw, desc, status))
				goto release_packet;
			received++;
		}
release_packet:
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;
	return received;
}
/*
 * Receive-drain variant used when huge frames are enabled: error
 * reporting is on, so oversize frames (reported as "too long") are still
 * accepted while any other error is counted and dropped.
 */
static int dev_rcv_special(struct dev_info *hw_priv)
{
	int next;
	union desc_stat status;
	struct ksz_hw *hw = &hw_priv->hw;
	struct net_device *dev = hw->port_info[0].pdev;
	struct ksz_desc_info *info = &hw->rx_desc_info;
	int left = info->alloc;	/* at most one full ring per call */
	struct ksz_desc *desc;
	int received = 0;
	next = info->next;
	while (left--) {
		/* Get next descriptor which is not hardware owned. */
		desc = &info->ring[next];
		status.data = le32_to_cpu(desc->phw->ctrl.data);
		if (status.rx.hw_owned)
			break;
		if (hw->dev_count > 1) {
			/* Get received port number. */
			int p = HW_TO_DEV_PORT(status.rx.src_port);
			dev = hw->port_info[p].pdev;
			/* Drop frames for interfaces that are down. */
			if (!netif_running(dev))
				goto release_packet;
		}
		/* Status valid only when last descriptor bit is set. */
		if (status.rx.last_desc && status.rx.first_desc) {
			/*
			 * Receive without error. With receive errors
			 * disabled, packets with receive errors will be
			 * dropped, so no need to check the error bit.
			 */
			if (!status.rx.error || (status.data &
					KS_DESC_RX_ERROR_COND) ==
					KS_DESC_RX_ERROR_TOO_LONG) {
				if (rx_proc(dev, hw, desc, status))
					goto release_packet;
				received++;
			} else {
				struct dev_priv *priv = netdev_priv(dev);
				/* Update receive error statistics. */
				priv->port.counter[OID_COUNTER_RCV_ERROR]++;
			}
		}
release_packet:
		release_desc(desc);
		next++;
		next &= info->mask;
	}
	info->next = next;
	return received;
}
/*
 * Receive tasklet: drain the ring, rescheduling itself while frames keep
 * arriving, and re-enable the RX interrupt once the ring is empty.
 */
static void rx_proc_task(unsigned long data)
{
	struct dev_info *adapter = (struct dev_info *) data;
	struct ksz_hw *hw = &adapter->hw;
	if (!hw->enabled)
		return;
	if (likely(adapter->dev_rcv(adapter))) {
		/* Frames were processed; poll once more after acking. */
		hw_ack_intr(hw, KS884X_INT_RX);
		tasklet_schedule(&adapter->rx_tasklet);
	} else {
		/* In case receive process is suspended because of overrun. */
		hw_resume_rx(hw);
		/* tasklets are interruptible. */
		spin_lock_irq(&adapter->hwlock);
		hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
		spin_unlock_irq(&adapter->hwlock);
	}
}
/*
 * Transmit tasklet: reclaim finished descriptors, then unmask the TX
 * interrupt again.
 */
static void tx_proc_task(unsigned long data)
{
	struct dev_info *adapter = (struct dev_info *) data;
	struct ksz_hw *hw = &adapter->hw;
	hw_ack_intr(hw, KS884X_INT_TX_MASK);
	tx_done(adapter);
	/* Tasklets are interruptible, so guard the interrupt mask update. */
	spin_lock_irq(&adapter->hwlock);
	hw_turn_on_intr(hw, KS884X_INT_TX);
	spin_unlock_irq(&adapter->hwlock);
}
/* Handle the "receive DMA stopped" interrupt. */
static inline void handle_rx_stop(struct ksz_hw *hw)
{
	if (hw->rx_stop == 0) {
		/* Stop was not requested; mask this interrupt source off. */
		hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
	} else if (hw->rx_stop == 1) {
		/* Receive just has been started; remember the stop event. */
		hw->rx_stop++;
	} else if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
		/* Receive is supposed to be running; kick it off again. */
		hw_start_rx(hw);
	} else {
		hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
		hw->rx_stop = 0;
	}
}
/**
* netdev_intr - interrupt handling
* @irq: Interrupt number.
* @dev_id: Network device.
*
* This function is called by upper network layer to signal interrupt.
*
* Return IRQ_HANDLED if interrupt is handled.
*/
static irqreturn_t netdev_intr(int irq, void *dev_id)
{
	uint int_enable = 0;
	struct net_device *dev = (struct net_device *) dev_id;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	hw_read_intr(hw, &int_enable);
	/* Not our interrupt! */
	if (!int_enable)
		return IRQ_NONE;
	do {
		hw_ack_intr(hw, int_enable);
		/* Only service sources that are currently unmasked. */
		int_enable &= hw->intr_mask;
		if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
			/* Defer transmit completion to the tx tasklet. */
			hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
			tasklet_schedule(&hw_priv->tx_tasklet);
		}
		if (likely(int_enable & KS884X_INT_RX)) {
			/* Defer receive processing to the rx tasklet. */
			hw_dis_intr_bit(hw, KS884X_INT_RX);
			tasklet_schedule(&hw_priv->rx_tasklet);
		}
		if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
			dev->stats.rx_fifo_errors++;
			hw_resume_rx(hw);
		}
		if (unlikely(int_enable & KS884X_INT_PHY)) {
			struct ksz_port *port = &priv->port;
			/* Link interrupts work; monitor can rely on them. */
			hw->features |= LINK_INT_WORKING;
			port_get_link_speed(port);
		}
		if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
			handle_rx_stop(hw);
			break;
		}
		if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
			u32 data;
			hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
			pr_info("Tx stopped\n");
			data = readl(hw->io + KS_DMA_TX_CTRL);
			if (!(data & DMA_TX_ENABLE))
				pr_info("Tx disabled\n");
			break;
		}
	} while (0);
	/* Re-enable the (possibly updated) interrupt mask. */
	hw_ena_intr(hw);
	return IRQ_HANDLED;
}
/*
* Linux network device functions
*/
static unsigned long next_jiffies;	/* next staggered MIB counter read time; shared by all adapters (see netdev_open) */
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Netpoll entry: mask chip interrupts and invoke the interrupt handler
 * directly; netdev_intr() re-enables interrupts via hw_ena_intr() before
 * returning.
 */
static void netdev_netpoll(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	hw_dis_intr(&hw_priv->hw);
	netdev_intr(dev->irq, dev);
}
#endif
/* Recompute per-port base VLAN membership after a bridge state change. */
static void bridge_change(struct ksz_hw *hw)
{
	struct ksz_switch *sw = hw->ksz_switch;
	u8 member;
	int p;
	/* No ports forwarding: isolate everything behind the host port. */
	if (!sw->member) {
		port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
		sw_block_addr(hw);
	}
	for (p = 0; p < SWITCH_PORT_NUM; p++) {
		if (sw->port_cfg[p].stp_state == STP_STATE_FORWARDING)
			member = HOST_MASK | sw->member;
		else
			member = HOST_MASK | (1 << p);
		/* Only touch the hardware when membership really changed. */
		if (member != sw->port_cfg[p].member)
			sw_cfg_port_base_vlan(hw, p, member);
	}
}
/**
* netdev_close - close network device
* @dev: Network device.
*
* This function process the close operation of network device. This is caused
* by the user command "ifconfig ethX down."
*
* Return 0 if successful; otherwise an error code indicating failure.
*/
static int netdev_close(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = &hw_priv->hw;
	int pi;
	netif_stop_queue(dev);
	ksz_stop_timer(&priv->monitor_timer_info);
	/* Need to shut the port manually in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);
		/* Port is closed. Need to change bridge setting. */
		if (hw->features & STP_SUPPORT) {
			pi = 1 << port->first_port;
			if (hw->ksz_switch->member & pi) {
				hw->ksz_switch->member &= ~pi;
				bridge_change(hw);
			}
		}
	}
	/* Extra MAC addresses are only registered for secondary ports. */
	if (port->first_port > 0)
		hw_del_addr(hw, dev->dev_addr);
	if (!hw_priv->wol_enable)
		port_set_power_saving(port, true);
	/* Undo this interface's contribution to the shared filter counts. */
	if (priv->multicast)
		--hw->all_multi;
	if (priv->promiscuous)
		--hw->promiscuous;
	hw_priv->opened--;
	/* The last closer shuts down the shared hardware. */
	if (!(hw_priv->opened)) {
		ksz_stop_timer(&hw_priv->mib_timer_info);
		flush_work(&hw_priv->mib_read);
		hw_dis_intr(hw);
		hw_disable(hw);
		hw_clr_multicast(hw);
		/* Delay for receive task to stop scheduling itself. */
		msleep(2000 / HZ);
		tasklet_disable(&hw_priv->rx_tasklet);
		tasklet_disable(&hw_priv->tx_tasklet);
		free_irq(dev->irq, hw_priv->dev);
		transmit_cleanup(hw_priv, 0);
		hw_reset_pkts(&hw->rx_desc_info);
		hw_reset_pkts(&hw->tx_desc_info);
		/* Clean out static MAC table when the switch is shutdown. */
		if (hw->features & STP_SUPPORT)
			sw_clr_sta_mac_table(hw);
	}
	return 0;
}
/*
 * Propagate the RX_HUGE_FRAME feature flag: configure the switch chip
 * (when present) and select the matching receive-drain routine.
 */
static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
{
	int huge = (hw->features & RX_HUGE_FRAME) != 0;
	if (hw->ksz_switch) {
		u32 data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
		if (huge)
			data |= SWITCH_HUGE_PACKET;
		else
			data &= ~SWITCH_HUGE_PACKET;
		writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
	}
	if (huge) {
		/* Error reporting is needed to accept "too long" frames. */
		hw->rx_cfg |= DMA_RX_ERROR;
		hw_priv->dev_rcv = dev_rcv_special;
	} else {
		hw->rx_cfg &= ~DMA_RX_ERROR;
		hw_priv->dev_rcv = hw->dev_count > 1 ?
			port_rcv_packets : dev_rcv_packets;
	}
}
/*
 * One-time hardware setup performed by the first interface to open:
 * claim the IRQ, arm the tasklets, reset the chip and reprogram its
 * basic state.  Returns 0 or the request_irq() error.
 */
static int prepare_hardware(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int rc = 0;
	/* Remember the network device that requests interrupts. */
	hw_priv->dev = dev;
	rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
	if (rc)
		return rc;
	/* Allow the rx/tx tasklets to run again. */
	tasklet_enable(&hw_priv->rx_tasklet);
	tasklet_enable(&hw_priv->tx_tasklet);
	/* Start from a clean receive-filter state. */
	hw->promiscuous = 0;
	hw->all_multi = 0;
	hw->multi_list_size = 0;
	hw_reset(hw);
	/* Reprogram ring bases and MAC address lost by the reset. */
	hw_set_desc_base(hw,
		hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
	hw_set_addr(hw);
	hw_cfg_huge_frame(hw_priv, hw);
	ksz_init_rx_buffers(hw_priv);
	return 0;
}
static void set_media_state(struct net_device *dev, int media_state)
{
struct dev_priv *priv = netdev_priv(dev);
if (media_state == priv->media_state)
netif_carrier_on(dev);
else
netif_carrier_off(dev);
netif_info(priv, link, dev, "link %s\n",
media_state == priv->media_state ? "on" : "off");
}
/**
* netdev_open - open network device
* @dev: Network device.
*
* This function process the open operation of network device. This is caused
* by the user command "ifconfig ethX up."
*
* Return 0 if successful; otherwise an error code indicating failure.
*/
static int netdev_open(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int i;
	int p;
	int rc = 0;
	priv->multicast = 0;
	priv->promiscuous = 0;
	/* Reset device statistics. */
	memset(&dev->stats, 0, sizeof(struct net_device_stats));
	memset((void *) port->counter, 0,
		(sizeof(u64) * OID_COUNTER_LAST));
	/* The first opener sets up the shared hardware. */
	if (!(hw_priv->opened)) {
		rc = prepare_hardware(dev);
		if (rc)
			return rc;
		/* Stagger the per-port MIB counter read times. */
		for (i = 0; i < hw->mib_port_cnt; i++) {
			if (next_jiffies < jiffies)
				next_jiffies = jiffies + HZ * 2;
			else
				next_jiffies += HZ * 1;
			hw_priv->counter[i].time = next_jiffies;
			hw->port_mib[i].state = media_disconnected;
			port_init_cnt(hw, i);
		}
		if (hw->ksz_switch)
			hw->port_mib[HOST_PORT].state = media_connected;
		else {
			/* Non-switch chip: prime Wake-on-LAN state. */
			hw_add_wol_bcast(hw);
			hw_cfg_wol_pme(hw, 0);
			hw_clr_wol_pme_status(&hw_priv->hw);
		}
	}
	port_set_power_saving(port, false);
	for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
		/*
		 * Initialize to invalid value so that link detection
		 * is done.
		 */
		hw->port_info[p].partner = 0xFF;
		hw->port_info[p].state = media_disconnected;
	}
	/* Need to open the port in multiple device interfaces mode. */
	if (hw->dev_count > 1) {
		port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
		if (port->first_port > 0)
			hw_add_addr(hw, dev->dev_addr);
	}
	port_get_link_speed(port);
	if (port->force_link)
		port_force_link_speed(port);
	else
		port_set_link_speed(port);
	/* The first opener also enables DMA, interrupts and the MIB timer. */
	if (!(hw_priv->opened)) {
		hw_setup_intr(hw);
		hw_enable(hw);
		hw_ena_intr(hw);
		if (hw->mib_port_cnt)
			ksz_start_timer(&hw_priv->mib_timer_info,
				hw_priv->mib_timer_info.period);
	}
	hw_priv->opened++;
	ksz_start_timer(&priv->monitor_timer_info,
		priv->monitor_timer_info.period);
	priv->media_state = port->linked->state;
	/* Report the current carrier state to the stack. */
	set_media_state(dev, media_connected);
	netif_start_queue(dev);
	return 0;
}
/* RX errors = rx_errors */
/* RX dropped = rx_dropped */
/* RX overruns = rx_fifo_errors */
/* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
/* TX errors = tx_errors */
/* TX dropped = tx_dropped */
/* TX overruns = tx_fifo_errors */
/* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
/* collisions = collisions */
/**
* netdev_query_statistics - query network device statistics
* @dev: Network device.
*
* This function returns the statistics of the network device. The device
* needs not be opened.
*
* Return network device statistics.
*/
static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	struct ksz_hw *hw = &priv->adapter->hw;
	struct ksz_port_mib *mib;
	int i;
	int p;
	dev->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
	dev->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
	/* Reset to zero to add count later. */
	dev->stats.multicast = 0;
	dev->stats.collisions = 0;
	dev->stats.rx_length_errors = 0;
	dev->stats.rx_crc_errors = 0;
	dev->stats.rx_frame_errors = 0;
	dev->stats.tx_window_errors = 0;
	/* Sum the MIB counters of every port behind this interface. */
	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
		mib = &hw->port_mib[p];
		dev->stats.multicast += (unsigned long)
			mib->counter[MIB_COUNTER_RX_MULTICAST];
		dev->stats.collisions += (unsigned long)
			mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
		dev->stats.rx_length_errors += (unsigned long)(
			mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
			mib->counter[MIB_COUNTER_RX_FRAGMENT] +
			mib->counter[MIB_COUNTER_RX_OVERSIZE] +
			mib->counter[MIB_COUNTER_RX_JABBER]);
		dev->stats.rx_crc_errors += (unsigned long)
			mib->counter[MIB_COUNTER_RX_CRC_ERR];
		dev->stats.rx_frame_errors += (unsigned long)(
			mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
			mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
		dev->stats.tx_window_errors += (unsigned long)
			mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
	}
	return &dev->stats;
}
/**
* netdev_set_mac_address - set network device MAC address
* @dev: Network device.
* @addr: Buffer of MAC address.
*
* This function is used to set the MAC address of the network device.
*
* Return 0 to indicate success.
*/
static int netdev_set_mac_address(struct net_device *dev, void *addr)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct sockaddr *mac = addr;
	uint interrupt;
	/* Secondary ports keep the primary MAC and add their own. */
	if (priv->port.first_port > 0)
		hw_del_addr(hw, dev->dev_addr);
	else {
		hw->mac_override = 1;
		memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
	}
	/*
	 * Copy only the 6-byte Ethernet address.  The previous
	 * MAX_ADDR_LEN (32) length read past the end of sockaddr::sa_data,
	 * which is only 14 bytes.
	 */
	memcpy(dev->dev_addr, mac->sa_data, MAC_ADDR_LEN);
	/* Block chip interrupts while the address registers are rewritten. */
	interrupt = hw_block_intr(hw);
	if (priv->port.first_port > 0)
		hw_add_addr(hw, dev->dev_addr);
	else
		hw_set_addr(hw);
	hw_restore_intr(hw, interrupt);
	return 0;
}
/*
 * Track per-interface promiscuous requests; hw->promiscuous counts how
 * many interfaces want the mode, and the hardware is only toggled on the
 * 0 <-> 1 transition.  Leaving promiscuous mode on a bridge port also
 * removes the port from the bridge membership.
 */
static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
	struct ksz_hw *hw, int promiscuous)
{
	if (promiscuous != priv->promiscuous) {
		u8 prev_state = hw->promiscuous;
		if (promiscuous)
			++hw->promiscuous;
		else
			--hw->promiscuous;
		priv->promiscuous = promiscuous;
		/* Turn on/off promiscuous mode. */
		if (hw->promiscuous <= 1 && prev_state <= 1)
			hw_set_promiscuous(hw, hw->promiscuous);
		/*
		 * Port is not in promiscuous mode, meaning it is released
		 * from the bridge.
		 */
		if ((hw->features & STP_SUPPORT) && !promiscuous &&
		    (dev->priv_flags & IFF_BRIDGE_PORT)) {
			struct ksz_switch *sw = hw->ksz_switch;
			int port = priv->port.first_port;
			port_set_stp_state(hw, port, STP_STATE_DISABLED);
			port = 1 << port;
			if (sw->member & port) {
				sw->member &= ~port;
				bridge_change(hw);
			}
		}
	}
}
/*
 * Track per-interface all-multicast requests; hw->all_multi counts how
 * many interfaces want the mode, and the hardware is only toggled on the
 * 0 <-> 1 transition.
 */
static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
	int multicast)
{
	u8 prev_count;
	if (multicast == priv->multicast)
		return;
	prev_count = hw->all_multi;
	if (multicast)
		hw->all_multi++;
	else
		hw->all_multi--;
	priv->multicast = multicast;
	/* Only touch the hardware on the 0 <-> 1 transition. */
	if (hw->all_multi <= 1 && prev_count <= 1)
		hw_set_multicast(hw, hw->all_multi);
}
/**
* netdev_set_rx_mode
* @dev: Network device.
*
* This routine is used to set multicast addresses or put the network device
* into promiscuous mode.
*/
static void netdev_set_rx_mode(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct netdev_hw_addr *ha;
	int multicast = (dev->flags & IFF_ALLMULTI);
	dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
	/* Multiple-interface mode: fall back to all-multicast reception. */
	if (hw_priv->hw.dev_count > 1)
		multicast |= (dev->flags & IFF_MULTICAST);
	dev_set_multicast(priv, hw, multicast);
	/* Cannot use different hashes in multiple device interfaces mode. */
	if (hw_priv->hw.dev_count > 1)
		return;
	if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
		int i = 0;
		/* List too big to support so turn on all multicast mode. */
		if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
			if (MAX_MULTICAST_LIST != hw->multi_list_size) {
				hw->multi_list_size = MAX_MULTICAST_LIST;
				++hw->all_multi;
				hw_set_multicast(hw, hw->all_multi);
			}
			return;
		}
		/* Program the exact multicast list into the chip. */
		netdev_for_each_mc_addr(ha, dev) {
			if (i >= MAX_MULTICAST_LIST)
				break;
			memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
		}
		hw->multi_list_size = (u8) i;
		hw_set_grp_addr(hw);
	} else {
		/* Leave all-multicast mode if a long list had forced it. */
		if (MAX_MULTICAST_LIST == hw->multi_list_size) {
			--hw->all_multi;
			hw_set_multicast(hw, hw->all_multi);
		}
		hw->multi_list_size = 0;
		hw_clr_multicast(hw);
	}
}
/*
 * Change the MTU and derive the hardware receive buffer size from it.
 * Only allowed while the interface is down; in multiple-interface mode
 * only the main device may change the shared setting.
 */
static int netdev_change_mtu(struct net_device *dev, int new_mtu)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int buf_size;
	if (netif_running(dev))
		return -EBUSY;
	/* Cannot use different MTU in multiple device interfaces mode. */
	if (hw->dev_count > 1 && dev != hw_priv->dev)
		return 0;
	if (new_mtu < 60)
		return -EINVAL;
	if (dev->mtu == new_mtu)
		return 0;
	buf_size = new_mtu + ETHERNET_HEADER_SIZE + 4;
	if (buf_size > MAX_RX_BUF_SIZE)
		return -EINVAL;
	if (buf_size > REGULAR_RX_BUF_SIZE) {
		hw->features |= RX_HUGE_FRAME;
		buf_size = MAX_RX_BUF_SIZE;
	} else {
		hw->features &= ~RX_HUGE_FRAME;
		buf_size = REGULAR_RX_BUF_SIZE;
	}
	/* Round the buffer size up to a multiple of 4 bytes. */
	buf_size = (buf_size + 3) & ~3;
	hw_priv->mtu = buf_size;
	dev->mtu = new_mtu;
	return 0;
}
/**
* netdev_ioctl - I/O control processing
* @dev: Network device.
* @ifr: Interface request structure.
* @cmd: I/O control code.
*
* This function is used to process I/O control calls.
*
* Return 0 to indicate success.
*/
static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int rc;
	int result = 0;
	struct mii_ioctl_data *data = if_mii(ifr);
	if (down_interruptible(&priv->proc_sem))
		return -ERESTARTSYS;
	/* assume success */
	rc = 0;
	switch (cmd) {
	/* Get address of MII PHY in use. */
	case SIOCGMIIPHY:
		data->phy_id = priv->id;
		/* Fallthrough... */
	/* Read MII PHY register. */
	case SIOCGMIIREG:
		/* Only PHY registers 0-5 are accessible through this path. */
		if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_r_phy(hw, port->linked->port_id, data->reg_num,
				&data->val_out);
		break;
	/* Write MII PHY register. */
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			result = -EPERM;
		else if (data->phy_id != priv->id || data->reg_num >= 6)
			result = -EIO;
		else
			hw_w_phy(hw, port->linked->port_id, data->reg_num,
				data->val_in);
		break;
	default:
		result = -EOPNOTSUPP;
	}
	up(&priv->proc_sem);
	return result;
}
/*
* MII support
*/
/**
* mdio_read - read PHY register
* @dev: Network device.
* @phy_id: The PHY id.
* @reg_num: The register number.
*
* This function returns the PHY register value.
*
* Return the register value.
*/
static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	u16 value;
	/* reg_num is doubled to form the hardware register offset. */
	hw_r_phy(port->hw, port->linked->port_id, reg_num << 1, &value);
	return value;
}
/**
* mdio_write - set PHY register
* @dev: Network device.
* @phy_id: The PHY id.
* @reg_num: The register number.
* @val: The register value.
*
* This procedure sets the PHY register value.
*/
static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct ksz_port *port = &priv->port;
	int n;
	int pi = port->first_port;
	/* Apply the write to every physical port behind this interface. */
	for (n = 0; n < port->port_cnt; n++, pi++)
		hw_w_phy(port->hw, pi, reg_num << 1, val);
}
/*
* ethtool support
*/
#define EEPROM_SIZE 0x40
static u16 eeprom_data[EEPROM_SIZE] = { 0 };
#define ADVERTISED_ALL \
(ADVERTISED_10baseT_Half | \
ADVERTISED_10baseT_Full | \
ADVERTISED_100baseT_Half | \
ADVERTISED_100baseT_Full)
/* These functions use the MII functions in mii.c. */
/**
* netdev_get_settings - get network device settings
* @dev: Network device.
* @cmd: Ethtool command.
*
* This function queries the PHY and returns its state in the ethtool command.
*
* Return 0 if successful; otherwise an error code.
*/
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
mutex_lock(&hw_priv->lock);
mii_ethtool_gset(&priv->mii_if, cmd);
cmd->advertising |= SUPPORTED_TP;
mutex_unlock(&hw_priv->lock);
/* Save advertised settings for workaround in next function. */
priv->advertising = cmd->advertising;
return 0;
}
/**
* netdev_set_settings - set network device settings
* @dev: Network device.
* @cmd: Ethtool command.
*
* This function sets the PHY according to the ethtool command.
*
* Return 0 if successful; otherwise an error code.
*/
static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_port *port = &priv->port;
	u32 speed = ethtool_cmd_speed(cmd);
	int rc;
	/*
	 * ethtool utility does not change advertised setting if auto
	 * negotiation is not specified explicitly.
	 */
	if (cmd->autoneg && priv->advertising == cmd->advertising) {
		/* Derive the advertised modes from the speed/duplex knobs. */
		cmd->advertising |= ADVERTISED_ALL;
		if (10 == speed)
			cmd->advertising &=
				~(ADVERTISED_100baseT_Full |
				ADVERTISED_100baseT_Half);
		else if (100 == speed)
			cmd->advertising &=
				~(ADVERTISED_10baseT_Full |
				ADVERTISED_10baseT_Half);
		if (0 == cmd->duplex)
			cmd->advertising &=
				~(ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Full);
		else if (1 == cmd->duplex)
			cmd->advertising &=
				~(ADVERTISED_100baseT_Half |
				ADVERTISED_10baseT_Half);
	}
	mutex_lock(&hw_priv->lock);
	if (cmd->autoneg &&
			(cmd->advertising & ADVERTISED_ALL) ==
			ADVERTISED_ALL) {
		/* Everything advertised: full auto-negotiation. */
		port->duplex = 0;
		port->speed = 0;
		port->force_link = 0;
	} else {
		/* port->duplex is stored 1-based so 0 can mean "auto". */
		port->duplex = cmd->duplex + 1;
		if (1000 != speed)
			port->speed = speed;
		if (cmd->autoneg)
			port->force_link = 0;
		else
			port->force_link = 1;
	}
	rc = mii_ethtool_sset(&priv->mii_if, cmd);
	mutex_unlock(&hw_priv->lock);
	return rc;
}
/**
* netdev_nway_reset - restart auto-negotiation
* @dev: Network device.
*
* This function restarts the PHY for auto-negotiation.
*
* Return 0 if successful; otherwise an error code.
*/
static int netdev_nway_reset(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	int result;
	/* Serialize PHY access with other configuration paths. */
	mutex_lock(&hw_priv->lock);
	result = mii_nway_restart(&priv->mii_if);
	mutex_unlock(&hw_priv->lock);
	return result;
}
/**
* netdev_get_link - get network device link status
* @dev: Network device.
*
* This function gets the link status from the PHY.
*
* Return true if PHY is linked and false otherwise.
*/
static u32 netdev_get_link(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	/* Ask the MII layer whether the PHY reports link. */
	return mii_link_ok(&priv->mii_if);
}
/**
* netdev_get_drvinfo - get network driver information
* @dev: Network device.
* @info: Ethtool driver info data structure.
*
* This procedure returns the driver information.
*/
static void netdev_get_drvinfo(struct net_device *dev,
	struct ethtool_drvinfo *info)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	/*
	 * Use bounded, always-NUL-terminated copies: the ethtool_drvinfo
	 * fields are fixed-size arrays and the unbounded strcpy calls
	 * would overflow them if any source string grew too long.
	 */
	snprintf(info->driver, sizeof(info->driver), "%s", DRV_NAME);
	snprintf(info->version, sizeof(info->version), "%s", DRV_VERSION);
	snprintf(info->bus_info, sizeof(info->bus_info), "%s",
		pci_name(hw_priv->pdev));
}
/**
* netdev_get_regs_len - get length of register dump
* @dev: Network device.
*
* This function returns the length of the register dump.
*
* Return length of the register dump.
*/
/*
 * Chip register windows dumped by netdev_get_regs() and sized by
 * netdev_get_regs_len().  `start` is inclusive, `end` exclusive; the
 * list is terminated by the { 0, 0 } entry.
 */
static struct hw_regs {
	int start;
	int end;
} hw_regs_range[] = {
	{ KS_DMA_TX_CTRL, KS884X_INTERRUPTS_STATUS },
	{ KS_ADD_ADDR_0_LO, KS_ADD_ADDR_F_HI },
	{ KS884X_ADDR_0_OFFSET, KS8841_WOL_FRAME_BYTE2_OFFSET },
	{ KS884X_SIDER_P, KS8842_SGCR7_P },
	{ KS8842_MACAR1_P, KS8842_TOSR8_P },
	{ KS884X_P1MBCR_P, KS8842_P3ERCR_P },
	{ 0, 0 }
};
static int netdev_get_regs_len(struct net_device *dev)
{
	struct hw_regs *r;
	/* 0x10 dwords of PCI configuration space come first in the dump. */
	int total = 0x10 * sizeof(u32);
	/* Add each register window, rounded up to whole dwords. */
	for (r = hw_regs_range; r->end > r->start; r++)
		total += (r->end - r->start + 3) / 4 * 4;
	return total;
}
/**
* netdev_get_regs - get register dump
* @dev: Network device.
* @regs: Ethtool registers data structure.
* @ptr: Buffer to store the register values.
*
* This procedure dumps the register values in the provided buffer.
*/
static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
	void *ptr)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	int *buf = (int *) ptr;
	struct hw_regs *range = hw_regs_range;
	int len;
	mutex_lock(&hw_priv->lock);
	regs->version = 0;
	/* First 0x40 bytes of PCI configuration space. */
	for (len = 0; len < 0x40; len += 4) {
		pci_read_config_dword(hw_priv->pdev, len, buf);
		buf++;
	}
	/* Then each chip register window listed in hw_regs_range. */
	while (range->end > range->start) {
		for (len = range->start; len < range->end; len += 4) {
			*buf = readl(hw->io + len);
			buf++;
		}
		range++;
	}
	mutex_unlock(&hw_priv->lock);
}
#define WOL_SUPPORT \
(WAKE_PHY | WAKE_MAGIC | \
WAKE_UCAST | WAKE_MCAST | \
WAKE_BCAST | WAKE_ARP)
/**
* netdev_get_wol - get Wake-on-LAN support
* @dev: Network device.
* @wol: Ethtool Wake-on-LAN data structure.
*
* This procedure returns Wake-on-LAN support.
*/
static void netdev_get_wol(struct net_device *dev,
	struct ethtool_wolinfo *wol)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	/* Report the supported and currently enabled wake-up events. */
	wol->supported = hw_priv->wol_support;
	wol->wolopts = hw_priv->wol_enable;
	/* No SecureOn password support. */
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
/**
* netdev_set_wol - set Wake-on-LAN support
* @dev: Network device.
* @wol: Ethtool Wake-on-LAN data structure.
*
* This function sets Wake-on-LAN support.
*
* Return 0 if successful; otherwise an error code.
*/
static int netdev_set_wol(struct net_device *dev,
	struct ethtool_wolinfo *wol)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	/* Need to find a way to retrieve the device IP address. */
	static const u8 net_addr[] = { 192, 168, 1, 1 };
	/* Reject wake-up events the hardware cannot generate. */
	if (wol->wolopts & ~hw_priv->wol_support)
		return -EINVAL;
	hw_priv->wol_enable = wol->wolopts;
	/* Link wakeup cannot really be disabled. */
	if (wol->wolopts)
		hw_priv->wol_enable |= WAKE_PHY;
	hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
	return 0;
}
/**
* netdev_get_msglevel - get debug message level
* @dev: Network device.
*
* This function returns current debug message level.
*
* Return current debug message flags.
*/
static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	/* Current netif_msg_* debug flag mask. */
	return priv->msg_enable;
}
/**
* netdev_set_msglevel - set debug message level
* @dev: Network device.
* @value: Debug message flags.
*
* This procedure sets debug message level.
*/
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct dev_priv *priv = netdev_priv(dev);
	/* Replace the netif_msg_* debug flag mask wholesale. */
	priv->msg_enable = value;
}
/**
* netdev_get_eeprom_len - get EEPROM length
* @dev: Network device.
*
* This function returns the length of the EEPROM.
*
* Return length of the EEPROM.
*/
static int netdev_get_eeprom_len(struct net_device *dev)
{
	/* EEPROM_SIZE counts 16-bit words; ethtool wants bytes. */
	return EEPROM_SIZE * 2;
}
/**
* netdev_get_eeprom - get EEPROM data
* @dev: Network device.
* @eeprom: Ethtool EEPROM data structure.
* @data: Buffer to store the EEPROM data.
*
* This function dumps the EEPROM data in the provided buffer.
*
* Return 0 if successful; otherwise an error code.
*/
#define EEPROM_MAGIC 0x10A18842
static int netdev_get_eeprom(struct net_device *dev,
	struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	u8 *eeprom_byte = (u8 *) eeprom_data;
	int i;
	int len;
	/* Number of 16-bit words needed to cover the requested byte range. */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	/*
	 * Refresh the words overlapping the request from the device.
	 * Note: eeprom_data is a file-scope cache shared by all devices.
	 */
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	eeprom->magic = EEPROM_MAGIC;
	memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);
	return 0;
}
/**
* netdev_set_eeprom - write EEPROM data
* @dev: Network device.
* @eeprom: Ethtool EEPROM data structure.
* @data: Data buffer.
*
* This function modifies the EEPROM data one byte at a time.
*
* Return 0 if successful; otherwise an error code.
*/
static int netdev_set_eeprom(struct net_device *dev,
	struct ethtool_eeprom *eeprom, u8 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	u16 eeprom_word[EEPROM_SIZE];
	u8 *eeprom_byte = (u8 *) eeprom_word;
	int i;
	int len;
	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;
	/* Refresh the cached words overlapping the requested byte range. */
	len = (eeprom->offset + eeprom->len + 1) / 2;
	for (i = eeprom->offset / 2; i < len; i++)
		eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
	/* Merge the new bytes into a scratch copy of the whole EEPROM... */
	memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
	memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
	/* ...then write back only the words that actually changed. */
	for (i = 0; i < EEPROM_SIZE; i++)
		if (eeprom_word[i] != eeprom_data[i]) {
			eeprom_data[i] = eeprom_word[i];
			eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
		}
	return 0;
}
/**
 * netdev_get_pauseparam - get flow control parameters
 * @dev: Network device.
 * @pause: Ethtool PAUSE settings data structure.
 *
 * This procedure returns the PAUSE control flow settings.
 */
static void netdev_get_pauseparam(struct net_device *dev,
	struct ethtool_pauseparam *pause)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	/* PAUSE_FLOW_CTRL override set means manual flow control. */
	pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
	if (!hw->ksz_switch) {
		/* KSZ8841: read the DMA flow-control enable bits. */
		pause->rx_pause =
			(hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
		pause->tx_pause =
			(hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
	} else {
		/* KSZ8842: flow control lives in the switch control register. */
		pause->rx_pause =
			(sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
		pause->tx_pause =
			(sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
	}
}
/**
 * netdev_set_pauseparam - set flow control parameters
 * @dev: Network device.
 * @pause: Ethtool PAUSE settings data structure.
 *
 * This function sets the PAUSE control flow settings.
 * Not implemented yet.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_pauseparam(struct net_device *dev,
	struct ethtool_pauseparam *pause)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	mutex_lock(&hw_priv->lock);
	if (pause->autoneg) {
		/* Autoneg: let the PHY advertise; clear the manual override. */
		if (!pause->rx_pause && !pause->tx_pause)
			port->flow_ctrl = PHY_NO_FLOW_CTRL;
		else
			port->flow_ctrl = PHY_FLOW_CTRL;
		hw->overrides &= ~PAUSE_FLOW_CTRL;
		port->force_link = 0;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, 1);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, 1);
		}
		/* Restart link negotiation so the new advertisement is used. */
		port_set_link_speed(port);
	} else {
		/* Manual flow control: force the requested rx/tx settings. */
		hw->overrides |= PAUSE_FLOW_CTRL;
		if (hw->ksz_switch) {
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_RX_FLOW_CTRL, pause->rx_pause);
			sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
				SWITCH_TX_FLOW_CTRL, pause->tx_pause);
		} else
			set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
	}
	mutex_unlock(&hw_priv->lock);
	return 0;
}
/**
 * netdev_get_ringparam - get tx/rx ring parameters
 * @dev: Network device.
 * @ring: Ethtool RING settings data structure.
 *
 * This procedure returns the TX/RX ring settings.
 */
static void netdev_get_ringparam(struct net_device *dev,
	struct ethtool_ringparam *ring)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	/* Hardware maximum is 512 descriptors per ring. */
	ring->tx_max_pending = (1 << 9);
	ring->tx_pending = hw->tx_desc_info.alloc;
	ring->rx_max_pending = (1 << 9);
	ring->rx_pending = hw->rx_desc_info.alloc;
}
#define STATS_LEN (TOTAL_PORT_COUNTER_NUM)
/*
 * Names for the ethtool -S statistics. The order of these entries must
 * match the hardware MIB counter order returned by get_mib_counters().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[STATS_LEN] = {
	{ "rx_lo_priority_octets" },
	{ "rx_hi_priority_octets" },
	{ "rx_undersize_packets" },
	{ "rx_fragments" },
	{ "rx_oversize_packets" },
	{ "rx_jabbers" },
	{ "rx_symbol_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "rx_mac_ctrl_packets" },
	{ "rx_pause_packets" },
	{ "rx_bcast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_ucast_packets" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "tx_lo_priority_octets" },
	{ "tx_hi_priority_octets" },
	{ "tx_late_collisions" },
	{ "tx_pause_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_deferred" },
	{ "tx_total_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "rx_discards" },
	{ "tx_discards" },
};
/**
 * netdev_get_strings - get statistics identity strings
 * @dev: Network device.
 * @stringset: String set identifier.
 * @buf: Buffer to store the strings.
 *
 * This procedure returns the strings used to identify the statistics.
 */
static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	if (ETH_SS_STATS == stringset)
		/*
		 * Fixed mojibake: "&eth" had been corrupted into "ð".
		 * Only the first mib_cnt key strings apply to this chip.
		 */
		memcpy(buf, &ethtool_stats_keys,
			ETH_GSTRING_LEN * hw->mib_cnt);
}
/**
 * netdev_get_sset_count - get statistics size
 * @dev: Network device.
 * @sset: The statistics set number.
 *
 * This function returns the size of the statistics to be reported.
 *
 * Return size of the statistics to be reported.
 */
static int netdev_get_sset_count(struct net_device *dev, int sset)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;

	/* Only the statistics string set is supported. */
	if (ETH_SS_STATS != sset)
		return -EOPNOTSUPP;

	return hw->mib_cnt;
}
/**
 * netdev_get_ethtool_stats - get network device statistics
 * @dev: Network device.
 * @stats: Ethtool statistics data structure.
 * @data: Buffer to store the statistics.
 *
 * This procedure returns the statistics.
 */
static void netdev_get_ethtool_stats(struct net_device *dev,
	struct ethtool_stats *stats, u64 *data)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	int n_stats = stats->n_stats;
	int i;
	int n;
	int p;
	int rc;
	u64 counter[TOTAL_PORT_COUNTER_NUM];
	mutex_lock(&hw_priv->lock);
	/* Flag connected ports for a counter read by the MIB work item. */
	n = SWITCH_PORT_NUM;
	for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
		if (media_connected == hw->port_mib[p].state) {
			hw_priv->counter[p].read = 1;
			/* Remember first port that requests read. */
			if (n == SWITCH_PORT_NUM)
				n = p;
		}
	}
	mutex_unlock(&hw_priv->lock);
	if (n < SWITCH_PORT_NUM)
		schedule_work(&hw_priv->mib_read);
	/* Wait for the work item to finish (read flag set to 2). */
	if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
		p = n;
		rc = wait_event_interruptible_timeout(
			hw_priv->counter[p].counter,
			2 == hw_priv->counter[p].read,
			HZ * 1);
	} else
		for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
			if (0 == i) {
				/* First port gets a longer timeout. */
				rc = wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 2);
			} else if (hw->port_mib[p].cnt_ptr) {
				rc = wait_event_interruptible_timeout(
					hw_priv->counter[p].counter,
					2 == hw_priv->counter[p].read,
					HZ * 1);
			}
		}
	/* NOTE(review): rc (timeout result) is ignored; on timeout the
	 * possibly-stale cached counters are reported.
	 */
	get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
	n = hw->mib_cnt;
	if (n > n_stats)
		n = n_stats;
	n_stats -= n;	/* NOTE(review): dead store; n_stats unused after this */
	for (i = 0; i < n; i++)
		*data++ = counter[i];
}
/**
 * netdev_set_features - set receive checksum support
 * @dev: Network device.
 * @features: New device features (offloads).
 *
 * This function sets receive checksum support setting.
 *
 * Return 0 if successful; otherwise an error code.
 */
static int netdev_set_features(struct net_device *dev, u32 features)
{
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	mutex_lock(&hw_priv->lock);
	/* see note in hw_setup() */
	if (features & NETIF_F_RXCSUM)
		hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
	else
		hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
	/* Only touch the register while the DMA engine is enabled. */
	if (hw->enabled)
		writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
	mutex_unlock(&hw_priv->lock);
	return 0;
}
/* ethtool entry points; handlers not visible here are defined earlier
 * in this file.
 */
static struct ethtool_ops netdev_ethtool_ops = {
	.get_settings		= netdev_get_settings,
	.set_settings		= netdev_set_settings,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_drvinfo		= netdev_get_drvinfo,
	.get_regs_len		= netdev_get_regs_len,
	.get_regs		= netdev_get_regs,
	.get_wol		= netdev_get_wol,
	.set_wol		= netdev_set_wol,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_eeprom_len		= netdev_get_eeprom_len,
	.get_eeprom		= netdev_get_eeprom,
	.set_eeprom		= netdev_set_eeprom,
	.get_pauseparam		= netdev_get_pauseparam,
	.set_pauseparam		= netdev_set_pauseparam,
	.get_ringparam		= netdev_get_ringparam,
	.get_strings		= netdev_get_strings,
	.get_sset_count		= netdev_get_sset_count,
	.get_ethtool_stats	= netdev_get_ethtool_stats,
};
/*
 * Hardware monitoring
 */
/* Propagate a PHY/switch link state change to the net device.
 * NOTE(review): media_connected is passed unconditionally; presumably
 * set_media_state() compares against priv->media_state to decide the
 * actual carrier action — confirm against its definition.
 */
static void update_link(struct net_device *dev, struct dev_priv *priv,
	struct ksz_port *port)
{
	if (priv->media_state != port->linked->state) {
		priv->media_state = port->linked->state;
		if (netif_running(dev))
			set_media_state(dev, media_connected);
	}
}
/* Work item that drains the hardware MIB counters for every port.
 * Per-port counter[i].read states: 0 = idle, 1 = read requested,
 * 2 = read complete (waiters on counter[i].counter are woken).
 * next_jiffies is a file-scope variable declared elsewhere in this file.
 */
static void mib_read_work(struct work_struct *work)
{
	struct dev_info *hw_priv =
		container_of(work, struct dev_info, mib_read);
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port_mib *mib;
	int i;
	next_jiffies = jiffies;
	for (i = 0; i < hw->mib_port_cnt; i++) {
		mib = &hw->port_mib[i];
		/* Reading MIB counters or requested to read. */
		if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {
			/* Need to process receive interrupt. */
			if (port_r_cnt(hw, i))
				break;
			hw_priv->counter[i].read = 0;
			/* Finish reading counters. */
			if (0 == mib->cnt_ptr) {
				hw_priv->counter[i].read = 2;
				wake_up_interruptible(
					&hw_priv->counter[i].counter);
			}
		} else if (jiffies >= hw_priv->counter[i].time) {
			/* Only read MIB counters when the port is connected. */
			if (media_connected == mib->state)
				hw_priv->counter[i].read = 1;
			/* Stagger the next periodic read across all ports. */
			next_jiffies += HZ * 1 * hw->mib_port_cnt;
			hw_priv->counter[i].time = next_jiffies;
		/* Port is just disconnected. */
		} else if (mib->link_down) {
			mib->link_down = 0;
			/* Read counters one last time after link is lost. */
			hw_priv->counter[i].read = 1;
		}
	}
}
/* Periodic timer callback: runs the MIB read state machine and polls
 * the Wake-on-LAN PME status.
 */
static void mib_monitor(unsigned long ptr)
{
	struct dev_info *hw_priv = (struct dev_info *) ptr;
	mib_read_work(&hw_priv->mib_read);
	/* This is used to verify Wake-on-LAN is working. */
	if (hw_priv->pme_wait) {
		if (hw_priv->pme_wait <= jiffies) {
			hw_clr_wol_pme_status(&hw_priv->hw);
			hw_priv->pme_wait = 0;
		}
	} else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
		/* PME is asserted. Wait 2 seconds to clear it. */
		hw_priv->pme_wait = jiffies + HZ * 2;
	}
	/* Re-arm the timer for the next interval. */
	ksz_update_timer(&hw_priv->mib_timer_info);
}
/**
 * dev_monitor - periodic monitoring
 * @ptr: Network device pointer.
 *
 * This routine is run in a kernel timer to monitor the network device.
 */
static void dev_monitor(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct dev_priv *priv = netdev_priv(dev);
	struct dev_info *hw_priv = priv->adapter;
	struct ksz_hw *hw = &hw_priv->hw;
	struct ksz_port *port = &priv->port;
	/* Poll the link only when the link-change interrupt is unusable. */
	if (!(hw->features & LINK_INT_WORKING))
		port_get_link_speed(port);
	update_link(dev, priv, port);
	ksz_update_timer(&priv->monitor_timer_info);
}
/*
* Linux network device interface functions
*/
/* Driver exported variables */
static int msg_enable;
static char *macaddr = ":";
static char *mac1addr = ":";
/*
* This enables multiple network device mode for KSZ8842, which contains a
* switch with two physical ports. Some users like to take control of the
* ports for running Spanning Tree Protocol. The driver will create an
* additional eth? device for the other port.
*
* Some limitations are the network devices cannot have different MTU and
* multicast hash tables.
*/
static int multi_dev;
/*
* As most users select multiple network device mode to use Spanning Tree
* Protocol, this enables a feature in which most unicast and multicast packets
* are forwarded inside the switch and not passed to the host. Only packets
* that need the host's attention are passed to it. This prevents the host
* wasting CPU time to examine each and every incoming packets and do the
* forwarding itself.
*
* As the hack requires the private bridge header, the driver cannot compile
* with just the kernel headers.
*
* Enabling STP support also turns on multiple network device mode.
*/
static int stp;
/*
* This enables fast aging in the KSZ8842 switch. Not sure what situation
* needs that. However, fast aging is used to flush the dynamic MAC table when
* STP suport is enabled.
*/
static int fast_aging;
/**
 * netdev_init - initialize network device.
 * @dev: Network device.
 *
 * This function initializes the network device.
 *
 * Return 0 if successful; otherwise an error code indicating failure.
 */
static int __init netdev_init(struct net_device *dev)
{
	struct dev_priv *priv = netdev_priv(dev);
	/* 500 ms timeout */
	ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
		dev_monitor, dev);
	/* 500 ms timeout */
	dev->watchdog_timeo = HZ / 2;
	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
	/*
	 * Hardware does not really support IPv6 checksum generation, but
	 * driver actually runs faster with this on.
	 */
	dev->hw_features |= NETIF_F_IPV6_CSUM;
	dev->features |= dev->hw_features;
	sema_init(&priv->proc_sem, 1);
	/* Set up the MII interface used by the ethtool/ioctl paths. */
	priv->mii_if.phy_id_mask = 0x1;
	priv->mii_if.reg_num_mask = 0x7;
	priv->mii_if.dev = dev;
	priv->mii_if.mdio_read = mdio_read;
	priv->mii_if.mdio_write = mdio_write;
	priv->mii_if.phy_id = priv->port.first_port + 1;
	priv->msg_enable = netif_msg_init(msg_enable,
		(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));
	return 0;
}
/* Net device entry points; handlers not visible here are defined earlier
 * in this file.
 */
static const struct net_device_ops netdev_ops = {
	.ndo_init		= netdev_init,
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_get_stats		= netdev_query_statistics,
	.ndo_start_xmit		= netdev_tx,
	.ndo_tx_timeout		= netdev_tx_timeout,
	.ndo_change_mtu		= netdev_change_mtu,
	.ndo_set_features	= netdev_set_features,
	.ndo_set_mac_address	= netdev_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_set_rx_mode	= netdev_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= netdev_netpoll,
#endif
};
/* Unregister (if needed) and free a net device.
 * NOTE(review): a non-zero watchdog_timeo is used as the marker that the
 * device was registered, since netdev_init() sets it at register time —
 * confirm this invariant holds on all error paths.
 */
static void netdev_free(struct net_device *dev)
{
	if (dev->watchdog_timeo)
		unregister_netdev(dev);
	free_netdev(dev);
}
/* Per-adapter bundle: shared hardware state plus one net device per port. */
struct platform_info {
	struct dev_info dev_info;
	struct net_device *netdev[SWITCH_PORT_NUM];
};
/* Count of net devices created so far; also used as the adapter id. */
static int net_device_present;
/* Parse a "xx:xx:xx:xx:xx:xx" style module-parameter string into the
 * override MAC address for @port (MAIN_PORT or the switch's other port).
 * got_num: 0 = nothing parsed yet, 1 = digits pending, 2 = byte complete.
 */
static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
{
	int i;
	int j;
	int got_num;
	int num;
	i = j = num = got_num = 0;
	while (j < MAC_ADDR_LEN) {
		if (macaddr[i]) {
			int digit;
			got_num = 1;
			digit = hex_to_bin(macaddr[i]);
			if (digit >= 0)
				num = num * 16 + digit;
			else if (':' == macaddr[i])
				got_num = 2;	/* separator ends the byte */
			else
				break;		/* invalid character: abort */
		} else if (got_num)
			got_num = 2;		/* end of string ends the byte */
		else
			break;
		if (2 == got_num) {
			if (MAIN_PORT == port) {
				hw_priv->hw.override_addr[j++] = (u8) num;
				/* NOTE(review): hw.id is added to byte 5 once
				 * per parsed byte (up to 6 times) — looks
				 * intentional only if hw.id is 0 for the
				 * first adapter; confirm.
				 */
				hw_priv->hw.override_addr[5] +=
					hw_priv->hw.id;
			} else {
				hw_priv->hw.ksz_switch->other_addr[j++] =
					(u8) num;
				hw_priv->hw.ksz_switch->other_addr[5] +=
					hw_priv->hw.id;
			}
			num = got_num = 0;
		}
		i++;
	}
	/* Only a complete 6-byte address activates the override. */
	if (MAC_ADDR_LEN == j) {
		if (MAIN_PORT == port)
			hw_priv->hw.mac_override = 1;
	}
}
/* NOTE(review): KS884X_DMA_MASK appears unused in this file. */
#define KS884X_DMA_MASK (~0x0UL)
/* Read the second port's MAC address from EEPROM (3 16-bit words,
 * stored low-byte-last) into the switch state. All-zero or a 0xffff
 * first word means "not programmed" and leaves the default in place.
 */
static void read_other_addr(struct ksz_hw *hw)
{
	int i;
	u16 data[3];
	struct ksz_switch *sw = hw->ksz_switch;
	for (i = 0; i < 3; i++)
		data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
	if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
		sw->other_addr[5] = (u8) data[0];
		sw->other_addr[4] = (u8)(data[0] >> 8);
		sw->other_addr[3] = (u8) data[1];
		sw->other_addr[2] = (u8)(data[1] >> 8);
		sw->other_addr[1] = (u8) data[2];
		sw->other_addr[0] = (u8)(data[2] >> 8);
	}
}
#ifndef PCI_VENDOR_ID_MICREL_KS
#define PCI_VENDOR_ID_MICREL_KS 0x16c6
#endif
/* PCI probe: map the registers, detect KSZ8841 (1 port) vs KSZ8842
 * (switch, 2 ports), allocate per-adapter state and register one or more
 * net devices. Errors unwind through the labelled cleanup chain at the end.
 */
static int __devinit pcidev_init(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	struct net_device *dev;
	struct dev_priv *priv;
	struct dev_info *hw_priv;
	struct ksz_hw *hw;
	struct platform_info *info;
	struct ksz_port *port;
	unsigned long reg_base;
	unsigned long reg_len;
	int cnt;
	int i;
	int mib_port_count;
	int pi;
	int port_count;
	int result;
	char banner[sizeof(version)];
	struct ksz_switch *sw = NULL;
	result = pci_enable_device(pdev);
	if (result)
		return result;
	result = -ENODEV;
	/* Device is 32-bit DMA only. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
			pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return result;
	reg_base = pci_resource_start(pdev, 0);
	reg_len = pci_resource_len(pdev, 0);
	/* BAR 0 must be memory-mapped, not I/O ports. */
	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
		return result;
	if (!request_mem_region(reg_base, reg_len, DRV_NAME))
		return result;
	pci_set_master(pdev);
	result = -ENOMEM;
	info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
	if (!info)
		goto pcidev_init_dev_err;
	hw_priv = &info->dev_info;
	hw_priv->pdev = pdev;
	hw = &hw_priv->hw;
	hw->io = ioremap(reg_base, reg_len);
	if (!hw->io)
		goto pcidev_init_io_err;
	/* hw_init() returns the number of ports (1 or 2), 0 if unknown. */
	cnt = hw_init(hw);
	if (!cnt) {
		if (msg_enable & NETIF_MSG_PROBE)
			pr_alert("chip not detected\n");
		result = -ENODEV;
		goto pcidev_init_alloc_err;
	}
	snprintf(banner, sizeof(banner), "%s", version);
	banner[13] = cnt + '0';	/* Replace x in "Micrel KSZ884x" */
	dev_info(&hw_priv->pdev->dev, "%s\n", banner);
	dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
	/* Assume device is KSZ8841. */
	hw->dev_count = 1;
	port_count = 1;
	mib_port_count = 1;
	hw->addr_list_size = 0;
	hw->mib_cnt = PORT_COUNTER_NUM;
	hw->mib_port_cnt = 1;
	/* KSZ8842 has a switch with multiple ports. */
	if (2 == cnt) {
		if (fast_aging)
			hw->overrides |= FAST_AGING;
		hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;
		/* Multiple network device interfaces are required. */
		if (multi_dev) {
			hw->dev_count = SWITCH_PORT_NUM;
			hw->addr_list_size = SWITCH_PORT_NUM - 1;
		}
		/* Single network device has multiple ports. */
		if (1 == hw->dev_count) {
			port_count = SWITCH_PORT_NUM;
			mib_port_count = SWITCH_PORT_NUM;
		}
		hw->mib_port_cnt = TOTAL_PORT_NUM;
		hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
		if (!hw->ksz_switch)
			goto pcidev_init_alloc_err;
		sw = hw->ksz_switch;
	}
	for (i = 0; i < hw->mib_port_cnt; i++)
		hw->port_mib[i].mib_start = 0;
	hw->parent = hw_priv;
	/* Default MTU is 1500. */
	hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;
	if (ksz_alloc_mem(hw_priv))
		goto pcidev_init_mem_err;
	hw_priv->hw.id = net_device_present;
	spin_lock_init(&hw_priv->hwlock);
	mutex_init(&hw_priv->lock);
	/* tasklet is enabled. */
	tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
		(unsigned long) hw_priv);
	tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
		(unsigned long) hw_priv);
	/* tasklet_enable will decrement the atomic counter. */
	tasklet_disable(&hw_priv->rx_tasklet);
	tasklet_disable(&hw_priv->tx_tasklet);
	for (i = 0; i < TOTAL_PORT_NUM; i++)
		init_waitqueue_head(&hw_priv->counter[i].counter);
	if (macaddr[0] != ':')
		get_mac_addr(hw_priv, macaddr, MAIN_PORT);
	/* Read MAC address and initialize override address if not overrided. */
	hw_read_addr(hw);
	/* Multiple device interfaces mode requires a second MAC address. */
	if (hw->dev_count > 1) {
		memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
		read_other_addr(hw);
		if (mac1addr[0] != ':')
			get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
	}
	hw_setup(hw);
	if (hw->ksz_switch)
		sw_setup(hw);
	else {
		/* WoL is only supported on the non-switch chip. */
		hw_priv->wol_support = WOL_SUPPORT;
		hw_priv->wol_enable = 0;
	}
	INIT_WORK(&hw_priv->mib_read, mib_read_work);
	/* 500 ms timeout */
	ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
		mib_monitor, hw_priv);
	/* Create one net device per logical interface. */
	for (i = 0; i < hw->dev_count; i++) {
		dev = alloc_etherdev(sizeof(struct dev_priv));
		if (!dev)
			goto pcidev_init_reg_err;
		info->netdev[i] = dev;
		priv = netdev_priv(dev);
		priv->adapter = hw_priv;
		priv->id = net_device_present++;
		port = &priv->port;
		port->port_cnt = port_count;
		port->mib_port_cnt = mib_port_count;
		port->first_port = i;
		port->flow_ctrl = PHY_FLOW_CTRL;
		port->hw = hw;
		port->linked = &hw->port_info[port->first_port];
		for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
			hw->port_info[pi].port_id = pi;
			hw->port_info[pi].pdev = dev;
			hw->port_info[pi].state = media_disconnected;
		}
		dev->mem_start = (unsigned long) hw->io;
		dev->mem_end = dev->mem_start + reg_len - 1;
		dev->irq = pdev->irq;
		if (MAIN_PORT == i)
			memcpy(dev->dev_addr, hw_priv->hw.override_addr,
				MAC_ADDR_LEN);
		else {
			memcpy(dev->dev_addr, sw->other_addr,
				MAC_ADDR_LEN);
			/* Derive a distinct address when no second one exists. */
			if (!memcmp(sw->other_addr, hw->override_addr,
					MAC_ADDR_LEN))
				dev->dev_addr[5] += port->first_port;
		}
		dev->netdev_ops = &netdev_ops;
		SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
		if (register_netdev(dev))
			goto pcidev_init_reg_err;
		port_set_power_saving(port, true);
	}
	pci_dev_get(hw_priv->pdev);
	pci_set_drvdata(pdev, info);
	return 0;
pcidev_init_reg_err:
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			netdev_free(info->netdev[i]);
			info->netdev[i] = NULL;
		}
	}
pcidev_init_mem_err:
	ksz_free_mem(hw_priv);
	kfree(hw->ksz_switch);
pcidev_init_alloc_err:
	iounmap(hw->io);
pcidev_init_io_err:
	kfree(info);
pcidev_init_dev_err:
	release_mem_region(reg_base, reg_len);
	return result;
}
/* PCI remove: tear down in reverse order of pcidev_init(). */
static void pcidev_exit(struct pci_dev *pdev)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;
	pci_set_drvdata(pdev, NULL);
	release_mem_region(pci_resource_start(pdev, 0),
		pci_resource_len(pdev, 0));
	for (i = 0; i < hw_priv->hw.dev_count; i++) {
		if (info->netdev[i])
			netdev_free(info->netdev[i]);
	}
	if (hw_priv->hw.io)
		iounmap(hw_priv->hw.io);
	ksz_free_mem(hw_priv);
	kfree(hw_priv->hw.ksz_switch);
	/* Balance the pci_dev_get() taken in pcidev_init(). */
	pci_dev_put(hw_priv->pdev);
	kfree(info);
}
#ifdef CONFIG_PM
/* Power-management resume: restore PCI state, disarm WoL, and reopen any
 * interface that was running when the system suspended.
 */
static int pcidev_resume(struct pci_dev *pdev)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;
	struct ksz_hw *hw = &hw_priv->hw;
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_wake(pdev, PCI_D0, 0);
	if (hw_priv->wol_enable)
		hw_cfg_wol_pme(hw, 0);
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			struct net_device *dev = info->netdev[i];
			if (netif_running(dev)) {
				netdev_open(dev);
				netif_device_attach(dev);
			}
		}
	}
	return 0;
}
/* Power-management suspend: close running interfaces, arm Wake-on-LAN if
 * enabled, then put the PCI device into the target low-power state.
 */
static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct platform_info *info = pci_get_drvdata(pdev);
	struct dev_info *hw_priv = &info->dev_info;
	struct ksz_hw *hw = &hw_priv->hw;
	/* Need to find a way to retrieve the device IP address. */
	static const u8 net_addr[] = { 192, 168, 1, 1 };
	for (i = 0; i < hw->dev_count; i++) {
		if (info->netdev[i]) {
			struct net_device *dev = info->netdev[i];
			if (netif_running(dev)) {
				netif_device_detach(dev);
				netdev_close(dev);
			}
		}
	}
	if (hw_priv->wol_enable) {
		hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
		hw_cfg_wol_pme(hw, 1);
	}
	pci_save_state(pdev);
	pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
#endif
static char pcidev_name[] = "ksz884xp";
/* PCI IDs: 0x8841 (single port) and 0x8842 (2-port switch). */
static struct pci_device_id pcidev_table[] = {
	{ PCI_VENDOR_ID_MICREL_KS, 0x8841,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_MICREL_KS, 0x8842,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, pcidev_table);
/* PCI driver glue; suspend/resume only when power management is built in. */
static struct pci_driver pci_device_driver = {
#ifdef CONFIG_PM
	.suspend	= pcidev_suspend,
	.resume		= pcidev_resume,
#endif
	.name		= pcidev_name,
	.id_table	= pcidev_table,
	.probe		= pcidev_init,
	.remove		= pcidev_exit
};
/* Module entry: register the PCI driver; probing happens per device. */
static int __init ksz884x_init_module(void)
{
	return pci_register_driver(&pci_device_driver);
}
/* Module exit: unregister the PCI driver, removing all devices. */
static void __exit ksz884x_cleanup_module(void)
{
	pci_unregister_driver(&pci_device_driver);
}
module_init(ksz884x_init_module);
module_exit(ksz884x_cleanup_module);
MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
MODULE_LICENSE("GPL");
/* Module parameters; see the comments on the variables above. */
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
module_param(macaddr, charp, 0);
module_param(mac1addr, charp, 0);
module_param(fast_aging, int, 0);
module_param(multi_dev, int, 0);
module_param(stp, int, 0);
MODULE_PARM_DESC(macaddr, "MAC address");
MODULE_PARM_DESC(mac1addr, "Second MAC address");
MODULE_PARM_DESC(fast_aging, "Fast aging");
MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
MODULE_PARM_DESC(stp, "STP support");
| gpl-2.0 |
m-stein/linux | drivers/mfd/arizona-spi.c | 386 | 2466 | /*
* arizona-spi.c -- Arizona SPI bus interface
*
* Copyright 2012 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include <linux/mfd/arizona/core.h>
#include "arizona.h"
/* SPI probe: resolve the Arizona device type (from DT or the SPI id
 * table), select the matching regmap config, allocate the core struct
 * and hand off to arizona_dev_init().
 */
static int arizona_spi_probe(struct spi_device *spi)
{
	const struct spi_device_id *id = spi_get_device_id(spi);
	struct arizona *arizona;
	const struct regmap_config *regmap_config;
	unsigned long type;
	int ret;

	/* The device type may come from DT rather than the SPI id table. */
	if (spi->dev.of_node)
		type = arizona_of_get_type(&spi->dev);
	else
		type = id->driver_data;

	switch (type) {
#ifdef CONFIG_MFD_WM5102
	case WM5102:
		regmap_config = &wm5102_spi_regmap;
		break;
#endif
#ifdef CONFIG_MFD_WM5110
	case WM5110:
		regmap_config = &wm5110_spi_regmap;
		break;
#endif
	default:
		/* Report the type actually resolved, not id->driver_data,
		 * which is wrong for DT-probed devices.
		 */
		dev_err(&spi->dev, "Unknown device type %ld\n", type);
		return -EINVAL;
	}

	arizona = devm_kzalloc(&spi->dev, sizeof(*arizona), GFP_KERNEL);
	if (arizona == NULL)
		return -ENOMEM;

	arizona->regmap = devm_regmap_init_spi(spi, regmap_config);
	if (IS_ERR(arizona->regmap)) {
		ret = PTR_ERR(arizona->regmap);
		dev_err(&spi->dev, "Failed to allocate register map: %d\n",
			ret);
		return ret;
	}

	/* Bug fix: use the resolved type. The original assigned
	 * id->driver_data, which disagrees with the DT-derived type and
	 * would initialise the core as the wrong device.
	 */
	arizona->type = type;
	arizona->dev = &spi->dev;
	arizona->irq = spi->irq;

	return arizona_dev_init(arizona);
}
/* SPI remove: tear down the Arizona core registered in probe. */
static int arizona_spi_remove(struct spi_device *spi)
{
	struct arizona *arizona = spi_get_drvdata(spi);
	arizona_dev_exit(arizona);
	return 0;
}
/* SPI id table; driver_data carries the device type enum value. */
static const struct spi_device_id arizona_spi_ids[] = {
	{ "wm5102", WM5102 },
	{ "wm5110", WM5110 },
	{ },
};
MODULE_DEVICE_TABLE(spi, arizona_spi_ids);
/* SPI driver glue; PM ops and OF match table come from the shared
 * arizona core (arizona.h).
 */
static struct spi_driver arizona_spi_driver = {
	.driver = {
		.name	= "arizona",
		.owner	= THIS_MODULE,
		.pm	= &arizona_pm_ops,
		.of_match_table	= of_match_ptr(arizona_of_match),
	},
	.probe		= arizona_spi_probe,
	.remove		= arizona_spi_remove,
	.id_table	= arizona_spi_ids,
};
module_spi_driver(arizona_spi_driver);
MODULE_DESCRIPTION("Arizona SPI bus interface");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
hallor/linux | drivers/mmc/host/mvsdio.c | 898 | 24956 | /*
* Marvell MMC/SD/SDIO driver
*
* Authors: Maen Suleiman, Nicolas Pitre
* Copyright (C) 2008-2009 Marvell Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/mbus.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <asm/sizes.h>
#include <asm/unaligned.h>
#include <linux/platform_data/mmc-mvsdio.h>
#include "mvsdio.h"
#define DRIVER_NAME	"mvsdio"
static int maxfreq;	/* optional clock-frequency cap (module param) */
static int nodma;	/* force PIO instead of DMA (module param) */
/* Per-controller state for the Marvell MMC/SD/SDIO host. */
struct mvsd_host {
	void __iomem *base;		/* mapped register window */
	struct mmc_request *mrq;	/* request in flight, NULL if idle */
	spinlock_t lock;
	unsigned int xfer_mode;		/* cached MVSD_XFER_MODE value */
	unsigned int intr_en;		/* cached MVSD_NOR_INTR_EN value */
	unsigned int ctrl;		/* cached MVSD_HOST_CTRL value */
	unsigned int pio_size;		/* bytes remaining in PIO transfer */
	void *pio_ptr;			/* current PIO position, NULL for DMA */
	unsigned int sg_frags;		/* mapped scatterlist fragment count */
	unsigned int ns_per_clk;	/* controller clock period in ns */
	unsigned int clock;
	unsigned int base_clock;
	struct timer_list timer;	/* request watchdog */
	struct mmc_host *mmc;
	struct device *dev;
	struct clk *clk;
};
/* Register accessors relative to a local "iobase" variable. */
#define mvsd_write(offs, val)	writel(val, iobase + (offs))
#define mvsd_read(offs)		readl(iobase + (offs))
/* Program the controller for a data transfer.
 * Returns 1 when the transfer must be done by PIO, 0 when DMA is set up.
 */
static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)
{
	void __iomem *iobase = host->base;
	unsigned int tmout;
	int tmout_index;
	/*
	 * Hardware weirdness. The FIFO_EMPTY bit of the HW_STATE
	 * register is sometimes not set before a while when some
	 * "unusual" data block sizes are used (such as with the SWITCH
	 * command), even despite the fact that the XFER_DONE interrupt
	 * was raised. And if another data transfer starts before
	 * this bit comes to good sense (which eventually happens by
	 * itself) then the new transfer simply fails with a timeout.
	 */
	if (!(mvsd_read(MVSD_HW_STATE) & (1 << 13))) {
		unsigned long t = jiffies + HZ;
		unsigned int hw_state, count = 0;
		do {
			hw_state = mvsd_read(MVSD_HW_STATE);
			if (time_after(jiffies, t)) {
				dev_warn(host->dev, "FIFO_EMPTY bit missing\n");
				break;
			}
			count++;
		} while (!(hw_state & (1 << 13)));
		dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit "
				   "(hw=0x%04x, count=%d, jiffies=%ld)\n",
				   hw_state, count, jiffies - (t - HZ));
	}
	/* If timeout=0 then maximum timeout index is used. */
	tmout = DIV_ROUND_UP(data->timeout_ns, host->ns_per_clk);
	tmout += data->timeout_clks;
	/* Hardware timeout field encodes log2 of the clock count. */
	tmout_index = fls(tmout - 1) - 12;
	if (tmout_index < 0)
		tmout_index = 0;
	if (tmout_index > MVSD_HOST_CTRL_TMOUT_MAX)
		tmout_index = MVSD_HOST_CTRL_TMOUT_MAX;
	dev_dbg(host->dev, "data %s at 0x%08x: blocks=%d blksz=%d tmout=%u (%d)\n",
		(data->flags & MMC_DATA_READ) ? "read" : "write",
		(u32)sg_virt(data->sg), data->blocks, data->blksz,
		tmout, tmout_index);
	host->ctrl &= ~MVSD_HOST_CTRL_TMOUT_MASK;
	host->ctrl |= MVSD_HOST_CTRL_TMOUT(tmout_index);
	mvsd_write(MVSD_HOST_CTRL, host->ctrl);
	mvsd_write(MVSD_BLK_COUNT, data->blocks);
	mvsd_write(MVSD_BLK_SIZE, data->blksz);
	if (nodma || (data->blksz | data->sg->offset) & 3 ||
	    ((!(data->flags & MMC_DATA_READ) && data->sg->offset & 0x3f))) {
		/*
		 * We cannot do DMA on a buffer which offset or size
		 * is not aligned on a 4-byte boundary.
		 *
		 * It also appears the host to card DMA can corrupt
		 * data when the buffer is not aligned on a 64 byte
		 * boundary.
		 */
		host->pio_size = data->blocks * data->blksz;
		host->pio_ptr = sg_virt(data->sg);
		if (!nodma)
			dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n",
				host->pio_ptr, host->pio_size);
		return 1;
	} else {
		dma_addr_t phys_addr;
		int dma_dir = (data->flags & MMC_DATA_READ) ?
			DMA_FROM_DEVICE : DMA_TO_DEVICE;
		host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg,
					    data->sg_len, dma_dir);
		/* DMA address is split across two 16-bit registers. */
		phys_addr = sg_dma_address(data->sg);
		mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
		mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16);
		return 0;
	}
}
/* mmc_host_ops.request: encode the command, optionally set up the data
 * phase (DMA or PIO) and auto-CMD12, then kick off the transfer with the
 * interrupt mask that matches the expected completion event.
 */
static void mvsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mvsd_host *host = mmc_priv(mmc);
	void __iomem *iobase = host->base;
	struct mmc_command *cmd = mrq->cmd;
	u32 cmdreg = 0, xfer = 0, intr = 0;
	unsigned long flags;
	/* Only one request may be in flight at a time. */
	BUG_ON(host->mrq != NULL);
	host->mrq = mrq;
	dev_dbg(host->dev, "cmd %d (hw state 0x%04x)\n",
		cmd->opcode, mvsd_read(MVSD_HW_STATE));
	cmdreg = MVSD_CMD_INDEX(cmd->opcode);
	/* Translate the MMC response flags into the hardware encoding. */
	if (cmd->flags & MMC_RSP_BUSY)
		cmdreg |= MVSD_CMD_RSP_48BUSY;
	else if (cmd->flags & MMC_RSP_136)
		cmdreg |= MVSD_CMD_RSP_136;
	else if (cmd->flags & MMC_RSP_PRESENT)
		cmdreg |= MVSD_CMD_RSP_48;
	else
		cmdreg |= MVSD_CMD_RSP_NONE;
	if (cmd->flags & MMC_RSP_CRC)
		cmdreg |= MVSD_CMD_CHECK_CMDCRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		cmdreg |= MVSD_CMD_INDX_CHECK;
	if (cmd->flags & MMC_RSP_PRESENT) {
		cmdreg |= MVSD_UNEXPECTED_RESP;
		intr |= MVSD_NOR_UNEXP_RSP;
	}
	if (mrq->data) {
		struct mmc_data *data = mrq->data;
		int pio;
		cmdreg |= MVSD_CMD_DATA_PRESENT | MVSD_CMD_CHECK_DATACRC16;
		xfer |= MVSD_XFER_MODE_HW_WR_DATA_EN;
		if (data->flags & MMC_DATA_READ)
			xfer |= MVSD_XFER_MODE_TO_HOST;
		pio = mvsd_setup_data(host, data);
		if (pio) {
			xfer |= MVSD_XFER_MODE_PIO;
			/* PIO section of mvsd_irq has comments on those bits */
			if (data->flags & MMC_DATA_WRITE)
				intr |= MVSD_NOR_TX_AVAIL;
			else if (host->pio_size > 32)
				intr |= MVSD_NOR_RX_FIFO_8W;
			else
				intr |= MVSD_NOR_RX_READY;
		}
		if (data->stop) {
			/* Let the controller issue CMD12 automatically. */
			struct mmc_command *stop = data->stop;
			u32 cmd12reg = 0;
			mvsd_write(MVSD_AUTOCMD12_ARG_LOW, stop->arg & 0xffff);
			mvsd_write(MVSD_AUTOCMD12_ARG_HI, stop->arg >> 16);
			if (stop->flags & MMC_RSP_BUSY)
				cmd12reg |= MVSD_AUTOCMD12_BUSY;
			if (stop->flags & MMC_RSP_OPCODE)
				cmd12reg |= MVSD_AUTOCMD12_INDX_CHECK;
			cmd12reg |= MVSD_AUTOCMD12_INDEX(stop->opcode);
			mvsd_write(MVSD_AUTOCMD12_CMD, cmd12reg);
			xfer |= MVSD_XFER_MODE_AUTO_CMD12;
			intr |= MVSD_NOR_AUTOCMD12_DONE;
		} else {
			intr |= MVSD_NOR_XFER_DONE;
		}
	} else {
		intr |= MVSD_NOR_CMD_DONE;
	}
	mvsd_write(MVSD_ARG_LOW, cmd->arg & 0xffff);
	mvsd_write(MVSD_ARG_HI, cmd->arg >> 16);
	spin_lock_irqsave(&host->lock, flags);
	/* Preserve only the SDIO card-interrupt enable across requests. */
	host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
	host->xfer_mode |= xfer;
	mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
	/* Clear stale status (but keep the card interrupt), start the cmd. */
	mvsd_write(MVSD_NOR_INTR_STATUS, ~MVSD_NOR_CARD_INT);
	mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
	mvsd_write(MVSD_CMD, cmdreg);
	host->intr_en &= MVSD_NOR_CARD_INT;
	host->intr_en |= intr | MVSD_NOR_ERROR;
	mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
	mvsd_write(MVSD_ERR_INTR_EN, 0xffff);
	/* Watchdog in case the completion interrupt never arrives. */
	mod_timer(&host->timer, jiffies + 5 * HZ);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Collect the command response from the controller and translate any
 * command-phase error bits into cmd->error.
 *
 * Returns err_status with all command-related error bits cleared, so the
 * caller can tell whether any unhandled error bits remain.
 */
static u32 mvsd_finish_cmd(struct mvsd_host *host, struct mmc_command *cmd,
        u32 err_status)
{
    void __iomem *iobase = host->base;
    if (cmd->flags & MMC_RSP_136) {
        /* 136-bit response: reassemble from eight 16-bit registers. */
        unsigned int response[8], i;
        for (i = 0; i < 8; i++)
            response[i] = mvsd_read(MVSD_RSP(i));
        cmd->resp[0] = ((response[0] & 0x03ff) << 22) |
            ((response[1] & 0xffff) << 6) |
            ((response[2] & 0xfc00) >> 10);
        cmd->resp[1] = ((response[2] & 0x03ff) << 22) |
            ((response[3] & 0xffff) << 6) |
            ((response[4] & 0xfc00) >> 10);
        cmd->resp[2] = ((response[4] & 0x03ff) << 22) |
            ((response[5] & 0xffff) << 6) |
            ((response[6] & 0xfc00) >> 10);
        cmd->resp[3] = ((response[6] & 0x03ff) << 22) |
            ((response[7] & 0x3fff) << 8);
    } else if (cmd->flags & MMC_RSP_PRESENT) {
        /* 48-bit response: reassemble from three 16-bit registers. */
        unsigned int response[3], i;
        for (i = 0; i < 3; i++)
            response[i] = mvsd_read(MVSD_RSP(i));
        cmd->resp[0] = ((response[2] & 0x003f) << (8 - 8)) |
            ((response[1] & 0xffff) << (14 - 8)) |
            ((response[0] & 0x03ff) << (30 - 8));
        cmd->resp[1] = ((response[0] & 0xfc00) >> 10);
        cmd->resp[2] = 0;
        cmd->resp[3] = 0;
    }
    /* Map hardware command error bits onto errno values. */
    if (err_status & MVSD_ERR_CMD_TIMEOUT) {
        cmd->error = -ETIMEDOUT;
    } else if (err_status & (MVSD_ERR_CMD_CRC | MVSD_ERR_CMD_ENDBIT |
            MVSD_ERR_CMD_INDEX | MVSD_ERR_CMD_STARTBIT)) {
        cmd->error = -EILSEQ;
    }
    /* Clear the bits we have consumed before handing err_status back. */
    err_status &= ~(MVSD_ERR_CMD_TIMEOUT | MVSD_ERR_CMD_CRC |
            MVSD_ERR_CMD_ENDBIT | MVSD_ERR_CMD_INDEX |
            MVSD_ERR_CMD_STARTBIT);
    return err_status;
}
/*
 * Finalize a data transfer: release PIO state or unmap the DMA scatterlist,
 * translate data-phase error bits into data->error, account transferred
 * bytes, and pick up the auto-CMD12 response if one was issued.
 *
 * Returns err_status with the data-related (and auto-CMD12, if handled)
 * error bits cleared.
 */
static u32 mvsd_finish_data(struct mvsd_host *host, struct mmc_data *data,
        u32 err_status)
{
    void __iomem *iobase = host->base;
    /* The transfer used either PIO (pio_ptr set) or DMA mapping. */
    if (host->pio_ptr) {
        host->pio_ptr = NULL;
        host->pio_size = 0;
    } else {
        dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
                (data->flags & MMC_DATA_READ) ?
                DMA_FROM_DEVICE : DMA_TO_DEVICE);
    }
    if (err_status & MVSD_ERR_DATA_TIMEOUT)
        data->error = -ETIMEDOUT;
    else if (err_status & (MVSD_ERR_DATA_CRC | MVSD_ERR_DATA_ENDBIT))
        data->error = -EILSEQ;
    else if (err_status & MVSD_ERR_XFER_SIZE)
        data->error = -EBADE;
    err_status &= ~(MVSD_ERR_DATA_TIMEOUT | MVSD_ERR_DATA_CRC |
            MVSD_ERR_DATA_ENDBIT | MVSD_ERR_XFER_SIZE);
    dev_dbg(host->dev, "data done: blocks_left=%d, bytes_left=%d\n",
        mvsd_read(MVSD_CURR_BLK_LEFT), mvsd_read(MVSD_CURR_BYTE_LEFT));
    /* Completed blocks = requested blocks minus what the HW says is left. */
    data->bytes_xfered =
        (data->blocks - mvsd_read(MVSD_CURR_BLK_LEFT)) * data->blksz;
    /* We can't be sure about the last block when errors are detected */
    if (data->bytes_xfered && data->error)
        data->bytes_xfered -= data->blksz;
    /* Handle Auto cmd 12 response */
    if (data->stop) {
        unsigned int response[3], i;
        for (i = 0; i < 3; i++)
            response[i] = mvsd_read(MVSD_AUTO_RSP(i));
        data->stop->resp[0] = ((response[2] & 0x003f) << (8 - 8)) |
            ((response[1] & 0xffff) << (14 - 8)) |
            ((response[0] & 0x03ff) << (30 - 8));
        data->stop->resp[1] = ((response[0] & 0xfc00) >> 10);
        data->stop->resp[2] = 0;
        data->stop->resp[3] = 0;
        if (err_status & MVSD_ERR_AUTOCMD12) {
            u32 err_cmd12 = mvsd_read(MVSD_AUTOCMD12_ERR_STATUS);
            dev_dbg(host->dev, "c12err 0x%04x\n", err_cmd12);
            if (err_cmd12 & MVSD_AUTOCMD12_ERR_NOTEXE)
                data->stop->error = -ENOEXEC;
            else if (err_cmd12 & MVSD_AUTOCMD12_ERR_TIMEOUT)
                data->stop->error = -ETIMEDOUT;
            else if (err_cmd12)
                data->stop->error = -EILSEQ;
            err_status &= ~MVSD_ERR_AUTOCMD12;
        }
    }
    return err_status;
}
/*
 * Interrupt handler: drains/fills the PIO FIFO when a PIO transfer is in
 * flight, completes the current mmc_request when a "done" interrupt fires,
 * and forwards SDIO card interrupts to the core.
 *
 * Returns IRQ_HANDLED if anything was serviced, IRQ_NONE otherwise.
 */
static irqreturn_t mvsd_irq(int irq, void *dev)
{
    struct mvsd_host *host = dev;
    void __iomem *iobase = host->base;
    u32 intr_status, intr_done_mask;
    int irq_handled = 0;
    intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
    dev_dbg(host->dev, "intr 0x%04x intr_en 0x%04x hw_state 0x%04x\n",
        intr_status, mvsd_read(MVSD_NOR_INTR_EN),
        mvsd_read(MVSD_HW_STATE));
    /*
     * It looks like, SDIO IP can issue one late, spurious irq
     * although all irqs should be disabled. To work around this,
     * bail out early, if we didn't expect any irqs to occur.
     */
    if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) {
        dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n",
            mvsd_read(MVSD_NOR_INTR_STATUS),
            mvsd_read(MVSD_NOR_INTR_EN),
            mvsd_read(MVSD_ERR_INTR_STATUS),
            mvsd_read(MVSD_ERR_INTR_EN));
        return IRQ_HANDLED;
    }
    spin_lock(&host->lock);
    /* PIO handling, if needed. Messy business... */
    if (host->pio_size &&
        (intr_status & host->intr_en &
        (MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W))) {
        /* RX direction: pull data out of the FIFO into the PIO buffer. */
        u16 *p = host->pio_ptr;
        int s = host->pio_size;
        /* Burst 8 words at a time while the FIFO reports 8 words ready. */
        while (s >= 32 && (intr_status & MVSD_NOR_RX_FIFO_8W)) {
            readsw(iobase + MVSD_FIFO, p, 16);
            p += 16;
            s -= 32;
            intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
        }
        /*
         * Normally we'd use < 32 here, but the RX_FIFO_8W bit
         * doesn't appear to assert when there is exactly 32 bytes
         * (8 words) left to fetch in a transfer.
         */
        if (s <= 32) {
            /* Drain remaining full 32-bit units one at a time. */
            while (s >= 4 && (intr_status & MVSD_NOR_RX_READY)) {
                put_unaligned(mvsd_read(MVSD_FIFO), p++);
                put_unaligned(mvsd_read(MVSD_FIFO), p++);
                s -= 4;
                intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
            }
            /* Tail of 1-3 bytes: read a full unit, copy only s bytes. */
            if (s && s < 4 && (intr_status & MVSD_NOR_RX_READY)) {
                u16 val[2] = {0, 0};
                val[0] = mvsd_read(MVSD_FIFO);
                val[1] = mvsd_read(MVSD_FIFO);
                memcpy(p, ((void *)&val) + 4 - s, s);
                s = 0;
                intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
            }
            if (s == 0) {
                /* All data received: stop RX interrupts. */
                host->intr_en &=
                    ~(MVSD_NOR_RX_READY | MVSD_NOR_RX_FIFO_8W);
                mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
            } else if (host->intr_en & MVSD_NOR_RX_FIFO_8W) {
                /* Near the end: switch from burst to per-word irqs. */
                host->intr_en &= ~MVSD_NOR_RX_FIFO_8W;
                host->intr_en |= MVSD_NOR_RX_READY;
                mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
            }
        }
        dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
            s, intr_status, mvsd_read(MVSD_HW_STATE));
        host->pio_ptr = p;
        host->pio_size = s;
        irq_handled = 1;
    } else if (host->pio_size &&
           (intr_status & host->intr_en &
           (MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W))) {
        /* TX direction: push data from the PIO buffer into the FIFO. */
        u16 *p = host->pio_ptr;
        int s = host->pio_size;
        /*
         * The TX_FIFO_8W bit is unreliable. When set, bursting
         * 16 halfwords all at once in the FIFO drops data. Actually
         * TX_AVAIL does go off after only one word is pushed even if
         * TX_FIFO_8W remains set.
         */
        while (s >= 4 && (intr_status & MVSD_NOR_TX_AVAIL)) {
            mvsd_write(MVSD_FIFO, get_unaligned(p++));
            mvsd_write(MVSD_FIFO, get_unaligned(p++));
            s -= 4;
            intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
        }
        if (s < 4) {
            /* Tail of 1-3 bytes: zero-pad up to a full unit. */
            if (s && (intr_status & MVSD_NOR_TX_AVAIL)) {
                u16 val[2] = {0, 0};
                memcpy(((void *)&val) + 4 - s, p, s);
                mvsd_write(MVSD_FIFO, val[0]);
                mvsd_write(MVSD_FIFO, val[1]);
                s = 0;
                intr_status = mvsd_read(MVSD_NOR_INTR_STATUS);
            }
            if (s == 0) {
                host->intr_en &=
                    ~(MVSD_NOR_TX_AVAIL | MVSD_NOR_TX_FIFO_8W);
                mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
            }
        }
        dev_dbg(host->dev, "pio %d intr 0x%04x hw_state 0x%04x\n",
            s, intr_status, mvsd_read(MVSD_HW_STATE));
        host->pio_ptr = p;
        host->pio_size = s;
        irq_handled = 1;
    }
    /* Acknowledge everything we have seen so far. */
    mvsd_write(MVSD_NOR_INTR_STATUS, intr_status);
    /* Bits in intr_done_mask do NOT terminate the current request. */
    intr_done_mask = MVSD_NOR_CARD_INT | MVSD_NOR_RX_READY |
             MVSD_NOR_RX_FIFO_8W | MVSD_NOR_TX_FIFO_8W;
    if (intr_status & host->intr_en & ~intr_done_mask) {
        /* Request completion path. */
        struct mmc_request *mrq = host->mrq;
        struct mmc_command *cmd = mrq->cmd;
        u32 err_status = 0;
        del_timer(&host->timer);
        host->mrq = NULL;
        /* Keep only the SDIO card interrupt enabled, if it was. */
        host->intr_en &= MVSD_NOR_CARD_INT;
        mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
        mvsd_write(MVSD_ERR_INTR_EN, 0);
        spin_unlock(&host->lock);
        if (intr_status & MVSD_NOR_UNEXP_RSP) {
            cmd->error = -EPROTO;
        } else if (intr_status & MVSD_NOR_ERROR) {
            err_status = mvsd_read(MVSD_ERR_INTR_STATUS);
            dev_dbg(host->dev, "err 0x%04x\n", err_status);
        }
        err_status = mvsd_finish_cmd(host, cmd, err_status);
        if (mrq->data)
            err_status = mvsd_finish_data(host, mrq->data, err_status);
        if (err_status) {
            /* Error bits nobody consumed: report and fail the cmd. */
            dev_err(host->dev, "unhandled error status %#04x\n",
                err_status);
            cmd->error = -ENOMSG;
        }
        mmc_request_done(host->mmc, mrq);
        irq_handled = 1;
    } else
        spin_unlock(&host->lock);
    if (intr_status & MVSD_NOR_CARD_INT) {
        /* SDIO card interrupt: hand off to the mmc core. */
        mmc_signal_sdio_irq(host->mmc);
        irq_handled = 1;
    }
    if (irq_handled)
        return IRQ_HANDLED;
    dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n",
        intr_status, host->intr_en, host->pio_size);
    return IRQ_NONE;
}
/*
 * Software timeout for a request that never produced a completion
 * interrupt: reset the controller, quiesce interrupts, fail the request
 * with -ETIMEDOUT and complete it outside the lock.
 */
static void mvsd_timeout_timer(unsigned long data)
{
    struct mvsd_host *host = (struct mvsd_host *)data;
    void __iomem *iobase = host->base;
    struct mmc_request *mrq;
    unsigned long flags;
    spin_lock_irqsave(&host->lock, flags);
    mrq = host->mrq;
    if (mrq) {
        dev_err(host->dev, "Timeout waiting for hardware interrupt.\n");
        dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n",
            mvsd_read(MVSD_HW_STATE),
            mvsd_read(MVSD_NOR_INTR_STATUS),
            mvsd_read(MVSD_NOR_INTR_EN));
        host->mrq = NULL;
        /* Reset the controller and restore a quiescent configuration. */
        mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
        host->xfer_mode &= MVSD_XFER_MODE_INT_CHK_EN;
        mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
        host->intr_en &= MVSD_NOR_CARD_INT;
        mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
        mvsd_write(MVSD_ERR_INTR_EN, 0);
        mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
        mrq->cmd->error = -ETIMEDOUT;
        mvsd_finish_cmd(host, mrq->cmd, 0);
        if (mrq->data) {
            mrq->data->error = -ETIMEDOUT;
            mvsd_finish_data(host, mrq->data, 0);
        }
    }
    spin_unlock_irqrestore(&host->lock, flags);
    /* Completion callback may sleep/re-enter; do it after unlocking. */
    if (mrq)
        mmc_request_done(host->mmc, mrq);
}
/*
 * mmc_host_ops.enable_sdio_irq: gate both the transfer-mode interrupt
 * check bit and the card-interrupt enable bit together under the host
 * lock, then push the cached values to the hardware.
 */
static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
    struct mvsd_host *host = mmc_priv(mmc);
    void __iomem *iobase = host->base;
    unsigned long flags;
    spin_lock_irqsave(&host->lock, flags);
    if (enable) {
        host->xfer_mode |= MVSD_XFER_MODE_INT_CHK_EN;
        host->intr_en |= MVSD_NOR_CARD_INT;
    } else {
        host->xfer_mode &= ~MVSD_XFER_MODE_INT_CHK_EN;
        host->intr_en &= ~MVSD_NOR_CARD_INT;
    }
    mvsd_write(MVSD_XFER_MODE, host->xfer_mode);
    mvsd_write(MVSD_NOR_INTR_EN, host->intr_en);
    spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Bring the controller into a known-good powered state: reset it, enable
 * all status reporting, and clear any stale interrupt status.
 */
static void mvsd_power_up(struct mvsd_host *host)
{
    void __iomem *iobase = host->base;
    dev_dbg(host->dev, "power up\n");
    mvsd_write(MVSD_NOR_INTR_EN, 0);
    mvsd_write(MVSD_ERR_INTR_EN, 0);
    mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
    mvsd_write(MVSD_XFER_MODE, 0);
    mvsd_write(MVSD_NOR_STATUS_EN, 0xffff);
    mvsd_write(MVSD_ERR_STATUS_EN, 0xffff);
    mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
    mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
}
/*
 * Quiesce the controller: reset, stop the clock, disable all status
 * reporting and interrupts, clear pending status.
 */
static void mvsd_power_down(struct mvsd_host *host)
{
    void __iomem *iobase = host->base;
    dev_dbg(host->dev, "power down\n");
    mvsd_write(MVSD_NOR_INTR_EN, 0);
    mvsd_write(MVSD_ERR_INTR_EN, 0);
    mvsd_write(MVSD_SW_RESET, MVSD_SW_RESET_NOW);
    mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
    mvsd_write(MVSD_NOR_STATUS_EN, 0);
    mvsd_write(MVSD_ERR_STATUS_EN, 0);
    mvsd_write(MVSD_NOR_INTR_STATUS, 0xffff);
    mvsd_write(MVSD_ERR_INTR_STATUS, 0xffff);
}
/*
 * mmc_host_ops.set_ios: apply power state, clock divider, bus mode and
 * bus width to the controller.
 */
static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
    struct mvsd_host *host = mmc_priv(mmc);
    void __iomem *iobase = host->base;
    u32 ctrl_reg = 0;
    if (ios->power_mode == MMC_POWER_UP)
        mvsd_power_up(host);
    if (ios->clock == 0) {
        /* Clock off: stop the clock and park the divider at maximum. */
        mvsd_write(MVSD_XFER_MODE, MVSD_XFER_MODE_STOP_CLK);
        mvsd_write(MVSD_CLK_DIV, MVSD_BASE_DIV_MAX);
        host->clock = 0;
        dev_dbg(host->dev, "clock off\n");
    } else if (ios->clock != host->clock) {
        /* Pick the smallest divider not exceeding the requested rate. */
        u32 m = DIV_ROUND_UP(host->base_clock, ios->clock) - 1;
        if (m > MVSD_BASE_DIV_MAX)
            m = MVSD_BASE_DIV_MAX;
        mvsd_write(MVSD_CLK_DIV, m);
        host->clock = ios->clock;
        host->ns_per_clk = 1000000000 / (host->base_clock / (m+1));
        dev_dbg(host->dev, "clock=%d (%d), div=0x%04x\n",
            ios->clock, host->base_clock / (m+1), m);
    }
    /* default transfer mode */
    ctrl_reg |= MVSD_HOST_CTRL_BIG_ENDIAN;
    ctrl_reg &= ~MVSD_HOST_CTRL_LSB_FIRST;
    /* default to maximum timeout */
    ctrl_reg |= MVSD_HOST_CTRL_TMOUT_MASK;
    ctrl_reg |= MVSD_HOST_CTRL_TMOUT_EN;
    if (ios->bus_mode == MMC_BUSMODE_PUSHPULL)
        ctrl_reg |= MVSD_HOST_CTRL_PUSH_PULL_EN;
    if (ios->bus_width == MMC_BUS_WIDTH_4)
        ctrl_reg |= MVSD_HOST_CTRL_DATA_WIDTH_4_BITS;
    /*
     * The HI_SPEED_EN bit is causing trouble with many (but not all)
     * high speed SD, SDHC and SDIO cards. Not enabling that bit
     * makes all cards work. So let's just ignore that bit for now
     * and revisit this issue if problems for not enabling this bit
     * are ever reported.
     */
#if 0
    if (ios->timing == MMC_TIMING_MMC_HS ||
        ios->timing == MMC_TIMING_SD_HS)
        ctrl_reg |= MVSD_HOST_CTRL_HI_SPEED_EN;
#endif
    host->ctrl = ctrl_reg;
    mvsd_write(MVSD_HOST_CTRL, ctrl_reg);
    dev_dbg(host->dev, "ctrl 0x%04x: %s %s %s\n", ctrl_reg,
        (ctrl_reg & MVSD_HOST_CTRL_PUSH_PULL_EN) ?
            "push-pull" : "open-drain",
        (ctrl_reg & MVSD_HOST_CTRL_DATA_WIDTH_4_BITS) ?
            "4bit-width" : "1bit-width",
        (ctrl_reg & MVSD_HOST_CTRL_HI_SPEED_EN) ?
            "high-speed" : "");
    if (ios->power_mode == MMC_POWER_OFF)
        mvsd_power_down(host);
}
/* Host operations registered with the mmc core. */
static const struct mmc_host_ops mvsd_ops = {
    .request = mvsd_request,
    .get_ro = mmc_gpio_get_ro,
    .set_ios = mvsd_set_ios,
    .enable_sdio_irq = mvsd_enable_sdio_irq,
};
/*
 * Program the controller's four MBUS address decoding windows from the
 * SoC DRAM layout: clear all windows first, then map one window per
 * DRAM chip-select.
 */
static void
mv_conf_mbus_windows(struct mvsd_host *host,
             const struct mbus_dram_target_info *dram)
{
    void __iomem *iobase = host->base;
    int i;
    /* Disable all windows before reprogramming. */
    for (i = 0; i < 4; i++) {
        writel(0, iobase + MVSD_WINDOW_CTRL(i));
        writel(0, iobase + MVSD_WINDOW_BASE(i));
    }
    for (i = 0; i < dram->num_cs; i++) {
        const struct mbus_dram_window *cs = dram->cs + i;
        /* size mask | attribute | target id | enable bit (1). */
        writel(((cs->size - 1) & 0xffff0000) |
               (cs->mbus_attr << 8) |
               (dram->mbus_dram_target_id << 4) | 1,
               iobase + MVSD_WINDOW_CTRL(i));
        writel(cs->base, iobase + MVSD_WINDOW_BASE(i));
    }
}
static int mvsd_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mmc_host *mmc = NULL;
struct mvsd_host *host = NULL;
const struct mbus_dram_target_info *dram;
struct resource *r;
int ret, irq;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!r || irq < 0)
return -ENXIO;
mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
goto out;
}
host = mmc_priv(mmc);
host->mmc = mmc;
host->dev = &pdev->dev;
/*
* Some non-DT platforms do not pass a clock, and the clock
* frequency is passed through platform_data. On DT platforms,
* a clock must always be passed, even if there is no gatable
* clock associated to the SDIO interface (it can simply be a
* fixed rate clock).
*/
host->clk = devm_clk_get(&pdev->dev, NULL);
if (!IS_ERR(host->clk))
clk_prepare_enable(host->clk);
mmc->ops = &mvsd_ops;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX);
mmc->f_max = MVSD_CLOCKRATE_MAX;
mmc->max_blk_size = 2048;
mmc->max_blk_count = 65535;
mmc->max_segs = 1;
mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
if (np) {
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "DT platforms must have a clock associated\n");
ret = -EINVAL;
goto out;
}
host->base_clock = clk_get_rate(host->clk) / 2;
ret = mmc_of_parse(mmc);
if (ret < 0)
goto out;
} else {
const struct mvsdio_platform_data *mvsd_data;
mvsd_data = pdev->dev.platform_data;
if (!mvsd_data) {
ret = -ENXIO;
goto out;
}
mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ |
MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
host->base_clock = mvsd_data->clock / 2;
/* GPIO 0 regarded as invalid for backward compatibility */
if (mvsd_data->gpio_card_detect &&
gpio_is_valid(mvsd_data->gpio_card_detect)) {
ret = mmc_gpio_request_cd(mmc,
mvsd_data->gpio_card_detect,
0);
if (ret)
goto out;
} else {
mmc->caps |= MMC_CAP_NEEDS_POLL;
}
if (mvsd_data->gpio_write_protect &&
gpio_is_valid(mvsd_data->gpio_write_protect))
mmc_gpio_request_ro(mmc, mvsd_data->gpio_write_protect);
}
if (maxfreq)
mmc->f_max = maxfreq;
spin_lock_init(&host->lock);
host->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
goto out;
}
/* (Re-)program MBUS remapping windows if we are asked to. */
dram = mv_mbus_dram_info();
if (dram)
mv_conf_mbus_windows(host, dram);
mvsd_power_down(host);
ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);
if (ret) {
dev_err(&pdev->dev, "cannot assign irq %d\n", irq);
goto out;
}
setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host);
platform_set_drvdata(pdev, mmc);
ret = mmc_add_host(mmc);
if (ret)
goto out;
if (!(mmc->caps & MMC_CAP_NEEDS_POLL))
dev_dbg(&pdev->dev, "using GPIO for card detection\n");
else
dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n");
return 0;
out:
if (mmc) {
if (!IS_ERR(host->clk))
clk_disable_unprepare(host->clk);
mmc_free_host(mmc);
}
return ret;
}
/*
 * Tear down a controller instance: unregister from the mmc core, stop the
 * timeout timer, power the hardware down and release the clock and host.
 */
static int mvsd_remove(struct platform_device *pdev)
{
    struct mmc_host *mmc = platform_get_drvdata(pdev);
    struct mvsd_host *host = mmc_priv(mmc);
    mmc_remove_host(mmc);
    del_timer_sync(&host->timer);
    mvsd_power_down(host);
    if (!IS_ERR(host->clk))
        clk_disable_unprepare(host->clk);
    mmc_free_host(mmc);
    return 0;
}
/* Device-tree match table. */
static const struct of_device_id mvsdio_dt_ids[] = {
    { .compatible = "marvell,orion-sdio" },
    { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);
static struct platform_driver mvsd_driver = {
    .probe = mvsd_probe,
    .remove = mvsd_remove,
    .driver = {
        .name = DRIVER_NAME,
        .of_match_table = mvsdio_dt_ids,
    },
};
module_platform_driver(mvsd_driver);
/* maximum card clock frequency (default 50MHz) */
module_param(maxfreq, int, 0);
/* force PIO transfers all the time */
module_param(nodma, int, 0);
MODULE_AUTHOR("Maen Suleiman, Nicolas Pitre");
MODULE_DESCRIPTION("Marvell MMC,SD,SDIO Host Controller driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:mvsdio");
| gpl-2.0 |
skulldreamz/bullhead_kernel | fs/squashfs/lzo_wrapper.c | 2178 | 3122 | /*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2010 LG Electronics
* Chan Jeong <chan.jeong@lge.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* lzo_wrapper.c
*/
#include <linux/mutex.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/lzo.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"
/* Per-mount LZO workspace: one input and one output buffer. */
struct squashfs_lzo {
    void *input;   /* compressed data gathered from buffer_heads */
    void *output;  /* decompressed result before page copy-out */
};
/*
 * Allocate the LZO decompression workspace.  Buffers are sized for the
 * larger of the filesystem block size and the metadata block size.
 *
 * Returns the workspace or ERR_PTR(-ENOMEM).
 */
static void *lzo_init(struct squashfs_sb_info *msblk, void *buff)
{
    int block_size = max_t(int, msblk->block_size, SQUASHFS_METADATA_SIZE);
    struct squashfs_lzo *stream = kzalloc(sizeof(*stream), GFP_KERNEL);
    if (stream == NULL)
        goto failed;
    stream->input = vmalloc(block_size);
    if (stream->input == NULL)
        goto failed;
    stream->output = vmalloc(block_size);
    if (stream->output == NULL)
        goto failed2;
    return stream;
failed2:
    vfree(stream->input);
failed:
    /* kfree(NULL) is a no-op, so this is safe on the kzalloc failure path. */
    ERROR("Failed to allocate lzo workspace\n");
    kfree(stream);
    return ERR_PTR(-ENOMEM);
}
/*
 * Release the LZO workspace allocated by lzo_init().  Accepts NULL
 * (including an ERR_PTR-cleared pointer path where the caller passes
 * nothing allocated).
 */
static void lzo_free(void *strm)
{
    struct squashfs_lzo *stream = strm;

    if (!stream)
        return;

    vfree(stream->input);
    vfree(stream->output);
    kfree(stream);
}
/*
 * Decompress one LZO-compressed block: gather the compressed bytes from
 * the buffer_heads into the contiguous input buffer, run the safe LZO
 * decompressor, then scatter the result into the output page actor.
 *
 * Returns the number of decompressed bytes, or -EIO on decompression
 * failure.
 */
static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm,
    struct buffer_head **bh, int b, int offset, int length,
    struct squashfs_page_actor *output)
{
    struct squashfs_lzo *stream = strm;
    void *buff = stream->input, *data;
    int avail, i, bytes = length, res;
    size_t out_len = output->length;
    /* Gather: copy each buffer_head's payload into the input buffer. */
    for (i = 0; i < b; i++) {
        avail = min(bytes, msblk->devblksize - offset);
        memcpy(buff, bh[i]->b_data + offset, avail);
        buff += avail;
        bytes -= avail;
        offset = 0;  /* only the first bh starts at a nonzero offset */
        put_bh(bh[i]);
    }
    res = lzo1x_decompress_safe(stream->input, (size_t)length,
        stream->output, &out_len);
    if (res != LZO_E_OK)
        goto failed;
    res = bytes = (int)out_len;
    /* Scatter: copy the decompressed data page by page into the actor. */
    data = squashfs_first_page(output);
    buff = stream->output;
    while (data) {
        if (bytes <= PAGE_CACHE_SIZE) {
            memcpy(data, buff, bytes);
            break;
        } else {
            memcpy(data, buff, PAGE_CACHE_SIZE);
            buff += PAGE_CACHE_SIZE;
            bytes -= PAGE_CACHE_SIZE;
            data = squashfs_next_page(output);
        }
    }
    squashfs_finish_page(output);
    return res;
failed:
    return -EIO;
}
/* Decompressor registration for the squashfs core. */
const struct squashfs_decompressor squashfs_lzo_comp_ops = {
    .init = lzo_init,
    .free = lzo_free,
    .decompress = lzo_uncompress,
    .id = LZO_COMPRESSION,
    .name = "lzo",
    .supported = 1
};
| gpl-2.0 |
CryToCry96/android_kernel_ef52 | drivers/net/wireless/bcmdhd/wldev_common.c | 2178 | 9307 | /*
* Common function shared by Linux WEXT, cfg80211 and p2p drivers
*
* Copyright (C) 1999-2012, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: wldev_common.c,v 1.1.4.1.2.14 2011-02-09 01:40:07 $
*/
#include <osl.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <wldev_common.h>
#include <bcmutils.h>
#define htod32(i) i
#define htod16(i) i
#define dtoh32(i) i
#define dtoh16(i) i
#define htodchanspec(i) i
#define dtohchanspec(i) i
#define WLDEV_ERROR(args) \
do { \
printk(KERN_ERR "WLDEV-ERROR) %s : ", __func__); \
printk args; \
} while (0)
extern int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd);
/*
 * Issue a WL ioctl to the dongle through the DHD local ioctl entry point.
 * 'set' selects set (TRUE) vs. get (FALSE) semantics for 'arg'/'len'.
 */
s32 wldev_ioctl(
    struct net_device *dev, u32 cmd, void *arg, u32 len, u32 set)
{
    struct wl_ioctl ioc = {
        .cmd = cmd,
        .buf = arg,
        .len = len,
        .set = set,
    };

    return dhd_ioctl_entry_local(dev, &ioc, cmd);
}
/* Format a iovar buffer, not bsscfg indexed. The bsscfg index will be
* taken care of in dhd_ioctl_entry. Internal use only, not exposed to
* wl_iw, wl_cfg80211 and wl_cfgp2p
*/
/* Format a iovar buffer, not bsscfg indexed. The bsscfg index will be
 * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
 * wl_iw, wl_cfg80211 and wl_cfgp2p.
 * Returns the total iovar length produced by bcm_mkiovar().
 */
static s32 wldev_mkiovar(
    s8 *iovar_name, s8 *param, s32 paramlen,
    s8 *iovar_buf, u32 buflen)
{
    return bcm_mkiovar(iovar_name, param, paramlen, iovar_buf, buflen);
}
/*
 * Read an iovar into 'buf'.  'buf_sync' (optional) serializes access to a
 * shared buffer between callers.
 */
s32 wldev_iovar_getbuf(
    struct net_device *dev, s8 *iovar_name,
    void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
{
    s32 ret = 0;
    if (buf_sync) {
        mutex_lock(buf_sync);
    }
    wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
    ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
    if (buf_sync)
        mutex_unlock(buf_sync);
    return ret;
}
/*
 * Write an iovar from 'param' via 'buf'.  Only the formatted iovar length
 * is passed to the ioctl, unlike the get path which passes buflen.
 */
s32 wldev_iovar_setbuf(
    struct net_device *dev, s8 *iovar_name,
    void *param, s32 paramlen, void *buf, s32 buflen, struct mutex* buf_sync)
{
    s32 ret = 0;
    s32 iovar_len;
    if (buf_sync) {
        mutex_lock(buf_sync);
    }
    iovar_len = wldev_mkiovar(iovar_name, param, paramlen, buf, buflen);
    ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
    if (buf_sync)
        mutex_unlock(buf_sync);
    return ret;
}
/* Set a 32-bit integer iovar (host-to-dongle byte order via htod32). */
s32 wldev_iovar_setint(
    struct net_device *dev, s8 *iovar, s32 val)
{
    s8 iovar_buf[WLC_IOCTL_SMLEN];
    val = htod32(val);
    memset(iovar_buf, 0, sizeof(iovar_buf));
    return wldev_iovar_setbuf(dev, iovar, &val, sizeof(val), iovar_buf,
        sizeof(iovar_buf), NULL);
}
/* Get a 32-bit integer iovar into *pval (dongle-to-host via dtoh32). */
s32 wldev_iovar_getint(
    struct net_device *dev, s8 *iovar, s32 *pval)
{
    s8 iovar_buf[WLC_IOCTL_SMLEN];
    s32 err;
    memset(iovar_buf, 0, sizeof(iovar_buf));
    err = wldev_iovar_getbuf(dev, iovar, pval, sizeof(*pval), iovar_buf,
        sizeof(iovar_buf), NULL);
    if (err == 0)
    {
        memcpy(pval, iovar_buf, sizeof(*pval));
        *pval = dtoh32(*pval);
    }
    return err;
}
/** Format a bsscfg indexed iovar buffer. The bsscfg index will be
* taken care of in dhd_ioctl_entry. Internal use only, not exposed to
* wl_iw, wl_cfg80211 and wl_cfgp2p
*/
/** Format a bsscfg indexed iovar buffer. The bsscfg index will be
 * taken care of in dhd_ioctl_entry. Internal use only, not exposed to
 * wl_iw, wl_cfg80211 and wl_cfgp2p.
 *
 * Layout produced: "bsscfg:" prefix (no NUL), iovar name (with NUL),
 * 32-bit bss index, then the parameter bytes.  Returns the total length
 * or BCME_BUFTOOSHORT.
 */
s32 wldev_mkiovar_bsscfg(
    const s8 *iovar_name, s8 *param, s32 paramlen,
    s8 *iovar_buf, s32 buflen, s32 bssidx)
{
    const s8 *prefix = "bsscfg:";
    s8 *p;
    u32 prefixlen;
    u32 namelen;
    u32 iolen;
    /* Index 0 needs no prefix: fall back to the plain iovar format. */
    if (bssidx == 0) {
        return wldev_mkiovar((s8*)iovar_name, (s8 *)param, paramlen,
            (s8 *) iovar_buf, buflen);
    }
    prefixlen = (u32) strlen(prefix); /* lengh of bsscfg prefix */
    namelen = (u32) strlen(iovar_name) + 1; /* lengh of iovar name + null */
    iolen = prefixlen + namelen + sizeof(u32) + paramlen;
    if (buflen < 0 || iolen > (u32)buflen)
    {
        WLDEV_ERROR(("%s: buffer is too short\n", __FUNCTION__));
        return BCME_BUFTOOSHORT;
    }
    p = (s8 *)iovar_buf;
    /* copy prefix, no null */
    memcpy(p, prefix, prefixlen);
    p += prefixlen;
    /* copy iovar name including null */
    memcpy(p, iovar_name, namelen);
    p += namelen;
    /* bss config index as first param */
    bssidx = htod32(bssidx);
    memcpy(p, &bssidx, sizeof(u32));
    p += sizeof(u32);
    /* parameter buffer follows */
    if (paramlen)
        memcpy(p, param, paramlen);
    return iolen;
}
/* bsscfg-indexed variant of wldev_iovar_getbuf(). */
s32 wldev_iovar_getbuf_bsscfg(
    struct net_device *dev, s8 *iovar_name,
    void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
{
    s32 ret = 0;
    if (buf_sync) {
        mutex_lock(buf_sync);
    }
    wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
    ret = wldev_ioctl(dev, WLC_GET_VAR, buf, buflen, FALSE);
    if (buf_sync) {
        mutex_unlock(buf_sync);
    }
    return ret;
}
/* bsscfg-indexed variant of wldev_iovar_setbuf(). */
s32 wldev_iovar_setbuf_bsscfg(
    struct net_device *dev, s8 *iovar_name,
    void *param, s32 paramlen, void *buf, s32 buflen, s32 bsscfg_idx, struct mutex* buf_sync)
{
    s32 ret = 0;
    s32 iovar_len;
    if (buf_sync) {
        mutex_lock(buf_sync);
    }
    iovar_len = wldev_mkiovar_bsscfg(iovar_name, param, paramlen, buf, buflen, bsscfg_idx);
    ret = wldev_ioctl(dev, WLC_SET_VAR, buf, iovar_len, TRUE);
    if (buf_sync) {
        mutex_unlock(buf_sync);
    }
    return ret;
}
/* bsscfg-indexed variant of wldev_iovar_setint(). */
s32 wldev_iovar_setint_bsscfg(
    struct net_device *dev, s8 *iovar, s32 val, s32 bssidx)
{
    s8 iovar_buf[WLC_IOCTL_SMLEN];
    val = htod32(val);
    memset(iovar_buf, 0, sizeof(iovar_buf));
    return wldev_iovar_setbuf_bsscfg(dev, iovar, &val, sizeof(val), iovar_buf,
        sizeof(iovar_buf), bssidx, NULL);
}
/* bsscfg-indexed variant of wldev_iovar_getint(). */
s32 wldev_iovar_getint_bsscfg(
    struct net_device *dev, s8 *iovar, s32 *pval, s32 bssidx)
{
    s8 iovar_buf[WLC_IOCTL_SMLEN];
    s32 err;
    memset(iovar_buf, 0, sizeof(iovar_buf));
    err = wldev_iovar_getbuf_bsscfg(dev, iovar, pval, sizeof(*pval), iovar_buf,
        sizeof(iovar_buf), bssidx, NULL);
    if (err == 0)
    {
        memcpy(pval, iovar_buf, sizeof(*pval));
        *pval = dtoh32(*pval);
    }
    return err;
}
/*
 * Query the current link speed in Kbps.
 * NOTE(review): returns -ENOMEM for a NULL output pointer; -EINVAL would
 * be more conventional, but callers may rely on the existing value.
 */
int wldev_get_link_speed(
    struct net_device *dev, int *plink_speed)
{
    int error;
    if (!plink_speed)
        return -ENOMEM;
    error = wldev_ioctl(dev, WLC_GET_RATE, plink_speed, sizeof(int), 0);
    if (unlikely(error))
        return error;
    /* Convert internal 500Kbps to Kbps */
    *plink_speed *= 500;
    return error;
}
/* Query the current RSSI value (dongle byte order converted via dtoh32). */
int wldev_get_rssi(
    struct net_device *dev, int *prssi)
{
    scb_val_t scb_val;
    int error;
    if (!prssi)
        return -ENOMEM;
    bzero(&scb_val, sizeof(scb_val_t));
    error = wldev_ioctl(dev, WLC_GET_RSSI, &scb_val, sizeof(scb_val_t), 0);
    if (unlikely(error))
        return error;
    *prssi = dtoh32(scb_val.val);
    return error;
}
/* Query the currently associated SSID into *pssid. */
int wldev_get_ssid(
    struct net_device *dev, wlc_ssid_t *pssid)
{
    int error;
    if (!pssid)
        return -ENOMEM;
    error = wldev_ioctl(dev, WLC_GET_SSID, pssid, sizeof(wlc_ssid_t), 0);
    if (unlikely(error))
        return error;
    pssid->SSID_len = dtoh32(pssid->SSID_len);
    return error;
}
/* Query the currently configured radio band into *pband. */
int wldev_get_band(
    struct net_device *dev, uint *pband)
{
    return wldev_ioctl(dev, WLC_GET_BAND, pband, sizeof(uint), 0);
}
/*
 * Select the radio band.  Only WLC_BAND_AUTO, WLC_BAND_2G and
 * WLC_BAND_5G are accepted; any other value returns -1 without touching
 * the hardware.
 */
int wldev_set_band(
    struct net_device *dev, uint band)
{
    switch (band) {
    case WLC_BAND_AUTO:
    case WLC_BAND_2G:
    case WLC_BAND_5G:
        return wldev_ioctl(dev, WLC_SET_BAND, &band, sizeof(band), 1);
    default:
        return -1;
    }
}
/*
 * Change the regulatory country code.  If the dongle's current code
 * differs (or could not be read), the device is disassociated first,
 * then the (possibly platform-customized) country spec is written and
 * the bus layer is informed.
 *
 * Returns 0 on success or a negative error.
 */
int wldev_set_country(
    struct net_device *dev, char *country_code)
{
    int error = -1;
    wl_country_t cspec = {{0}, 0, {0}};
    scb_val_t scbval;
    char smbuf[WLC_IOCTL_SMLEN];
    if (!country_code)
        return error;
    error = wldev_iovar_getbuf(dev, "country", &cspec, sizeof(cspec),
        smbuf, sizeof(smbuf), NULL);
    if (error < 0)
        WLDEV_ERROR(("%s: get country failed = %d\n", __FUNCTION__, error));
    /* Disassociate if the requested code differs from the current one. */
    if ((error < 0) ||
        (strncmp(country_code, smbuf, WLC_CNTRY_BUF_SZ) != 0)) {
        bzero(&scbval, sizeof(scb_val_t));
        error = wldev_ioctl(dev, WLC_DISASSOC, &scbval, sizeof(scb_val_t), 1);
        if (error < 0) {
            WLDEV_ERROR(("%s: set country failed due to Disassoc error %d\n",
                __FUNCTION__, error));
            return error;
        }
    }
    /* rev = -1 lets the platform hook pick the revision. */
    cspec.rev = -1;
    memcpy(cspec.country_abbrev, country_code, WLC_CNTRY_BUF_SZ);
    memcpy(cspec.ccode, country_code, WLC_CNTRY_BUF_SZ);
    get_customized_country_code((char *)&cspec.country_abbrev, &cspec);
    error = wldev_iovar_setbuf(dev, "country", &cspec, sizeof(cspec),
        smbuf, sizeof(smbuf), NULL);
    if (error < 0) {
        WLDEV_ERROR(("%s: set country for %s as %s rev %d failed\n",
            __FUNCTION__, country_code, cspec.ccode, cspec.rev));
        return error;
    }
    dhd_bus_country_set(dev, &cspec);
    WLDEV_ERROR(("%s: set country for %s as %s rev %d\n",
        __FUNCTION__, country_code, cspec.ccode, cspec.rev));
    return 0;
}
| gpl-2.0 |
DJNoXD/rockchip-kernel-rk2918 | drivers/gpio/74x164.c | 2434 | 4172 | /*
* 74Hx164 - Generic serial-in/parallel-out 8-bits shift register GPIO driver
*
* Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2010 Miguel Gaio <miguel.gaio@efixo.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>
#include <linux/spi/74x164.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#define GEN_74X164_GPIO_COUNT 8
/* Driver state for one 74x164 shift register behind an SPI device. */
struct gen_74x164_chip {
    struct spi_device *spi;
    struct gpio_chip gpio_chip;
    struct mutex lock;      /* protects port_config and the SPI write */
    u8 port_config;         /* shadow of the 8 output pins */
};
/* Forward declaration: direction_output delegates to set_value. */
static void gen_74x164_set_value(struct gpio_chip *, unsigned, int);
/* Map a gpio_chip back to its containing gen_74x164_chip. */
static struct gen_74x164_chip *gpio_to_chip(struct gpio_chip *gc)
{
    return container_of(gc, struct gen_74x164_chip, gpio_chip);
}
/* Shift the shadow register out to the hardware over SPI. */
static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
{
    return spi_write(chip->spi,
             &chip->port_config, sizeof(chip->port_config));
}
/*
 * gpio_chip.direction_output: pins on a 74x164 are always outputs, so
 * this only has to latch the requested value.
 */
static int gen_74x164_direction_output(struct gpio_chip *gc,
        unsigned offset, int val)
{
    gen_74x164_set_value(gc, offset, val);
    return 0;
}
/*
 * gpio_chip.get: the 74x164 has no input path, so report the last value
 * written from the shadow register, under the lock.
 */
static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
{
    struct gen_74x164_chip *chip = gpio_to_chip(gc);
    int ret;
    mutex_lock(&chip->lock);
    ret = (chip->port_config >> offset) & 0x1;
    mutex_unlock(&chip->lock);
    return ret;
}
static void gen_74x164_set_value(struct gpio_chip *gc,
unsigned offset, int val)
{
struct gen_74x164_chip *chip = gpio_to_chip(gc);
mutex_lock(&chip->lock);
if (val)
chip->port_config |= (1 << offset);
else
chip->port_config &= ~(1 << offset);
__gen_74x164_write_config(chip);
mutex_unlock(&chip->lock);
}
/*
 * Bind one 74x164 to its SPI device: validate platform data, configure
 * the SPI word size, allocate state, write the initial (all-zero) port
 * configuration, and register the gpio_chip.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit gen_74x164_probe(struct spi_device *spi)
{
    struct gen_74x164_chip *chip;
    struct gen_74x164_chip_platform_data *pdata;
    int ret;
    /* The GPIO base number must come from platform data. */
    pdata = spi->dev.platform_data;
    if (!pdata || !pdata->base) {
        dev_dbg(&spi->dev, "incorrect or missing platform data\n");
        return -EINVAL;
    }
    /*
     * bits_per_word cannot be configured in platform data
     */
    spi->bits_per_word = 8;
    ret = spi_setup(spi);
    if (ret < 0)
        return ret;
    chip = kzalloc(sizeof(*chip), GFP_KERNEL);
    if (!chip)
        return -ENOMEM;
    mutex_init(&chip->lock);
    dev_set_drvdata(&spi->dev, chip);
    chip->spi = spi;
    chip->gpio_chip.label = GEN_74X164_DRIVER_NAME,
    chip->gpio_chip.direction_output = gen_74x164_direction_output;
    chip->gpio_chip.get = gen_74x164_get_value;
    chip->gpio_chip.set = gen_74x164_set_value;
    chip->gpio_chip.base = pdata->base;
    chip->gpio_chip.ngpio = GEN_74X164_GPIO_COUNT;
    /* SPI transfers may sleep, so GPIO accessors can sleep too. */
    chip->gpio_chip.can_sleep = 1;
    chip->gpio_chip.dev = &spi->dev;
    chip->gpio_chip.owner = THIS_MODULE;
    /* Drive all outputs low before exposing the chip. */
    ret = __gen_74x164_write_config(chip);
    if (ret) {
        dev_err(&spi->dev, "Failed writing: %d\n", ret);
        goto exit_destroy;
    }
    ret = gpiochip_add(&chip->gpio_chip);
    if (ret)
        goto exit_destroy;
    return ret;
exit_destroy:
    dev_set_drvdata(&spi->dev, NULL);
    mutex_destroy(&chip->lock);
    kfree(chip);
    return ret;
}
/*
 * Remove: unregister the gpio_chip and free the driver state.
 * If gpiochip_remove() fails (GPIOs still requested) the state is
 * deliberately left intact and the error is propagated.
 */
static int __devexit gen_74x164_remove(struct spi_device *spi)
{
	struct gen_74x164_chip *chip;
	int ret;

	chip = dev_get_drvdata(&spi->dev);
	if (chip == NULL)
		return -ENODEV;

	dev_set_drvdata(&spi->dev, NULL);

	ret = gpiochip_remove(&chip->gpio_chip);
	if (!ret) {
		mutex_destroy(&chip->lock);
		kfree(chip);
	} else
		dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
			ret);

	return ret;
}
/* SPI driver glue: binds by name (GEN_74X164_DRIVER_NAME). */
static struct spi_driver gen_74x164_driver = {
	.driver = {
		.name		= GEN_74X164_DRIVER_NAME,
		.owner		= THIS_MODULE,
	},
	.probe		= gen_74x164_probe,
	.remove		= __devexit_p(gen_74x164_remove),
};
/*
 * Registered at subsys_initcall time (earlier than module_init) so the
 * expander's GPIOs are available to other drivers as they probe.
 */
static int __init gen_74x164_init(void)
{
	return spi_register_driver(&gen_74x164_driver);
}
subsys_initcall(gen_74x164_init);

static void __exit gen_74x164_exit(void)
{
	spi_unregister_driver(&gen_74x164_driver);
}
module_exit(gen_74x164_exit);
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
MODULE_DESCRIPTION("GPIO expander driver for 74X164 8-bits shift register");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
OtherCrashOverride/linux | drivers/spi/spi_oc_tiny.c | 2946 | 10626 | /*
* OpenCores tiny SPI master driver
*
* http://opencores.org/project,tiny_spi
*
* Copyright (C) 2011 Thomas Chou <thomas@wytron.com.tw>
*
* Based on spi_s3c24xx.c, which is:
* Copyright (c) 2006 Ben Dooks
* Copyright (c) 2006 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/spi/spi_oc_tiny.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/of.h>
#define DRV_NAME "spi_oc_tiny"
#define TINY_SPI_RXDATA 0
#define TINY_SPI_TXDATA 4
#define TINY_SPI_STATUS 8
#define TINY_SPI_CONTROL 12
#define TINY_SPI_BAUD 16
#define TINY_SPI_STATUS_TXE 0x1
#define TINY_SPI_STATUS_TXR 0x2
/* Per-controller state for the OpenCores tiny SPI master. */
struct tiny_spi {
	/* bitbang has to be first */
	struct spi_bitbang bitbang;
	struct completion done;		/* signalled when an IRQ transfer ends */
	void __iomem *base;		/* mapped controller registers */
	int irq;			/* negative => polled transfers only */
	unsigned int freq;		/* input clock frequency (Hz) */
	unsigned int baudwidth;		/* bit width of the baud divider */
	unsigned int baud;		/* cached divider for speed_hz */
	unsigned int speed_hz;		/* speed the cached baud was computed for */
	unsigned int mode;		/* SPI_CPOL/SPI_CPHA bits */
	/* state of the in-flight IRQ-driven transfer */
	unsigned int len;
	unsigned int txc, rxc;		/* bytes queued / bytes received */
	const u8 *txp;
	u8 *rxp;
	unsigned int gpio_cs_count;	/* number of GPIO chip-selects */
	int *gpio_cs;
};
/* Map an spi_device back to its controller's driver state. */
static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
{
	return spi_master_get_devdata(sdev->master);
}
/*
 * Compute the baud-rate divider for the requested SCK frequency,
 * clamped to the register's width and biased by -1 as the hardware
 * expects (divider 0 = fastest).
 */
static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz)
{
	struct tiny_spi *hw = tiny_spi_to_hw(spi);

	return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
}
/*
 * Drive the chip-select GPIO for this device, honouring SPI_CS_HIGH.
 * A controller without GPIO chip-selects leaves CS to external logic.
 */
static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
{
	struct tiny_spi *hw = tiny_spi_to_hw(spi);
	int level;

	if (!hw->gpio_cs_count)
		return;

	level = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active;
	gpio_set_value(hw->gpio_cs[spi->chip_select], level);
}
/*
 * Per-transfer setup: program the baud divider (recomputed if the
 * transfer overrides the device's default speed) and the clock mode.
 * Returns 0.
 */
static int tiny_spi_setup_transfer(struct spi_device *spi,
				   struct spi_transfer *t)
{
	struct tiny_spi *hw = tiny_spi_to_hw(spi);
	unsigned int baud = hw->baud;

	if (t) {
		/* per-transfer speed override */
		if (t->speed_hz && t->speed_hz != hw->speed_hz)
			baud = tiny_spi_baud(spi, t->speed_hz);
	}
	writel(baud, hw->base + TINY_SPI_BAUD);
	writel(hw->mode, hw->base + TINY_SPI_CONTROL);
	return 0;
}
/*
 * Per-device setup: cache the default baud divider for the device's
 * max speed and remember the CPOL/CPHA mode bits.  Registers are only
 * written later, in tiny_spi_setup_transfer().
 */
static int tiny_spi_setup(struct spi_device *spi)
{
	struct tiny_spi *hw = tiny_spi_to_hw(spi);

	if (spi->max_speed_hz != hw->speed_hz) {
		hw->speed_hz = spi->max_speed_hz;
		hw->baud = tiny_spi_baud(spi, hw->speed_hz);
	}
	hw->mode = spi->mode & (SPI_CPOL | SPI_CPHA);
	return 0;
}
/* Busy-wait until the TX register is ready to accept the next byte. */
static inline void tiny_spi_wait_txr(struct tiny_spi *hw)
{
	for (;;) {
		if (readb(hw->base + TINY_SPI_STATUS) & TINY_SPI_STATUS_TXR)
			return;
		cpu_relax();
	}
}
/* Busy-wait until the transmitter is completely empty. */
static inline void tiny_spi_wait_txe(struct tiny_spi *hw)
{
	for (;;) {
		if (readb(hw->base + TINY_SPI_STATUS) & TINY_SPI_STATUS_TXE)
			return;
		cpu_relax();
	}
}
/*
 * Shift t->len bytes out and/or in.  Four paths: interrupt-driven
 * (hw->irq >= 0), and three tightened polled loops for full-duplex,
 * rx-only and tx-only transfers.  Two bytes are primed up front so the
 * shifter never runs dry.  Returns the number of bytes transferred.
 *
 * NOTE(review): mid-stream RX bytes are read from the TXDATA offset and
 * only the final byte from RXDATA -- presumably TXDATA aliases the last
 * shifted-in byte while a transfer is active; confirm against the
 * tiny_spi HDL.
 */
static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
{
	struct tiny_spi *hw = tiny_spi_to_hw(spi);
	const u8 *txp = t->tx_buf;
	u8 *rxp = t->rx_buf;
	unsigned int i;

	if (hw->irq >= 0) {
		/* use interrupt driven data transfer */
		hw->len = t->len;
		hw->txp = t->tx_buf;
		hw->rxp = t->rx_buf;
		hw->txc = 0;
		hw->rxc = 0;
		/* send the first byte */
		if (t->len > 1) {
			writeb(hw->txp ? *hw->txp++ : 0,
			       hw->base + TINY_SPI_TXDATA);
			hw->txc++;
			writeb(hw->txp ? *hw->txp++ : 0,
			       hw->base + TINY_SPI_TXDATA);
			hw->txc++;
			/* interrupt when TX ready for another byte */
			writeb(TINY_SPI_STATUS_TXR, hw->base + TINY_SPI_STATUS);
		} else {
			writeb(hw->txp ? *hw->txp++ : 0,
			       hw->base + TINY_SPI_TXDATA);
			hw->txc++;
			/* single byte: interrupt on TX empty */
			writeb(TINY_SPI_STATUS_TXE, hw->base + TINY_SPI_STATUS);
		}
		wait_for_completion(&hw->done);
	} else if (txp && rxp) {
		/* we need to tighten the transfer loop */
		writeb(*txp++, hw->base + TINY_SPI_TXDATA);
		if (t->len > 1) {
			writeb(*txp++, hw->base + TINY_SPI_TXDATA);
			for (i = 2; i < t->len; i++) {
				u8 rx, tx = *txp++;
				tiny_spi_wait_txr(hw);
				rx = readb(hw->base + TINY_SPI_TXDATA);
				writeb(tx, hw->base + TINY_SPI_TXDATA);
				*rxp++ = rx;
			}
			tiny_spi_wait_txr(hw);
			*rxp++ = readb(hw->base + TINY_SPI_TXDATA);
		}
		tiny_spi_wait_txe(hw);
		*rxp++ = readb(hw->base + TINY_SPI_RXDATA);
	} else if (rxp) {
		/* rx-only: clock out zeros */
		writeb(0, hw->base + TINY_SPI_TXDATA);
		if (t->len > 1) {
			writeb(0,
			       hw->base + TINY_SPI_TXDATA);
			for (i = 2; i < t->len; i++) {
				u8 rx;
				tiny_spi_wait_txr(hw);
				rx = readb(hw->base + TINY_SPI_TXDATA);
				writeb(0, hw->base + TINY_SPI_TXDATA);
				*rxp++ = rx;
			}
			tiny_spi_wait_txr(hw);
			*rxp++ = readb(hw->base + TINY_SPI_TXDATA);
		}
		tiny_spi_wait_txe(hw);
		*rxp++ = readb(hw->base + TINY_SPI_RXDATA);
	} else if (txp) {
		/* tx-only: discard received data */
		writeb(*txp++, hw->base + TINY_SPI_TXDATA);
		if (t->len > 1) {
			writeb(*txp++, hw->base + TINY_SPI_TXDATA);
			for (i = 2; i < t->len; i++) {
				u8 tx = *txp++;
				tiny_spi_wait_txr(hw);
				writeb(tx, hw->base + TINY_SPI_TXDATA);
			}
		}
		tiny_spi_wait_txe(hw);
	} else {
		/* neither buffer: just clock the bus */
		writeb(0, hw->base + TINY_SPI_TXDATA);
		if (t->len > 1) {
			writeb(0, hw->base + TINY_SPI_TXDATA);
			for (i = 2; i < t->len; i++) {
				tiny_spi_wait_txr(hw);
				writeb(0, hw->base + TINY_SPI_TXDATA);
			}
		}
		tiny_spi_wait_txe(hw);
	}
	return t->len;
}
/*
 * IRQ handler for interrupt-driven transfers: ack the event, collect a
 * received byte, queue the next TX byte, and complete hw->done once the
 * final byte has arrived.
 */
static irqreturn_t tiny_spi_irq(int irq, void *dev)
{
	struct tiny_spi *hw = dev;

	/* ack/clear the interrupt source */
	writeb(0, hw->base + TINY_SPI_STATUS);
	if (hw->rxc + 1 == hw->len) {
		/* last byte: read from RXDATA and finish */
		if (hw->rxp)
			*hw->rxp++ = readb(hw->base + TINY_SPI_RXDATA);
		hw->rxc++;
		complete(&hw->done);
	} else {
		/* mid-stream bytes come back through TXDATA */
		if (hw->rxp)
			*hw->rxp++ = readb(hw->base + TINY_SPI_TXDATA);
		hw->rxc++;
		if (hw->txc < hw->len) {
			writeb(hw->txp ? *hw->txp++ : 0,
			       hw->base + TINY_SPI_TXDATA);
			hw->txc++;
			writeb(TINY_SPI_STATUS_TXR,
			       hw->base + TINY_SPI_STATUS);
		} else {
			/* nothing left to send; switch to TX-empty irq */
			writeb(TINY_SPI_STATUS_TXE,
			       hw->base + TINY_SPI_STATUS);
		}
	}
	return IRQ_HANDLED;
}
#ifdef CONFIG_OF
#include <linux/of_gpio.h>

/*
 * Populate hw from device-tree properties: chip-select GPIOs,
 * "clock-frequency" and "baud-width".  Returns 0 if no DT node is
 * attached or on success, negative errno otherwise.
 */
static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
{
	struct tiny_spi *hw = platform_get_drvdata(pdev);
	struct device_node *np = pdev->dev.of_node;
	unsigned int i;
	const __be32 *val;
	int len;

	if (!np)
		return 0;	/* not instantiated from the device tree */
	hw->gpio_cs_count = of_gpio_count(np);
	if (hw->gpio_cs_count) {
		hw->gpio_cs = devm_kzalloc(&pdev->dev,
				hw->gpio_cs_count * sizeof(unsigned int),
				GFP_KERNEL);
		if (!hw->gpio_cs)
			return -ENOMEM;
	}
	for (i = 0; i < hw->gpio_cs_count; i++) {
		hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL);
		if (hw->gpio_cs[i] < 0)
			return -ENODEV;
	}
	hw->bitbang.master->dev.of_node = pdev->dev.of_node;
	val = of_get_property(pdev->dev.of_node,
			      "clock-frequency", &len);
	if (val && len >= sizeof(__be32))
		hw->freq = be32_to_cpup(val);
	val = of_get_property(pdev->dev.of_node, "baud-width", &len);
	if (val && len >= sizeof(__be32))
		hw->baudwidth = be32_to_cpup(val);
	return 0;
}
#else /* !CONFIG_OF */
/* No OF support: nothing to parse; platform data must supply config. */
static int __devinit tiny_spi_of_probe(struct platform_device *pdev)
{
	return 0;
}
#endif /* CONFIG_OF */
/*
 * Probe: allocate the SPI master, map the register window (devm-managed),
 * optionally hook the IRQ, take configuration from platform data or the
 * device tree, claim the chip-select GPIOs and start the bitbang core.
 *
 * Fixes over the previous version:
 *  - the exit_gpio path used to fall through into exit_busy, clobbering
 *    the real gpio_request() error with -EBUSY;
 *  - a spi_bitbang_start() failure used to leak the requested CS GPIOs.
 */
static int __devinit tiny_spi_probe(struct platform_device *pdev)
{
	struct tiny_spi_platform_data *platp = pdev->dev.platform_data;
	struct tiny_spi *hw;
	struct spi_master *master;
	struct resource *res;
	unsigned int i;
	int err = -ENODEV;

	master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
	if (!master)
		return err;

	/* setup the master state. */
	master->bus_num = pdev->id;
	master->num_chipselect = 255;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
	master->setup = tiny_spi_setup;

	hw = spi_master_get_devdata(master);
	platform_set_drvdata(pdev, hw);

	/* setup the state for the bitbang driver */
	hw->bitbang.master = spi_master_get(master);
	if (!hw->bitbang.master)
		return err;
	hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
	hw->bitbang.chipselect = tiny_spi_chipselect;
	hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;

	/* find and map our resources */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		goto exit_busy;
	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
				     pdev->name))
		goto exit_busy;
	hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
					resource_size(res));
	if (!hw->base)
		goto exit_busy;

	/* irq is optional */
	hw->irq = platform_get_irq(pdev, 0);
	if (hw->irq >= 0) {
		init_completion(&hw->done);
		err = devm_request_irq(&pdev->dev, hw->irq, tiny_spi_irq, 0,
				       pdev->name, hw);
		if (err)
			goto exit;
	}

	/* find platform data */
	if (platp) {
		hw->gpio_cs_count = platp->gpio_cs_count;
		hw->gpio_cs = platp->gpio_cs;
		if (platp->gpio_cs_count && !platp->gpio_cs)
			goto exit_busy;
		hw->freq = platp->freq;
		hw->baudwidth = platp->baudwidth;
	} else {
		err = tiny_spi_of_probe(pdev);
		if (err)
			goto exit;
	}
	for (i = 0; i < hw->gpio_cs_count; i++) {
		err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev));
		if (err)
			goto exit_gpio;
		gpio_direction_output(hw->gpio_cs[i], 1);
	}
	hw->bitbang.master->num_chipselect = max(1U, hw->gpio_cs_count);

	/* register our spi controller */
	err = spi_bitbang_start(&hw->bitbang);
	if (err)
		goto exit_gpio;	/* i == gpio_cs_count: frees all CS GPIOs */
	dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq);

	return 0;

exit_gpio:
	while (i-- > 0)
		gpio_free(hw->gpio_cs[i]);
	goto exit;		/* keep the real error code */
exit_busy:
	err = -EBUSY;
exit:
	platform_set_drvdata(pdev, NULL);
	spi_master_put(master);
	return err;
}
/*
 * Remove: stop the bitbang core, release the chip-select GPIOs and drop
 * the master reference.  MMIO/IRQ resources are devm-managed and freed
 * automatically.
 */
static int __devexit tiny_spi_remove(struct platform_device *pdev)
{
	struct tiny_spi *hw = platform_get_drvdata(pdev);
	struct spi_master *master = hw->bitbang.master;
	unsigned int i;

	spi_bitbang_stop(&hw->bitbang);
	for (i = 0; i < hw->gpio_cs_count; i++)
		gpio_free(hw->gpio_cs[i]);
	platform_set_drvdata(pdev, NULL);
	spi_master_put(master);
	return 0;
}
#ifdef CONFIG_OF
/* Device-tree match table. */
static const struct of_device_id tiny_spi_match[] = {
	{ .compatible = "opencores,tiny-spi-rtlsvn2", },
	{},
};
MODULE_DEVICE_TABLE(of, tiny_spi_match);
#else /* CONFIG_OF */
#define tiny_spi_match NULL
#endif /* CONFIG_OF */

/* Platform driver glue; no PM ops are implemented. */
static struct platform_driver tiny_spi_driver = {
	.probe = tiny_spi_probe,
	.remove = __devexit_p(tiny_spi_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.pm = NULL,
		.of_match_table = tiny_spi_match,
	},
};
/* Standard module entry/exit: (un)register the platform driver. */
static int __init tiny_spi_init(void)
{
	return platform_driver_register(&tiny_spi_driver);
}
module_init(tiny_spi_init);

static void __exit tiny_spi_exit(void)
{
	platform_driver_unregister(&tiny_spi_driver);
}
module_exit(tiny_spi_exit);
MODULE_DESCRIPTION("OpenCores tiny SPI driver");
MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| gpl-2.0 |
insanelycool/VICK | drivers/staging/intel_sst/intelmid_msic_control.c | 2946 | 25995 | /*
* intelmid_vm_control.c - Intel Sound card driver for MID
*
* Copyright (C) 2010 Intel Corp
* Authors: Vinod Koul <vinod.koul@intel.com>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This file contains the control operations of msic vendors
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/pci.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <sound/control.h>
#include "intel_sst.h"
#include <linux/input.h>
#include "intelmid_snd_control.h"
#include "intelmid.h"
#define AUDIOMUX12 0x24c
#define AUDIOMUX34 0x24d
/*
 * One-time MSIC audio initialisation: program the power-on defaults
 * (mic config, audio routing, PCM2 slot map, volumes, filters and
 * interrupt unmasking) and reset the cached driver state.
 *
 * Fix: the register count passed to sst_sc_reg_access() was the magic
 * number 28, which silently goes stale whenever the table is edited;
 * ARRAY_SIZE() keeps it in sync automatically.
 *
 * Always returns 0.
 */
static int msic_init_card(void)
{
	struct sc_reg_access sc_access[] = {
		/* dmic configuration */
		{0x241, 0x85, 0},
		{0x242, 0x02, 0},
		/* audio paths config */
		{0x24C, 0x10, 0},
		{0x24D, 0x32, 0},
		/* PCM2 interface slots */
		/* preconfigured slots for 0-5 both tx, rx */
		{0x272, 0x10, 0},
		{0x273, 0x32, 0},
		{0x274, 0xFF, 0},
		{0x275, 0x10, 0},
		{0x276, 0x32, 0},
		{0x277, 0x54, 0},
		/*Sinc5 decimator*/
		{0x24E, 0x28, 0},
		/*TI vibra w/a settings*/
		{0x384, 0x80, 0},
		{0x385, 0x80, 0},
		{0x267, 0x00, 0},
		{0x261, 0x00, 0},
		/* pcm port setting */
		{0x278, 0x00, 0},
		{0x27B, 0x01, 0},
		{0x27C, 0x0a, 0},
		/* Set vol HSLRVOLCTRL, IHFVOL */
		{0x259, 0x08, 0},
		{0x25A, 0x08, 0},
		{0x25B, 0x08, 0},
		{0x25C, 0x08, 0},
		/* HSEPRXCTRL Enable the headset left and right FIR filters */
		{0x250, 0x30, 0},
		/* HSMIXER */
		{0x256, 0x11, 0},
		/* amic configuration */
		{0x249, 0x01, 0x0},
		{0x24A, 0x01, 0x0},
		/* unmask ocaudio/accdet interrupts */
		{0x1d, 0x00, 0x00},
		{0x1e, 0x00, 0x00},
	};

	snd_msic_ops.card_status = SND_CARD_INIT_DONE;
	sst_sc_reg_access(sc_access, PMIC_WRITE, ARRAY_SIZE(sc_access));
	snd_msic_ops.pb_on = 0;
	snd_msic_ops.pbhs_on = 0;
	snd_msic_ops.cap_on = 0;
	snd_msic_ops.input_dev_id = DMIC; /*def dev*/
	snd_msic_ops.output_dev_id = STEREO_HEADPHONE;
	snd_msic_ops.jack_interrupt_status = false;
	pr_debug("msic init complete!!\n");
	return 0;
}
/*
 * Re-enable the output driver for the device that previously owned the
 * line-out path (identified by @value) after a line-out switch.
 * Returns 0 on success, -EINVAL for an unknown device id.
 */
static int msic_line_out_restore(u8 value)
{
	struct sc_reg_access hs_drv_en[] = {
		{0x25d, 0x03, 0x03},
	};
	struct sc_reg_access ep_drv_en[] = {
		{0x25d, 0x40, 0x40},
	};
	struct sc_reg_access ihf_drv_en[] = {
		{0x25d, 0x0c, 0x0c},
	};
	struct sc_reg_access vib1_drv_en[] = {
		{0x25d, 0x10, 0x10},
	};
	struct sc_reg_access vib2_drv_en[] = {
		{0x25d, 0x20, 0x20},
	};
	struct sc_reg_access pmode_enable[] = {
		{0x381, 0x10, 0x10},
	};
	int retval = 0;

	pr_debug("msic_lineout_restore_lineout_dev:%d\n", value);

	switch (value) {
	case HEADSET:
		/* headset jack may carry either stereo HP or earpiece */
		pr_debug("Selecting Lineout-HEADSET-restore\n");
		if (snd_msic_ops.output_dev_id == STEREO_HEADPHONE)
			retval = sst_sc_reg_access(hs_drv_en,
						   PMIC_READ_MODIFY, 1);
		else
			retval = sst_sc_reg_access(ep_drv_en,
						   PMIC_READ_MODIFY, 1);
		break;
	case IHF:
		/* speaker additionally needs the power mode bit */
		pr_debug("Selecting Lineout-IHF-restore\n");
		retval = sst_sc_reg_access(ihf_drv_en, PMIC_READ_MODIFY, 1);
		if (retval)
			return retval;
		retval = sst_sc_reg_access(pmode_enable, PMIC_READ_MODIFY, 1);
		break;
	case VIBRA1:
		pr_debug("Selecting Lineout-Vibra1-restore\n");
		retval = sst_sc_reg_access(vib1_drv_en, PMIC_READ_MODIFY, 1);
		break;
	case VIBRA2:
		pr_debug("Selecting Lineout-VIBRA2-restore\n");
		retval = sst_sc_reg_access(vib2_drv_en, PMIC_READ_MODIFY, 1);
		break;
	case NONE:
		pr_debug("Selecting Lineout-NONE-restore\n");
		break;
	default:
		return -EINVAL;
	}
	return retval;
}
/*
 * Determine which output device currently owns the line-out path by
 * scanning each candidate: a device whose DAC is enabled while its
 * driver is disabled is the one that was rerouted to line-out.  The
 * result is stored in snd_msic_ops.prev_lineout_dev_id.
 * Returns 0, or -EINVAL for an unexpected device id.
 */
static int msic_get_lineout_prvstate(void)
{
	struct sc_reg_access hs_ihf_drv[2] = {
		{0x257, 0x0, 0x0},
		{0x25d, 0x0, 0x0},
	};
	struct sc_reg_access vib1drv[2] = {
		{0x264, 0x0, 0x0},
		{0x25D, 0x0, 0x0},
	};
	struct sc_reg_access vib2drv[2] = {
		{0x26A, 0x0, 0x0},
		{0x25D, 0x0, 0x0},
	};
	int retval = 0, drv_en, dac_en, dev_id, mask;

	for (dev_id = 0; dev_id < snd_msic_ops.line_out_names_cnt; dev_id++) {
		switch (dev_id) {
		case HEADSET:
			pr_debug("msic_get_lineout_prvs_state: HEADSET\n");
			sst_sc_reg_access(hs_ihf_drv, PMIC_READ, 2);
			mask = (MASK0|MASK1);
			dac_en = (hs_ihf_drv[0].value) & mask;
			mask = ((MASK0|MASK1)|MASK6);
			drv_en = (hs_ihf_drv[1].value) & mask;
			if (dac_en && (!drv_en)) {
				snd_msic_ops.prev_lineout_dev_id = HEADSET;
				return retval;
			}
			break;
		case IHF:
			pr_debug("msic_get_lineout_prvstate: IHF\n");
			sst_sc_reg_access(hs_ihf_drv, PMIC_READ, 2);
			mask = (MASK2 | MASK3);
			dac_en = (hs_ihf_drv[0].value) & mask;
			mask = (MASK2 | MASK3);
			drv_en = (hs_ihf_drv[1].value) & mask;
			if (dac_en && (!drv_en)) {
				snd_msic_ops.prev_lineout_dev_id = IHF;
				return retval;
			}
			break;
		case VIBRA1:
			pr_debug("msic_get_lineout_prvstate: vibra1\n");
			sst_sc_reg_access(vib1drv, PMIC_READ, 2);
			mask = MASK1;
			dac_en = (vib1drv[0].value) & mask;
			mask = MASK4;
			drv_en = (vib1drv[1].value) & mask;
			if (dac_en && (!drv_en)) {
				snd_msic_ops.prev_lineout_dev_id = VIBRA1;
				return retval;
			}
			break;
		case VIBRA2:
			pr_debug("msic_get_lineout_prvstate: vibra2\n");
			sst_sc_reg_access(vib2drv, PMIC_READ, 2);
			mask = MASK1;
			dac_en = (vib2drv[0].value) & mask;
			mask = MASK5;
			drv_en = ((vib2drv[1].value) & mask);
			if (dac_en && (!drv_en)) {
				snd_msic_ops.prev_lineout_dev_id = VIBRA2;
				return retval;
			}
			break;
		case NONE:
			pr_debug("msic_get_lineout_prvstate: NONE\n");
			snd_msic_ops.prev_lineout_dev_id = NONE;
			return retval;
		default:
			pr_debug("Invalid device id\n");
			snd_msic_ops.prev_lineout_dev_id = NONE;
			return -EINVAL;
		}
	}
	return retval;
}
/*
 * Route the line-out path to @value (HEADSET/IHF/VIBRA1/VIBRA2/NONE).
 * The device that previously owned line-out has its driver restored
 * first; the new routing is only programmed while playback is active
 * (pb_on), except for NONE which always resets the mux.
 * Returns 0 on success or a negative errno.
 */
static int msic_set_selected_lineout_dev(u8 value)
{
	struct sc_reg_access lout_hs[] = {
		{0x25e, 0x33, 0xFF},
		{0x25d, 0x0, 0x43},
	};
	struct sc_reg_access lout_ihf[] = {
		{0x25e, 0x55, 0xff},
		{0x25d, 0x0, 0x0c},
	};
	struct sc_reg_access lout_vibra1[] = {
		{0x25e, 0x61, 0xff},
		{0x25d, 0x0, 0x10},
	};
	struct sc_reg_access lout_vibra2[] = {
		{0x25e, 0x16, 0xff},
		{0x25d, 0x0, 0x20},
	};
	struct sc_reg_access lout_def[] = {
		{0x25e, 0x66, 0x0},
	};
	struct sc_reg_access pmode_disable[] = {
		{0x381, 0x00, 0x10},
	};
	struct sc_reg_access pmode_enable[] = {
		{0x381, 0x10, 0x10},
	};
	int retval = 0;

	pr_debug("msic_set_selected_lineout_dev:%d\n", value);

	/* hand the path back to its previous owner before re-routing */
	msic_get_lineout_prvstate();
	msic_line_out_restore(snd_msic_ops.prev_lineout_dev_id);
	snd_msic_ops.lineout_dev_id = value;

	switch (value) {
	case HEADSET:
		pr_debug("Selecting Lineout-HEADSET\n");
		if (snd_msic_ops.pb_on)
			retval = sst_sc_reg_access(lout_hs,
					PMIC_READ_MODIFY, 2);
		if (retval)
			return retval;
		retval = sst_sc_reg_access(pmode_disable,
				PMIC_READ_MODIFY, 1);
		break;
	case IHF:
		pr_debug("Selecting Lineout-IHF\n");
		if (snd_msic_ops.pb_on)
			retval = sst_sc_reg_access(lout_ihf,
					PMIC_READ_MODIFY, 2);
		if (retval)
			return retval;
		retval = sst_sc_reg_access(pmode_enable,
				PMIC_READ_MODIFY, 1);
		break;
	case VIBRA1:
		pr_debug("Selecting Lineout-Vibra1\n");
		if (snd_msic_ops.pb_on)
			retval = sst_sc_reg_access(lout_vibra1,
					PMIC_READ_MODIFY, 2);
		if (retval)
			return retval;
		retval = sst_sc_reg_access(pmode_disable,
				PMIC_READ_MODIFY, 1);
		break;
	case VIBRA2:
		pr_debug("Selecting Lineout-VIBRA2\n");
		if (snd_msic_ops.pb_on)
			retval = sst_sc_reg_access(lout_vibra2,
					PMIC_READ_MODIFY, 2);
		if (retval)
			return retval;
		retval = sst_sc_reg_access(pmode_disable,
				PMIC_READ_MODIFY, 1);
		break;
	case NONE:
		pr_debug("Selecting Lineout-NONE\n");
		retval = sst_sc_reg_access(lout_def,
				PMIC_WRITE, 1);
		if (retval)
			return retval;
		retval = sst_sc_reg_access(pmode_disable,
				PMIC_READ_MODIFY, 1);
		break;
	default:
		return -EINVAL;
	}
	return retval;
}
/*
 * Power up the playback path for @device: enable the audio supplies and
 * PLL, then the device-specific DAC/filter/driver chain, and finally
 * the PCM2 port.  Also re-applies the line-out routing if this device
 * currently owns it.  Returns the status of the final PCM2 write.
 */
static int msic_power_up_pb(unsigned int device)
{
	struct sc_reg_access vaud[] = {
		/* turn on the audio power supplies */
		{0x0DB, 0x07, 0},
	};
	struct sc_reg_access pll[] = {
		/* turn on PLL */
		{0x240, 0x20, 0},
	};
	struct sc_reg_access vhs[] = {
		/* VHSP */
		{0x0DC, 0x3D, 0},
		/* VHSN */
		{0x0DD, 0x3F, 0},
	};
	struct sc_reg_access hsdac[] = {
		{0x382, 0x40, 0x40},
		/* disable driver */
		{0x25D, 0x0, 0x43},
		/* DAC CONFIG ; both HP, LP on */
		{0x257, 0x03, 0x03},
	};
	struct sc_reg_access hs_filter[] = {
		/* HSEPRXCTRL Enable the headset left and right FIR filters */
		{0x250, 0x30, 0},
		/* HSMIXER */
		{0x256, 0x11, 0},
	};
	struct sc_reg_access hs_enable[] = {
		/* enable driver */
		{0x25D, 0x3, 0x3},
		{0x26C, 0x0, 0x2},
		/* unmute the headset */
		{ 0x259, 0x80, 0x80},
		{ 0x25A, 0x80, 0x80},
	};
	struct sc_reg_access vihf[] = {
		/* VIHF ON */
		{0x0C9, 0x27, 0x00},
	};
	struct sc_reg_access ihf_filter[] = {
		/* disable driver */
		{0x25D, 0x00, 0x0C},
		/*Filer DAC enable*/
		{0x251, 0x03, 0x03},
		{0x257, 0x0C, 0x0C},
	};
	struct sc_reg_access ihf_en[] = {
		/*enable drv*/
		{0x25D, 0x0C, 0x0c},
	};
	struct sc_reg_access ihf_unmute[] = {
		/*unmute headset*/
		{0x25B, 0x80, 0x80},
		{0x25C, 0x80, 0x80},
	};
	struct sc_reg_access epdac[] = {
		/* disable driver */
		{0x25D, 0x0, 0x43},
		/* DAC CONFIG ; both HP, LP on */
		{0x257, 0x03, 0x03},
	};
	struct sc_reg_access ep_enable[] = {
		/* enable driver */
		{0x25D, 0x40, 0x40},
		/* unmute the headset */
		{ 0x259, 0x80, 0x80},
		{ 0x25A, 0x80, 0x80},
	};
	struct sc_reg_access vib1_en[] = {
		/* enable driver, ADC */
		{0x25D, 0x10, 0x10},
		{0x264, 0x02, 0x82},
	};
	struct sc_reg_access vib2_en[] = {
		/* enable driver, ADC */
		{0x25D, 0x20, 0x20},
		{0x26A, 0x02, 0x82},
	};
	struct sc_reg_access pcm2_en[] = {
		/* enable pcm 2 */
		{0x27C, 0x1, 0x1},
	};
	int retval = 0;

	/* lazily initialise the codec on first use */
	if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
		retval = msic_init_card();
		if (retval)
			return retval;
	}

	pr_debug("powering up pb.... Device %d\n", device);
	sst_sc_reg_access(vaud, PMIC_WRITE, 1);
	msleep(1);	/* let the supplies settle before the PLL */
	sst_sc_reg_access(pll, PMIC_WRITE, 1);
	msleep(1);
	switch (device) {
	case SND_SST_DEVICE_HEADSET:
		snd_msic_ops.pb_on = 1;
		snd_msic_ops.pbhs_on = 1;
		if (snd_msic_ops.output_dev_id == STEREO_HEADPHONE) {
			sst_sc_reg_access(vhs, PMIC_WRITE, 2);
			sst_sc_reg_access(hsdac, PMIC_READ_MODIFY, 3);
			sst_sc_reg_access(hs_filter, PMIC_WRITE, 2);
			sst_sc_reg_access(hs_enable, PMIC_READ_MODIFY, 4);
		} else {
			/* earpiece (mono) path */
			sst_sc_reg_access(epdac, PMIC_READ_MODIFY, 2);
			sst_sc_reg_access(hs_filter, PMIC_WRITE, 2);
			sst_sc_reg_access(ep_enable, PMIC_READ_MODIFY, 3);
		}
		if (snd_msic_ops.lineout_dev_id == HEADSET)
			msic_set_selected_lineout_dev(HEADSET);
		break;

	case SND_SST_DEVICE_IHF:
		snd_msic_ops.pb_on = 1;
		sst_sc_reg_access(vihf, PMIC_WRITE, 1);
		sst_sc_reg_access(ihf_filter, PMIC_READ_MODIFY, 3);
		sst_sc_reg_access(ihf_en, PMIC_READ_MODIFY, 1);
		sst_sc_reg_access(ihf_unmute, PMIC_READ_MODIFY, 2);
		if (snd_msic_ops.lineout_dev_id == IHF)
			msic_set_selected_lineout_dev(IHF);
		break;

	case SND_SST_DEVICE_VIBRA:
		snd_msic_ops.pb_on = 1;
		sst_sc_reg_access(vib1_en, PMIC_READ_MODIFY, 2);
		if (snd_msic_ops.lineout_dev_id == VIBRA1)
			msic_set_selected_lineout_dev(VIBRA1);
		break;

	case SND_SST_DEVICE_HAPTIC:
		snd_msic_ops.pb_on = 1;
		sst_sc_reg_access(vib2_en, PMIC_READ_MODIFY, 2);
		if (snd_msic_ops.lineout_dev_id == VIBRA2)
			msic_set_selected_lineout_dev(VIBRA2);
		break;

	default:
		pr_warn("Wrong Device %d, selected %d\n",
			device, snd_msic_ops.output_dev_id);
	}
	return sst_sc_reg_access(pcm2_en, PMIC_READ_MODIFY, 1);
}
/*
 * Power up the capture path: supplies and PLL first, then the selected
 * mic chain (analog AMIC or digital DMIC), and finally the TX channels
 * and PCM2 port.  Returns the status of the final PCM2 write.
 */
static int msic_power_up_cp(unsigned int device)
{
	struct sc_reg_access vaud[] = {
		/* turn on the audio power supplies */
		{0x0DB, 0x07, 0},
	};
	struct sc_reg_access pll[] = {
		/* turn on PLL */
		{0x240, 0x20, 0},
	};
	struct sc_reg_access dmic_bias[] = {
		/* Turn on AMIC supply */
		{0x247, 0xA0, 0xA0},
	};
	struct sc_reg_access dmic[] = {
		/* mic demux enable */
		{0x245, 0x3F, 0x3F},
		{0x246, 0x07, 0x07},
	};
	struct sc_reg_access amic_bias[] = {
		/* Turn on AMIC supply */
		{0x247, 0xFC, 0xFC},
	};
	struct sc_reg_access amic[] = {
		/*MIC EN*/
		{0x249, 0x01, 0x01},
		{0x24A, 0x01, 0x01},
		/*ADC EN*/
		{0x248, 0x05, 0x0F},
	};
	struct sc_reg_access pcm2[] = {
		/* enable pcm 2 */
		{0x27C, 0x1, 0x1},
	};
	struct sc_reg_access tx_on[] = {
		/*wait for mic to stabalize before turning on audio channels*/
		{0x24F, 0x3C, 0x0},
	};
	int retval = 0;

	/* lazily initialise the codec on first use */
	if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
		retval = msic_init_card();
		if (retval)
			return retval;
	}

	pr_debug("powering up cp....%d\n", snd_msic_ops.input_dev_id);
	sst_sc_reg_access(vaud, PMIC_WRITE, 1);
	msleep(500);/*FIXME need optimzed value here*/
	sst_sc_reg_access(pll, PMIC_WRITE, 1);
	msleep(1);
	snd_msic_ops.cap_on = 1;
	if (snd_msic_ops.input_dev_id == AMIC) {
		sst_sc_reg_access(amic_bias, PMIC_READ_MODIFY, 1);
		msleep(1);
		sst_sc_reg_access(amic, PMIC_READ_MODIFY, 3);
	} else {
		sst_sc_reg_access(dmic_bias, PMIC_READ_MODIFY, 1);
		msleep(1);
		sst_sc_reg_access(dmic, PMIC_READ_MODIFY, 2);
	}
	msleep(1);
	sst_sc_reg_access(tx_on, PMIC_WRITE, 1);
	return sst_sc_reg_access(pcm2, PMIC_READ_MODIFY, 1);
}
/*
 * Fully power down the MSIC audio section: headset/IHF supplies, then
 * the PLL, then VAUD, with settling delays between the steps.  Clears
 * all cached playback/capture state flags.  Always returns 0.
 */
static int msic_power_down(void)
{
	struct sc_reg_access power_dn[] = {
		/* VHSP */
		{0x0DC, 0xC4, 0},
		/* VHSN */
		{0x0DD, 0x04, 0},
		/* VIHF */
		{0x0C9, 0x24, 0},
	};
	struct sc_reg_access pll[] = {
		/* turn off PLL*/
		{0x240, 0x00, 0x0},
	};
	struct sc_reg_access vaud[] = {
		/* turn off VAUD*/
		{0x0DB, 0x04, 0},
	};

	pr_debug("powering dn msic\n");
	snd_msic_ops.pbhs_on = 0;
	snd_msic_ops.pb_on = 0;
	snd_msic_ops.cap_on = 0;
	sst_sc_reg_access(power_dn, PMIC_WRITE, 3);
	msleep(1);
	sst_sc_reg_access(pll, PMIC_WRITE, 1);
	msleep(1);
	sst_sc_reg_access(vaud, PMIC_WRITE, 1);
	return 0;
}
/*
 * Power down the playback chain for @device: mute, disable the output
 * driver (mask patched per device in drv_enable[0]), switch off the
 * DAC/filter stages, and release the line-out mux if this device owned
 * it.  Unknown device ids are silently ignored.  Always returns 0.
 */
static int msic_power_down_pb(unsigned int device)
{
	struct sc_reg_access drv_enable[] = {
		{0x25D, 0x00, 0x00},	/* mask filled in per device below */
	};
	struct sc_reg_access hs_mute[] = {
		{0x259, 0x80, 0x80},
		{0x25A, 0x80, 0x80},
		{0x26C, 0x02, 0x02},
	};
	struct sc_reg_access hs_off[] = {
		{0x257, 0x00, 0x03},
		{0x250, 0x00, 0x30},
		{0x382, 0x00, 0x40},
	};
	struct sc_reg_access ihf_mute[] = {
		{0x25B, 0x80, 0x80},
		{0x25C, 0x80, 0x80},
	};
	struct sc_reg_access ihf_off[] = {
		{0x257, 0x00, 0x0C},
		{0x251, 0x00, 0x03},
	};
	struct sc_reg_access vib1_off[] = {
		{0x264, 0x00, 0x82},
	};
	struct sc_reg_access vib2_off[] = {
		{0x26A, 0x00, 0x82},
	};
	struct sc_reg_access lout_off[] = {
		{0x25e, 0x66, 0x00},
	};
	struct sc_reg_access pmode_disable[] = {
		{0x381, 0x00, 0x10},
	};

	pr_debug("powering dn pb for device %d\n", device);
	switch (device) {
	case SND_SST_DEVICE_HEADSET:
		snd_msic_ops.pbhs_on = 0;
		sst_sc_reg_access(hs_mute, PMIC_READ_MODIFY, 3);
		drv_enable[0].mask = 0x43;	/* HS L/R + earpiece drivers */
		sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
		sst_sc_reg_access(hs_off, PMIC_READ_MODIFY, 3);
		if (snd_msic_ops.lineout_dev_id == HEADSET)
			sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
		break;

	case SND_SST_DEVICE_IHF:
		sst_sc_reg_access(ihf_mute, PMIC_READ_MODIFY, 2);
		drv_enable[0].mask = 0x0C;	/* IHF drivers */
		sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
		sst_sc_reg_access(ihf_off, PMIC_READ_MODIFY, 2);
		if (snd_msic_ops.lineout_dev_id == IHF) {
			sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
			sst_sc_reg_access(pmode_disable, PMIC_READ_MODIFY, 1);
		}
		break;

	case SND_SST_DEVICE_VIBRA:
		sst_sc_reg_access(vib1_off, PMIC_READ_MODIFY, 1);
		drv_enable[0].mask = 0x10;	/* vibra1 driver */
		sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
		if (snd_msic_ops.lineout_dev_id == VIBRA1)
			sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
		break;

	case SND_SST_DEVICE_HAPTIC:
		sst_sc_reg_access(vib2_off, PMIC_READ_MODIFY, 1);
		drv_enable[0].mask = 0x20;	/* vibra2 driver */
		sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
		if (snd_msic_ops.lineout_dev_id == VIBRA2)
			sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
		break;
	}
	return 0;
}
/*
 * Power down the capture chain: disable the TX channels first, then the
 * mic bias/demux/ADC for whichever mic type is selected.  Always
 * returns 0.
 */
static int msic_power_down_cp(unsigned int device)
{
	struct sc_reg_access dmic[] = {
		{0x247, 0x00, 0xA0},
		{0x245, 0x00, 0x38},
		{0x246, 0x00, 0x07},
	};
	struct sc_reg_access amic[] = {
		{0x248, 0x00, 0x05},
		{0x249, 0x00, 0x01},
		{0x24A, 0x00, 0x01},
		{0x247, 0x00, 0xA3},
	};
	struct sc_reg_access tx_off[] = {
		{0x24F, 0x00, 0x3C},
	};

	pr_debug("powering dn cp....\n");
	snd_msic_ops.cap_on = 0;
	sst_sc_reg_access(tx_off, PMIC_READ_MODIFY, 1);
	if (snd_msic_ops.input_dev_id == DMIC)
		sst_sc_reg_access(dmic, PMIC_READ_MODIFY, 3);
	else
		sst_sc_reg_access(amic, PMIC_READ_MODIFY, 4);
	return 0;
}
/*
 * Select the playback output device (headphone vs earpiece).  If
 * headset playback is already running, re-run the power-up sequence so
 * the new routing takes effect immediately.  Always returns 0.
 */
static int msic_set_selected_output_dev(u8 value)
{
	pr_debug("msic set selected output:%d\n", value);
	snd_msic_ops.output_dev_id = value;
	if (snd_msic_ops.pbhs_on)
		msic_power_up_pb(SND_SST_DEVICE_HEADSET);
	return 0;
}
/*
 * Select the capture input device (AMIC or DMIC) by reprogramming the
 * audio mux, and re-run capture power-up if a capture stream is active.
 * Returns 0 on success or -EINVAL for an unknown device.
 */
static int msic_set_selected_input_dev(u8 value)
{
	struct sc_reg_access sc_access_dmic[] = {
		{0x24C, 0x10, 0x0},
	};
	struct sc_reg_access sc_access_amic[] = {
		{0x24C, 0x76, 0x0},
	};
	int retval = 0;

	pr_debug("msic_set_selected_input_dev:%d\n", value);
	snd_msic_ops.input_dev_id = value;

	switch (value) {
	case AMIC:
		pr_debug("Selecting AMIC1\n");
		retval = sst_sc_reg_access(sc_access_amic, PMIC_WRITE, 1);
		break;
	case DMIC:
		pr_debug("Selecting DMIC1\n");
		retval = sst_sc_reg_access(sc_access_dmic, PMIC_WRITE, 1);
		break;
	default:
		return -EINVAL;
	}
	if (snd_msic_ops.cap_on)
		retval = msic_power_up_cp(SND_SST_DEVICE_CAPTURE);
	return retval;
}
/*
 * Route one hardware DMIC channel through the audio mux.  Channels 0/1
 * live in AUDIOMUX12 (low/high nibble) and channels 2/3 in AUDIOMUX34;
 * the mapping value comes from snd_msic_ops.hw_dmic_map[].
 * Returns the register-access status, or -EINVAL for a bad index.
 */
static int msic_set_hw_dmic_route(u8 hw_ch_index)
{
	struct sc_reg_access route;

	switch (hw_ch_index) {
	case HW_CH0:
		route.reg_addr = AUDIOMUX12;
		route.value = snd_msic_ops.hw_dmic_map[0];
		route.mask = (MASK2 | MASK1 | MASK0);
		pr_debug("hw_ch0. value = 0x%x\n",
			 route.value);
		break;
	case HW_CH1:
		route.reg_addr = AUDIOMUX12;
		route.value = (snd_msic_ops.hw_dmic_map[1]) << 4;
		route.mask = (MASK6 | MASK5 | MASK4);
		pr_debug("### hw_ch1. value = 0x%x\n",
			 route.value);
		break;
	case HW_CH2:
		route.reg_addr = AUDIOMUX34;
		route.value = snd_msic_ops.hw_dmic_map[2];
		route.mask = (MASK2 | MASK1 | MASK0);
		pr_debug("hw_ch2. value = 0x%x\n",
			 route.value);
		break;
	case HW_CH3:
		route.reg_addr = AUDIOMUX34;
		route.value = (snd_msic_ops.hw_dmic_map[3]) << 4;
		route.mask = (MASK6 | MASK5 | MASK4);
		pr_debug("hw_ch3. value = 0x%x\n",
			 route.value);
		break;
	default:
		return -EINVAL;
	}
	return sst_sc_reg_access(&route, PMIC_READ_MODIFY, 1);
}
/*
 * The following ops are intentionally empty for the MSIC: stream
 * parameters, port control and volume/mute are handled elsewhere
 * (presumably by the DSP firmware path -- the stubs only satisfy the
 * common snd_pmic_ops interface).  All return 0 unconditionally.
 */
static int msic_set_pcm_voice_params(void)
{
	return 0;
}

static int msic_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
{
	return 0;
}

static int msic_set_audio_port(int status)
{
	return 0;
}

static int msic_set_voice_port(int status)
{
	return 0;
}

static int msic_set_mute(int dev_id, u8 value)
{
	return 0;
}

static int msic_set_vol(int dev_id, int value)
{
	return 0;
}

static int msic_get_mute(int dev_id, u8 *value)
{
	return 0;
}

static int msic_get_vol(int dev_id, int *value)
{
	return 0;
}
/*
 * Enable (@state non-zero) or disable the headset left/right output
 * drivers.  Always returns 0.
 */
static int msic_set_headset_state(int state)
{
	struct sc_reg_access hs_enable[] = {
		{0x25D, 0x03, 0x03},
	};

	if (!state)
		hs_enable[0].value = 0;	/* clear the driver-enable bits */
	sst_sc_reg_access(hs_enable, PMIC_READ_MODIFY, 1);
	return 0;
}
/*
 * Turn on the audio supplies and the headset-jack mic bias, needed for
 * accessory detection and button events.  Always returns 0.
 */
static int msic_enable_mic_bias(void)
{
	struct sc_reg_access jack_interrupt_reg[] = {
		{0x0DB, 0x07, 0x00},
	};
	struct sc_reg_access jack_bias_reg[] = {
		{0x247, 0x0C, 0x0C},
	};

	sst_sc_reg_access(jack_interrupt_reg, PMIC_WRITE, 1);
	sst_sc_reg_access(jack_bias_reg, PMIC_READ_MODIFY, 1);
	return 0;
}
/*
 * Drop the mic bias by powering the codec down, but only when jack
 * interrupt handling is idle and no playback/capture stream is active.
 * Always returns 0.
 */
static int msic_disable_mic_bias(void)
{
	if (!snd_msic_ops.jack_interrupt_status &&
	    !snd_msic_ops.pb_on && !snd_msic_ops.cap_on)
		msic_power_down();
	return 0;
}
/*
 * Disable headset-jack button detection; power the codec down too if
 * nothing else is using it.  Returns the register-access status.
 */
static int msic_disable_jack_btn(void)
{
	struct sc_reg_access btn_disable[] = {
		{0x26C, 0x00, 0x01}
	};

	if (!(snd_msic_ops.pb_on || snd_msic_ops.cap_on))
		msic_power_down();
	snd_msic_ops.jack_interrupt_status = false;
	return sst_sc_reg_access(btn_disable, PMIC_READ_MODIFY, 1);
}
/*
 * Enable headset-jack button detection (debounce setup + enable bit).
 * Returns the register-access status.
 */
static int msic_enable_jack_btn(void)
{
	struct sc_reg_access btn_enable[] = {
		{0x26b, 0x77, 0x00},
		{0x26C, 0x01, 0x00},
	};

	return sst_sc_reg_access(btn_enable, PMIC_WRITE, 2);
}
/* Convert a raw ADC code to millivolts using the per-LSB scale factor. */
static int msic_convert_adc_to_mvolt(unsigned int mic_bias)
{
	unsigned int microvolt = ADC_ONE_LSB_MULTIPLIER * mic_bias;

	return microvolt / 1000;
}
/*
 * Classify the accessory on the jack from the mic-bias voltage (mV):
 *   0..399    headphone (no mic)
 *   401..649  "American" headset (swapped mic/ground; toggle the mux)
 *   650..1999 headset with mic -> enable buttons and bias, report
 *             SND_JACK_HEADSET
 *   else      open cable
 * All non-headset cases report SND_JACK_HEADPHONE.
 * NOTE(review): mic_bias == 400 falls through to "open cable" --
 * presumably intentional boundary handling, but worth confirming.
 */
int msic_get_headset_state(int mic_bias)
{
	struct sc_reg_access msic_hs_toggle[] = {
		{0x070, 0x00, 0x01},
	};

	if (mic_bias >= 0 && mic_bias < 400) {
		pr_debug("Detected Headphone!!!\n");
		sst_sc_reg_access(msic_hs_toggle, PMIC_READ_MODIFY, 1);
	} else if (mic_bias > 400 && mic_bias < 650) {
		pr_debug("Detected American headset\n");
		msic_hs_toggle[0].value = 0x01;
		sst_sc_reg_access(msic_hs_toggle, PMIC_READ_MODIFY, 1);
	} else if (mic_bias >= 650 && mic_bias < 2000) {
		pr_debug("Detected Headset!!!\n");
		sst_sc_reg_access(msic_hs_toggle, PMIC_READ_MODIFY, 1);
		/*power on jack and btn*/
		snd_msic_ops.jack_interrupt_status = true;
		msic_enable_jack_btn();
		msic_enable_mic_bias();
		return SND_JACK_HEADSET;
	} else
		pr_debug("Detected Open Cable!!!\n");

	return SND_JACK_HEADPHONE;
}
/*
 * Sample the mic-bias voltage through the MSIC ADC: enable bias, kick a
 * conversion, read the two result registers (8 high bits + 2 low bits)
 * and combine them into a 10-bit code.  Returns the raw ADC code, or a
 * negative errno on register-access failure.
 */
static int msic_get_mic_bias(void *arg)
{
	struct snd_intelmad *intelmad_drv = (struct snd_intelmad *)arg;
	u16 adc_adr = intelmad_drv->adc_address;
	u16 adc_val;
	int ret;
	struct sc_reg_access adc_ctrl3[2] = {
		{0x1C2, 0x05, 0x0},
	};
	struct sc_reg_access audio_adc_reg1 = {0,};
	struct sc_reg_access audio_adc_reg2 = {0,};

	msic_enable_mic_bias();
	/* Enable the msic for conversion before reading */
	ret = sst_sc_reg_access(adc_ctrl3, PMIC_WRITE, 1);
	if (ret)
		return ret;
	adc_ctrl3[0].value = 0x04;
	/* Re-toggle the RRDATARD bit */
	ret = sst_sc_reg_access(adc_ctrl3, PMIC_WRITE, 1);
	if (ret)
		return ret;
	audio_adc_reg1.reg_addr = adc_adr;
	/* Read the higher bits of data */
	msleep(1000);	/* allow the conversion to complete */
	ret = sst_sc_reg_access(&audio_adc_reg1, PMIC_READ, 1);
	if (ret)
		return ret;
	pr_debug("adc read value %x", audio_adc_reg1.value);
	/* Shift bits to accommodate the lower two data bits */
	adc_val = (audio_adc_reg1.value << 2);
	adc_adr++;
	audio_adc_reg2. reg_addr = adc_adr;
	ret = sst_sc_reg_access(&audio_adc_reg2, PMIC_READ, 1);
	if (ret)
		return ret;
	pr_debug("adc read value %x", audio_adc_reg2.value);
	/* Adding lower two bits to the higher bits */
	/* note: 03 is octal, numerically 3 == 0b11 low-bit mask */
	audio_adc_reg2.value &= 03;
	adc_val += audio_adc_reg2.value;
	pr_debug("ADC value 0x%x", adc_val);
	msic_disable_mic_bias();
	return adc_val;
}
/*
 * PMIC interrupt callback: decode jack/button events from the interrupt
 * status byte and forward them as input jack reports.
 *
 * Bit meanings (per the debug messages and the handling below):
 *   0x1 - short button push
 *   0x2 - long button push
 *   0x4 - jack event requiring mic-bias measurement (presumably
 *         insertion — confirm)
 *   0x8 - presumably jack removal: button/bias paths are torn down
 */
static void msic_pmic_irq_cb(void *cb_data, u8 intsts)
{
	struct mad_jack *mjack = NULL;
	unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
	struct snd_intelmad *intelmaddata = cb_data;
	int retval = 0;

	pr_debug("value returned = 0x%x\n", intsts);

	/* Lazily initialize the card on the first interrupt. */
	if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
		retval = msic_init_card();
		if (retval)
			return;
	}

	mjack = &intelmaddata->jack[0];

	if (intsts & 0x1) {
		pr_debug("MAD short_push detected\n");
		present = SND_JACK_BTN_0;
		jack_event_flag = buttonpressflag = 1;
		mjack->jack.type = SND_JACK_BTN_0;
		mjack->jack.key[0] = BTN_0;
	}

	if (intsts & 0x2) {
		pr_debug(":MAD long_push detected\n");
		jack_event_flag = buttonpressflag = 1;
		mjack->jack.type = present = SND_JACK_BTN_1;
		mjack->jack.key[1] = BTN_1;
	}

	if (intsts & 0x4) {
		unsigned int mic_bias;

		jack_event_flag = 1;
		buttonpressflag = 0;
		/* Measure mic bias to distinguish headphone/headset. */
		mic_bias = msic_get_mic_bias(intelmaddata);
		pr_debug("mic_bias = %d\n", mic_bias);
		mic_bias = msic_convert_adc_to_mvolt(mic_bias);
		pr_debug("mic_bias after conversion = %d mV\n", mic_bias);
		mjack->jack_dev_state = msic_get_headset_state(mic_bias);
		mjack->jack.type = present = mjack->jack_dev_state;
	}

	if (intsts & 0x8) {
		/* present = 0 reports the previously-detected device
		 * as removed. */
		mjack->jack.type = mjack->jack_dev_state;
		present = 0;
		jack_event_flag = 1;
		buttonpressflag = 0;
		msic_disable_jack_btn();
		msic_disable_mic_bias();
	}

	if (jack_event_flag)
		sst_mad_send_jack_report(&mjack->jack,
					 buttonpressflag, present);
}
/*
 * PMIC operation table exported to the intelmad driver core.  All
 * callbacks drive the MSIC audio codec through sst_sc_reg_access().
 * Several handlers referenced here are defined elsewhere in this file.
 */
struct snd_pmic_ops snd_msic_ops = {
	.set_input_dev = msic_set_selected_input_dev,
	.set_output_dev = msic_set_selected_output_dev,
	.set_lineout_dev = msic_set_selected_lineout_dev,
	.set_hw_dmic_route = msic_set_hw_dmic_route,
	.set_mute = msic_set_mute,
	.get_mute = msic_get_mute,
	.set_vol = msic_set_vol,
	.get_vol = msic_get_vol,
	.init_card = msic_init_card,
	.set_pcm_audio_params = msic_set_pcm_audio_params,
	.set_pcm_voice_params = msic_set_pcm_voice_params,
	.set_voice_port = msic_set_voice_port,
	.set_audio_port = msic_set_audio_port,
	.power_up_pmic_pb = msic_power_up_pb,
	.power_up_pmic_cp = msic_power_up_cp,
	.power_down_pmic_pb = msic_power_down_pb,
	.power_down_pmic_cp = msic_power_down_cp,
	.power_down_pmic = msic_power_down,
	.pmic_irq_cb = msic_pmic_irq_cb,
	.pmic_jack_enable = msic_enable_mic_bias,
	.pmic_get_mic_bias = msic_get_mic_bias,
	.pmic_set_headset_state = msic_set_headset_state,
};
| gpl-2.0 |
airidosas252/linux_sun4i_kernel | drivers/staging/phison/phison.c | 3202 | 2598 | /*
* Copyright (C) 2006 Red Hat <evan_ko@phison.com>
*
* May be copied or modified under the terms of the GNU General Public License
*
* [Modify History]
* #0001, Evan, 2008.10.22, V0.00, New release.
* #0002, Evan, 2008.11.01, V0.90, Test Work In Ubuntu Linux 8.04.
* #0003, Evan, 2008.01.08, V0.91, Change Name "PCIE-SSD" to "E-BOX".
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define PHISON_DEBUG
#define DRV_NAME "phison_e-box" /* #0003 */
#define DRV_VERSION "0.91" /* #0003 */
#define PCI_VENDOR_ID_PHISON 0x1987
#define PCI_DEVICE_ID_PS5000 0x5000
/*
 * Pre-reset hook: the device has no cable detection, so report no cable
 * before delegating to the standard libata prereset.
 */
static int phison_pre_reset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	int rc;

	ap->cbl = ATA_CBL_NONE;
	rc = ata_std_prereset(link, deadline);
	dev_dbg(ap->dev, "phison_pre_reset(), ret = %x\n", rc);

	return rc;
}
/* Stock BMDMA SCSI host template; only the driver name is customized. */
static struct scsi_host_template phison_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

/* Inherit generic BMDMA port ops, overriding only the prereset hook. */
static struct ata_port_operations phison_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.prereset		= phison_pre_reset,
};
static int phison_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret;
struct ata_port_info info = {
.flags = ATA_FLAG_NO_ATAPI,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = ATA_UDMA5,
.port_ops = &phison_ops,
};
const struct ata_port_info *ppi[] = { &info, NULL };
ret = ata_pci_bmdma_init_one(pdev, ppi, &phison_sht, NULL, 0);
dev_dbg(&pdev->dev, "phison_init_one(), ret = %x\n", ret);
return ret;
}
static DEFINE_PCI_DEVICE_TABLE(phison_pci_tbl) = {
{ PCI_VENDOR_ID_PHISON, PCI_DEVICE_ID_PS5000, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, phison_pci_tbl);
/* PCI driver glue: probe here, removal handled generically by libata. */
static struct pci_driver phison_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= phison_pci_tbl,
	.probe		= phison_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM	/* haven't tested it. */
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};
/* Module entry: register the PCI driver. */
static int __init phison_ide_init(void)
{
	return pci_register_driver(&phison_pci_driver);
}

/* Module exit: unregister the PCI driver. */
static void __exit phison_ide_exit(void)
{
	pci_unregister_driver(&phison_pci_driver);
}

module_init(phison_ide_init);
module_exit(phison_ide_exit);
MODULE_AUTHOR("Evan Ko");
MODULE_DESCRIPTION("PCIE driver module for PHISON PS5000 E-BOX");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
sledges/msm | drivers/gpio/gpio-mxs.c | 4226 | 8102 | /*
* MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
* Copyright 2008 Juergen Beisert, kernel@pengutronix.de
*
* Based on code from Freescale,
* Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/basic_mmio_gpio.h>
#include <linux/module.h>
#include <mach/mxs.h>
#define MXS_SET 0x4
#define MXS_CLR 0x8
#define PINCTRL_DOUT(n) ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10)
#define PINCTRL_DIN(n) ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10)
#define PINCTRL_DOE(n) ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10)
#define PINCTRL_PIN2IRQ(n) ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10)
#define PINCTRL_IRQEN(n) ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10)
#define PINCTRL_IRQLEV(n) ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10)
#define PINCTRL_IRQPOL(n) ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10)
#define PINCTRL_IRQSTAT(n) ((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10)
#define GPIO_INT_FALL_EDGE 0x0
#define GPIO_INT_LOW_LEV 0x1
#define GPIO_INT_RISE_EDGE 0x2
#define GPIO_INT_HIGH_LEV 0x3
#define GPIO_INT_LEV_MASK (1 << 0)
#define GPIO_INT_POL_MASK (1 << 1)
#define irq_to_gpio(irq) ((irq) - MXS_GPIO_IRQ_START)
/* Per-port state for one bank of 32 GPIOs. */
struct mxs_gpio_port {
	void __iomem *base;		/* pinctrl register window, shared by all ports */
	int id;				/* bank number; selects the register offsets */
	int irq;			/* upstream (chained) interrupt for this bank */
	int virtual_irq_start;		/* first linux irq handed out for this bank */
	struct bgpio_chip bgc;		/* generic memory-mapped gpio chip state */
};
/* Note: This driver assumes 32 GPIOs are handled in one register */
/*
 * Configure the trigger type for one gpio interrupt.
 *
 * The edge code packs two hardware choices: bit 0 selects level vs edge
 * sensing (IRQLEV) and bit 1 selects the polarity (IRQPOL).  Registers
 * are written through their hardware set/clear aliases so only the one
 * pin's bit is touched.  Returns 0, or -EINVAL for unsupported types.
 */
static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
{
	u32 gpio = irq_to_gpio(d->irq);
	u32 pin_mask = 1 << (gpio & 31);
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mxs_gpio_port *port = gc->private;
	void __iomem *pin_addr;
	int edge;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		edge = GPIO_INT_RISE_EDGE;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		edge = GPIO_INT_FALL_EDGE;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		edge = GPIO_INT_LOW_LEV;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		edge = GPIO_INT_HIGH_LEV;
		break;
	default:
		return -EINVAL;
	}

	/* set level or edge */
	pin_addr = port->base + PINCTRL_IRQLEV(port->id);
	if (edge & GPIO_INT_LEV_MASK)
		writel(pin_mask, pin_addr + MXS_SET);
	else
		writel(pin_mask, pin_addr + MXS_CLR);

	/* set polarity */
	pin_addr = port->base + PINCTRL_IRQPOL(port->id);
	if (edge & GPIO_INT_POL_MASK)
		writel(pin_mask, pin_addr + MXS_SET);
	else
		writel(pin_mask, pin_addr + MXS_CLR);

	/* Clear any stale latched status for this pin after reconfig. */
	writel(1 << (gpio & 0x1f),
	       port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);

	return 0;
}
/* MXS has one interrupt *per* gpio port */
static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
{
u32 irq_stat;
struct mxs_gpio_port *port = irq_get_handler_data(irq);
u32 gpio_irq_no_base = port->virtual_irq_start;
desc->irq_data.chip->irq_ack(&desc->irq_data);
irq_stat = readl(port->base + PINCTRL_IRQSTAT(port->id)) &
readl(port->base + PINCTRL_IRQEN(port->id));
while (irq_stat != 0) {
int irqoffset = fls(irq_stat) - 1;
generic_handle_irq(gpio_irq_no_base + irqoffset);
irq_stat &= ~(1 << irqoffset);
}
}
/*
* Set interrupt number "irq" in the GPIO as a wake-up source.
* While system is running, all registered GPIO interrupts need to have
* wake-up enabled. When system is suspended, only selected GPIO interrupts
* need to have wake-up enabled.
* @param irq interrupt source number
* @param enable enable as wake-up if equal to non-zero
* @return This function returns 0 on success.
*/
/*
 * Propagate a gpio irq's wake-up setting to the port's parent interrupt,
 * since wake capability is controlled at the chained-irq level.
 */
static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mxs_gpio_port *port = gc->private;

	if (!enable)
		disable_irq_wake(port->irq);
	else
		enable_irq_wake(port->irq);

	return 0;
}
/*
 * Set up the generic irq chip for one gpio port: ack via the IRQSTAT
 * clear alias, mask/unmask via IRQEN, with our type/wake callbacks.
 *
 * Fix: irq_alloc_generic_chip() can return NULL on allocation failure;
 * the original dereferenced the result unchecked.
 */
static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("gpio-mxs", 1, port->virtual_irq_start,
				    port->base, handle_level_irq);
	if (!gc)
		return;		/* out of memory: leave irqs unconfigured */

	gc->private = port;

	ct = gc->chip_types;
	ct->chip.irq_ack = irq_gc_ack_set_bit;
	ct->chip.irq_mask = irq_gc_mask_clr_bit;
	ct->chip.irq_unmask = irq_gc_mask_set_bit;
	ct->chip.irq_set_type = mxs_gpio_set_irq_type;
	ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
	ct->regs.ack = PINCTRL_IRQSTAT(port->id) + MXS_CLR;
	ct->regs.mask = PINCTRL_IRQEN(port->id);

	irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
}
/* Linear gpio->irq mapping: port's first irq plus the gpio offset. */
static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
	struct bgpio_chip *bgc = to_bgpio_chip(gc);
	struct mxs_gpio_port *port =
		container_of(bgc, struct mxs_gpio_port, bgc);

	return offset + port->virtual_irq_start;
}
/*
 * Probe one gpio bank: map the (shared) pinctrl window on first use,
 * disable/clear its interrupts, wire up the generic irq chip and the
 * chained handler, then register a basic mmio gpio chip.
 *
 * NOTE(review): 'base' is a function-level static shared by all banks;
 * the error path iounmaps it (guarded by 'iores', so only for the bank
 * that mapped it) but never resets 'base' to NULL, leaving later probes
 * with a dangling pointer — worth confirming/fixing upstream.
 */
static int __devinit mxs_gpio_probe(struct platform_device *pdev)
{
	static void __iomem *base;
	struct mxs_gpio_port *port;
	struct resource *iores = NULL;
	int err;

	port = kzalloc(sizeof(struct mxs_gpio_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->id = pdev->id;
	port->virtual_irq_start = MXS_GPIO_IRQ_START + port->id * 32;

	/*
	 * map memory region only once, as all the gpio ports
	 * share the same one
	 */
	if (!base) {
		iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!iores) {
			err = -ENODEV;
			goto out_kfree;
		}

		if (!request_mem_region(iores->start, resource_size(iores),
					pdev->name)) {
			err = -EBUSY;
			goto out_kfree;
		}

		base = ioremap(iores->start, resource_size(iores));
		if (!base) {
			err = -ENOMEM;
			goto out_release_mem;
		}
	}
	port->base = base;

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0) {
		err = -EINVAL;
		goto out_iounmap;
	}

	/*
	 * select the pin interrupt functionality but initially
	 * disable the interrupts
	 */
	writel(~0U, port->base + PINCTRL_PIN2IRQ(port->id));
	writel(0, port->base + PINCTRL_IRQEN(port->id));

	/* clear address has to be used to clear IRQSTAT bits */
	writel(~0U, port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);

	/* gpio-mxs can be a generic irq chip */
	mxs_gpio_init_gc(port);

	/* setup one handler for each entry */
	irq_set_chained_handler(port->irq, mxs_gpio_irq_handler);
	irq_set_handler_data(port->irq, port);

	err = bgpio_init(&port->bgc, &pdev->dev, 4,
			 port->base + PINCTRL_DIN(port->id),
			 port->base + PINCTRL_DOUT(port->id), NULL,
			 port->base + PINCTRL_DOE(port->id), NULL, false);
	if (err)
		goto out_iounmap;

	port->bgc.gc.to_irq = mxs_gpio_to_irq;
	port->bgc.gc.base = port->id * 32;

	err = gpiochip_add(&port->bgc.gc);
	if (err)
		goto out_bgpio_remove;

	return 0;

out_bgpio_remove:
	bgpio_remove(&port->bgc);
out_iounmap:
	/* only undo the mapping if this probe created it */
	if (iores)
		iounmap(port->base);
out_release_mem:
	if (iores)
		release_mem_region(iores->start, resource_size(iores));
out_kfree:
	kfree(port);
	dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
	return err;
}
/* Platform driver: no remove — gpio banks stay registered for life. */
static struct platform_driver mxs_gpio_driver = {
	.driver		= {
		.name	= "gpio-mxs",
		.owner	= THIS_MODULE,
	},
	.probe		= mxs_gpio_probe,
};

static int __init mxs_gpio_init(void)
{
	return platform_driver_register(&mxs_gpio_driver);
}
/* postcore: gpios must exist before most device drivers probe. */
postcore_initcall(mxs_gpio_init);
/* Fix: author email was garbled ("danielncaiaq.de"); the copyright
 * header of this file gives Daniel Mack <daniel@caiaq.de>. */
MODULE_AUTHOR("Freescale Semiconductor, "
	      "Daniel Mack <daniel@caiaq.de>, "
	      "Juergen Beisert <kernel@pengutronix.de>");
MODULE_DESCRIPTION("Freescale MXS GPIO");
MODULE_LICENSE("GPL");
| gpl-2.0 |
cnbin/linux | sound/usb/caiaq/input.c | 4482 | 27017 | /*
* Copyright (c) 2006,2007 Daniel Mack, Tim Ruetz
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "device.h"
#include "input.h"
/* Per-device button maps: one keycode per reported button bit. */
static unsigned short keycode_ak1[] = { KEY_C, KEY_B, KEY_A };
static unsigned short keycode_rk2[] = { KEY_1, KEY_2, KEY_3, KEY_4,
					KEY_5, KEY_6, KEY_7 };
static unsigned short keycode_rk3[] = { KEY_1, KEY_2, KEY_3, KEY_4,
					KEY_5, KEY_6, KEY_7, KEY_8, KEY_9 };

/* Kore controller: function keys, cursor, transport, softkeys and the
 * touch-sensitive knobs (mapped onto braille-dot keycodes). */
static unsigned short keycode_kore[] = {
	KEY_FN_F1,      /* "menu" */
	KEY_FN_F7,      /* "lcd backlight" */
	KEY_FN_F2,      /* "control" */
	KEY_FN_F3,      /* "enter" */
	KEY_FN_F4,      /* "view" */
	KEY_FN_F5,      /* "esc" */
	KEY_FN_F6,      /* "sound" */
	KEY_FN_F8,      /* array spacer, never triggered. */
	KEY_RIGHT,
	KEY_DOWN,
	KEY_UP,
	KEY_LEFT,
	KEY_SOUND,      /* "listen" */
	KEY_RECORD,
	KEY_PLAYPAUSE,
	KEY_STOP,
	BTN_4,          /* 8 softkeys */
	BTN_3,
	BTN_2,
	BTN_1,
	BTN_8,
	BTN_7,
	BTN_6,
	BTN_5,
	KEY_BRL_DOT4,   /* touch sensitive knobs */
	KEY_BRL_DOT3,
	KEY_BRL_DOT2,
	KEY_BRL_DOT1,
	KEY_BRL_DOT8,
	KEY_BRL_DOT7,
	KEY_BRL_DOT6,
	KEY_BRL_DOT5
};
/* Maschine: buttons map onto BTN_MISC+n, pads onto ABS_PRESSURE+n. */
#define MASCHINE_BUTTONS (42)
#define MASCHINE_BUTTON(X) ((X) + BTN_MISC)
#define MASCHINE_PADS (16)
#define MASCHINE_PAD(X) ((X) + ABS_PRESSURE)

/* Ordered by report-bit position, not by panel layout. */
static unsigned short keycode_maschine[] = {
	MASCHINE_BUTTON(40), /* mute */
	MASCHINE_BUTTON(39), /* solo */
	MASCHINE_BUTTON(38), /* select */
	MASCHINE_BUTTON(37), /* duplicate */
	MASCHINE_BUTTON(36), /* navigate */
	MASCHINE_BUTTON(35), /* pad mode */
	MASCHINE_BUTTON(34), /* pattern */
	MASCHINE_BUTTON(33), /* scene */
	KEY_RESERVED, /* spacer */

	MASCHINE_BUTTON(30), /* rec */
	MASCHINE_BUTTON(31), /* erase */
	MASCHINE_BUTTON(32), /* shift */

	MASCHINE_BUTTON(28), /* grid */
	MASCHINE_BUTTON(27), /* > */
	MASCHINE_BUTTON(26), /* < */
	MASCHINE_BUTTON(25), /* restart */

	MASCHINE_BUTTON(21), /* E */
	MASCHINE_BUTTON(22), /* F */
	MASCHINE_BUTTON(23), /* G */
	MASCHINE_BUTTON(24), /* H */
	MASCHINE_BUTTON(20), /* D */
	MASCHINE_BUTTON(19), /* C */
	MASCHINE_BUTTON(18), /* B */
	MASCHINE_BUTTON(17), /* A */

	MASCHINE_BUTTON(0), /* control */
	MASCHINE_BUTTON(2), /* browse */
	MASCHINE_BUTTON(4), /* < */
	MASCHINE_BUTTON(6), /* snap */
	MASCHINE_BUTTON(7), /* autowrite */
	MASCHINE_BUTTON(5), /* > */
	MASCHINE_BUTTON(3), /* sampling */
	MASCHINE_BUTTON(1), /* step */

	MASCHINE_BUTTON(15), /* 8 softkeys */
	MASCHINE_BUTTON(14),
	MASCHINE_BUTTON(13),
	MASCHINE_BUTTON(12),
	MASCHINE_BUTTON(11),
	MASCHINE_BUTTON(10),
	MASCHINE_BUTTON(9),
	MASCHINE_BUTTON(8),

	MASCHINE_BUTTON(16), /* note repeat */
	MASCHINE_BUTTON(29) /* play */
};
#define KONTROLX1_INPUTS (40)
#define KONTROLS4_BUTTONS (12 * 8)
#define KONTROLS4_AXIS (46)
#define KONTROLS4_BUTTON(X) ((X) + BTN_MISC)
#define KONTROLS4_ABS(X) ((X) + ABS_HAT0X)

/* some of these devices have endless rotation potentiometers
 * built in which use two tapers, 90 degrees phase shifted.
 * this algorithm decodes them to one single value, ranging
 * from 0 to 999 */
static unsigned int decode_erp(unsigned char a, unsigned char b)
{
	/* raw reading extremes of either taper, and derived angle units */
	const int high_peak = 268;
	const int low_peak = -7;
	const int range = high_peak - low_peak;		/* == 180 degrees */
	const int deg90 = range / 2;
	const int deg180 = range;
	const int deg270 = deg90 + deg180;
	const int deg360 = deg180 * 2;
	const int mid_value = (high_peak + low_peak) / 2;
	int weight_a, weight_b;
	int pos_a, pos_b;
	int ret;

	/* taper B's weight grows as taper A leaves its linear mid region */
	weight_b = abs(mid_value - a) - (range / 2 - 100) / 2;
	if (weight_b < 0)
		weight_b = 0;
	else if (weight_b > 100)
		weight_b = 100;
	weight_a = 100 - weight_b;

	if (a < mid_value) {
		/* 0..90 and 270..360 degrees */
		pos_b = b - low_peak + deg270;
		if (pos_b >= deg360)
			pos_b -= deg360;
	} else {
		/* 90..270 degrees */
		pos_b = high_peak - b + deg90;
	}

	if (b > mid_value) {
		/* 0..180 degrees */
		pos_a = a - low_peak;
	} else {
		/* 180..360 degrees */
		pos_a = high_peak - a + deg180;
	}

	/* blend both taper positions by weight, normalize to 0..999 */
	ret = (pos_a * weight_a + pos_b * weight_b) * 10 / deg360;

	if (ret < 0)
		ret += 1000;
	else if (ret >= 1000)
		ret -= 1000;

	return ret;
}
/* Report one axis value: a 16-bit big-endian quantity at buf[2*offset]. */
static inline void snd_caiaq_input_report_abs(struct snd_usb_caiaqdev *cdev,
					      int axis, const unsigned char *buf,
					      int offset)
{
	const unsigned char *p = buf + offset * 2;

	input_report_abs(cdev->input_dev, axis, (p[0] << 8) | p[1]);
}
/*
 * Decode an analog-values packet into absolute axis reports.  Each
 * device type lays its 16-bit big-endian values out differently; the
 * per-device offset tables below reflect the wire format.
 */
static void snd_caiaq_input_read_analog(struct snd_usb_caiaqdev *cdev,
					const unsigned char *buf,
					unsigned int len)
{
	struct input_dev *input_dev = cdev->input_dev;

	switch (cdev->chip.usb_id) {
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL2):
		snd_caiaq_input_report_abs(cdev, ABS_X, buf, 2);
		snd_caiaq_input_report_abs(cdev, ABS_Y, buf, 0);
		snd_caiaq_input_report_abs(cdev, ABS_Z, buf, 1);
		break;
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL3):
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER):
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER2):
		snd_caiaq_input_report_abs(cdev, ABS_X, buf, 0);
		snd_caiaq_input_report_abs(cdev, ABS_Y, buf, 1);
		snd_caiaq_input_report_abs(cdev, ABS_Z, buf, 2);
		break;
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
		snd_caiaq_input_report_abs(cdev, ABS_HAT0X, buf, 4);
		snd_caiaq_input_report_abs(cdev, ABS_HAT0Y, buf, 2);
		snd_caiaq_input_report_abs(cdev, ABS_HAT1X, buf, 6);
		snd_caiaq_input_report_abs(cdev, ABS_HAT1Y, buf, 1);
		snd_caiaq_input_report_abs(cdev, ABS_HAT2X, buf, 7);
		snd_caiaq_input_report_abs(cdev, ABS_HAT2Y, buf, 0);
		snd_caiaq_input_report_abs(cdev, ABS_HAT3X, buf, 5);
		snd_caiaq_input_report_abs(cdev, ABS_HAT3Y, buf, 3);
		break;
	}

	input_sync(input_dev);
}
/*
 * Decode endless-rotation-potentiometer packets.  Each knob supplies a
 * pair of phase-shifted taper bytes which decode_erp() folds into one
 * 0..999 position; the byte-pair indices are device-specific.
 */
static void snd_caiaq_input_read_erp(struct snd_usb_caiaqdev *cdev,
				     const char *buf, unsigned int len)
{
	struct input_dev *input_dev = cdev->input_dev;
	int i;

	switch (cdev->chip.usb_id) {
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AK1):
		/* single knob */
		i = decode_erp(buf[0], buf[1]);
		input_report_abs(input_dev, ABS_X, i);
		input_sync(input_dev);
		break;
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER):
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER2):
		/* eight knobs; taper pairs are interleaved in the packet */
		i = decode_erp(buf[7], buf[5]);
		input_report_abs(input_dev, ABS_HAT0X, i);
		i = decode_erp(buf[12], buf[14]);
		input_report_abs(input_dev, ABS_HAT0Y, i);
		i = decode_erp(buf[15], buf[13]);
		input_report_abs(input_dev, ABS_HAT1X, i);
		i = decode_erp(buf[0], buf[2]);
		input_report_abs(input_dev, ABS_HAT1Y, i);
		i = decode_erp(buf[3], buf[1]);
		input_report_abs(input_dev, ABS_HAT2X, i);
		i = decode_erp(buf[8], buf[10]);
		input_report_abs(input_dev, ABS_HAT2Y, i);
		i = decode_erp(buf[11], buf[9]);
		input_report_abs(input_dev, ABS_HAT3X, i);
		i = decode_erp(buf[4], buf[6]);
		input_report_abs(input_dev, ABS_HAT3Y, i);
		input_sync(input_dev);
		break;

	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER):
		/* 4 under the left screen */
		input_report_abs(input_dev, ABS_HAT0X, decode_erp(buf[21], buf[20]));
		input_report_abs(input_dev, ABS_HAT0Y, decode_erp(buf[15], buf[14]));
		input_report_abs(input_dev, ABS_HAT1X, decode_erp(buf[9], buf[8]));
		input_report_abs(input_dev, ABS_HAT1Y, decode_erp(buf[3], buf[2]));

		/* 4 under the right screen */
		input_report_abs(input_dev, ABS_HAT2X, decode_erp(buf[19], buf[18]));
		input_report_abs(input_dev, ABS_HAT2Y, decode_erp(buf[13], buf[12]));
		input_report_abs(input_dev, ABS_HAT3X, decode_erp(buf[7], buf[6]));
		input_report_abs(input_dev, ABS_HAT3Y, decode_erp(buf[1], buf[0]));

		/* volume */
		input_report_abs(input_dev, ABS_RX, decode_erp(buf[17], buf[16]));
		/* tempo */
		input_report_abs(input_dev, ABS_RY, decode_erp(buf[11], buf[10]));
		/* swing */
		input_report_abs(input_dev, ABS_RZ, decode_erp(buf[5], buf[4]));

		input_sync(input_dev);
		break;
	}
}
/*
 * Decode a button/IO packet: one key bit per report bit (LSB first
 * within each byte), plus a few device-specific analog extras.
 */
static void snd_caiaq_input_read_io(struct snd_usb_caiaqdev *cdev,
				    unsigned char *buf, unsigned int len)
{
	struct input_dev *idev = cdev->input_dev;
	unsigned short *keycode = idev->keycode;
	int i;

	if (!keycode)
		return;

	/* The RigKontrol2 reports its buttons with inverted polarity. */
	if (idev->id.product == USB_PID_RIGKONTROL2)
		for (i = 0; i < len; i++)
			buf[i] = ~buf[i];

	for (i = 0; i < idev->keycodemax && i < len * 8; i++) {
		unsigned int pressed = buf[i / 8] & (1 << (i % 8));

		input_report_key(idev, keycode[i], pressed);
	}

	switch (cdev->chip.usb_id) {
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER):
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER2):
		/* byte 4 carries an inverted analog value */
		input_report_abs(cdev->input_dev, ABS_MISC, 255 - buf[4]);
		break;
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
		/* rotary encoders: two 4-bit counters per byte */
		input_report_abs(cdev->input_dev, ABS_X, buf[5] & 0xf);
		input_report_abs(cdev->input_dev, ABS_Y, buf[5] >> 4);
		input_report_abs(cdev->input_dev, ABS_Z, buf[6] & 0xf);
		input_report_abs(cdev->input_dev, ABS_MISC, buf[6] >> 4);
		break;
	}

	input_sync(idev);
}
#define TKS4_MSGBLOCK_SIZE 16

/*
 * Dispatch a Traktor Kontrol S4 input packet.  The packet is a sequence
 * of 16-byte blocks; the first two bytes of each block identify its
 * content (buttons, wheels, or one of several fader/EQ groups).
 *
 * Fix: the loop condition was "while (len)" with "len -= 16" at the
 * bottom.  If the URB length is not a multiple of 16, the unsigned
 * length underflows and the loop scans far past the buffer.  Require a
 * full block to remain before processing it; a trailing partial block
 * is ignored.
 */
static void snd_usb_caiaq_tks4_dispatch(struct snd_usb_caiaqdev *cdev,
					const unsigned char *buf,
					unsigned int len)
{
	struct device *dev = caiaqdev_to_dev(cdev);

	while (len >= TKS4_MSGBLOCK_SIZE) {
		unsigned int i, block_id = (buf[0] << 8) | buf[1];

		switch (block_id) {
		case 0:
			/* buttons */
			for (i = 0; i < KONTROLS4_BUTTONS; i++)
				input_report_key(cdev->input_dev, KONTROLS4_BUTTON(i),
						 (buf[4 + (i / 8)] >> (i % 8)) & 1);
			break;

		case 1:
			/* left wheel */
			input_report_abs(cdev->input_dev, KONTROLS4_ABS(36), buf[9] | ((buf[8] & 0x3) << 8));
			/* right wheel */
			input_report_abs(cdev->input_dev, KONTROLS4_ABS(37), buf[13] | ((buf[12] & 0x3) << 8));

			/* rotary encoders */
			input_report_abs(cdev->input_dev, KONTROLS4_ABS(38), buf[3] & 0xf);
			input_report_abs(cdev->input_dev, KONTROLS4_ABS(39), buf[4] >> 4);
			input_report_abs(cdev->input_dev, KONTROLS4_ABS(40), buf[4] & 0xf);
			input_report_abs(cdev->input_dev, KONTROLS4_ABS(41), buf[5] >> 4);
			input_report_abs(cdev->input_dev, KONTROLS4_ABS(42), buf[5] & 0xf);
			input_report_abs(cdev->input_dev, KONTROLS4_ABS(43), buf[6] >> 4);
			input_report_abs(cdev->input_dev, KONTROLS4_ABS(44), buf[6] & 0xf);
			input_report_abs(cdev->input_dev, KONTROLS4_ABS(45), buf[7] >> 4);
			input_report_abs(cdev->input_dev, KONTROLS4_ABS(46), buf[7] & 0xf);

			break;
		case 2:
			/* Volume Fader Channel D */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(0), buf, 1);
			/* Volume Fader Channel B */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(1), buf, 2);
			/* Volume Fader Channel A */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(2), buf, 3);
			/* Volume Fader Channel C */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(3), buf, 4);
			/* Loop Volume */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(4), buf, 6);
			/* Crossfader */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(7), buf, 7);

			break;
		case 3:
			/* Tempo Fader R */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(6), buf, 3);
			/* Tempo Fader L */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(5), buf, 4);
			/* Mic Volume */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(8), buf, 6);
			/* Cue Mix */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(9), buf, 7);

			break;
		case 4:
			/* Wheel distance sensor L */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(10), buf, 1);
			/* Wheel distance sensor R */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(11), buf, 2);
			/* Channel D EQ - Filter */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(12), buf, 3);
			/* Channel D EQ - Low */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(13), buf, 4);
			/* Channel D EQ - Mid */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(14), buf, 5);
			/* Channel D EQ - Hi */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(15), buf, 6);
			/* FX2 - dry/wet */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(16), buf, 7);

			break;
		case 5:
			/* FX2 - 1 */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(17), buf, 1);
			/* FX2 - 2 */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(18), buf, 2);
			/* FX2 - 3 */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(19), buf, 3);
			/* Channel B EQ - Filter */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(20), buf, 4);
			/* Channel B EQ - Low */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(21), buf, 5);
			/* Channel B EQ - Mid */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(22), buf, 6);
			/* Channel B EQ - Hi */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(23), buf, 7);

			break;
		case 6:
			/* Channel A EQ - Filter */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(24), buf, 1);
			/* Channel A EQ - Low */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(25), buf, 2);
			/* Channel A EQ - Mid */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(26), buf, 3);
			/* Channel A EQ - Hi */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(27), buf, 4);
			/* Channel C EQ - Filter */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(28), buf, 5);
			/* Channel C EQ - Low */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(29), buf, 6);
			/* Channel C EQ - Mid */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(30), buf, 7);

			break;
		case 7:
			/* Channel C EQ - Hi */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(31), buf, 1);
			/* FX1 - wet/dry */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(32), buf, 2);
			/* FX1 - 1 */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(33), buf, 3);
			/* FX1 - 2 */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(34), buf, 4);
			/* FX1 - 3 */
			snd_caiaq_input_report_abs(cdev, KONTROLS4_ABS(35), buf, 5);

			break;
		default:
			dev_dbg(dev, "%s(): bogus block (id %d)\n",
				__func__, block_id);
			return;
		}

		len -= TKS4_MSGBLOCK_SIZE;
		buf += TKS4_MSGBLOCK_SIZE;
	}

	input_sync(cdev->input_dev);
}
#define MASCHINE_MSGBLOCK_SIZE 2

/*
 * Dispatch a Maschine pad-pressure packet: 16 little-endian words, each
 * carrying the pad number in its top 4 bits and the pressure below.
 */
static void snd_usb_caiaq_maschine_dispatch(struct snd_usb_caiaqdev *cdev,
					    const unsigned char *buf,
					    unsigned int len)
{
	const __le16 *p = (const __le16 *) buf;
	unsigned int i;

	for (i = 0; i < MASCHINE_PADS; i++, p++) {
		unsigned int v = le16_to_cpu(*p);

		input_report_abs(cdev->input_dev, MASCHINE_PAD(v >> 12),
				 v & 0xfff);
	}

	input_sync(cdev->input_dev);
}
/*
 * Completion handler for the EP4 input URB.  Decodes the payload per
 * device type and always re-arms the URB (the switch cases fall through
 * to the requeue label) unless the completion was stale or errored.
 */
static void snd_usb_caiaq_ep4_reply_dispatch(struct urb *urb)
{
	struct snd_usb_caiaqdev *cdev = urb->context;
	unsigned char *buf = urb->transfer_buffer;
	struct device *dev = &urb->dev->dev;
	int ret;

	/* drop errored or stale completions without resubmitting */
	if (urb->status || !cdev || urb != cdev->ep4_in_urb)
		return;

	switch (cdev->chip.usb_id) {
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
		/* expects >= 24 bytes: flags, 7 button bytes, 16 analog */
		if (urb->actual_length < 24)
			goto requeue;

		if (buf[0] & 0x3)
			snd_caiaq_input_read_io(cdev, buf + 1, 7);
		if (buf[0] & 0x4)
			snd_caiaq_input_read_analog(cdev, buf + 8, 16);

		break;

	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLS4):
		snd_usb_caiaq_tks4_dispatch(cdev, buf, urb->actual_length);
		break;

	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER):
		/* one 16-bit pressure word per pad */
		if (urb->actual_length < (MASCHINE_PADS * MASCHINE_MSGBLOCK_SIZE))
			goto requeue;

		snd_usb_caiaq_maschine_dispatch(cdev, buf, urb->actual_length);
		break;
	}

requeue:
	cdev->ep4_in_urb->actual_length = 0;
	ret = usb_submit_urb(cdev->ep4_in_urb, GFP_ATOMIC);
	if (ret < 0)
		dev_err(dev, "unable to submit urb. OOM!?\n");
}
/*
 * input_dev open callback: devices that stream input on EP4 need their
 * URB started when the first listener opens the device.
 * Returns 0 on success, -EINVAL or -EIO on failure.
 */
static int snd_usb_caiaq_input_open(struct input_dev *idev)
{
	struct snd_usb_caiaqdev *cdev = input_get_drvdata(idev);

	if (!cdev)
		return -EINVAL;

	switch (cdev->chip.usb_id) {
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLS4):
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER):
		if (usb_submit_urb(cdev->ep4_in_urb, GFP_KERNEL) != 0)
			return -EIO;
		break;
	}

	return 0;
}
/*
 * input_dev close callback: stop the EP4 URB for devices that use it,
 * mirroring snd_usb_caiaq_input_open().
 */
static void snd_usb_caiaq_input_close(struct input_dev *idev)
{
	struct snd_usb_caiaqdev *cdev = input_get_drvdata(idev);

	if (!cdev)
		return;

	switch (cdev->chip.usb_id) {
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLS4):
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER):
		usb_kill_urb(cdev->ep4_in_urb);
		break;
	}
}
/*
 * Dispatch an EP1 message to the matching decoder.  buf[0] is the
 * command byte; the payload starts at buf[1].  Messages arriving before
 * the input device is registered, and unknown commands, are ignored.
 */
void snd_usb_caiaq_input_dispatch(struct snd_usb_caiaqdev *cdev,
				  char *buf,
				  unsigned int len)
{
	if (!cdev->input_dev || len < 1)
		return;

	switch (buf[0]) {
	case EP1_CMD_READ_ANALOG:
		snd_caiaq_input_read_analog(cdev, buf + 1, len - 1);
		break;
	case EP1_CMD_READ_ERP:
		snd_caiaq_input_read_erp(cdev, buf + 1, len - 1);
		break;
	case EP1_CMD_READ_IO:
		snd_caiaq_input_read_io(cdev, buf + 1, len - 1);
		break;
	}
}
/*
 * snd_usb_caiaq_input_init - allocate and register the input device
 * @cdev: caiaq device state
 *
 * Sets up a struct input_dev matching the connected model: event/abs
 * capability bits, keycode table, axis ranges and - for EP4-based
 * controllers - the bulk read URB.  Returns 0 on success (including
 * models without input support) or a negative errno.
 */
int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev)
{
	struct usb_device *usb_dev = cdev->chip.dev;
	struct input_dev *input;
	int i, ret = 0;

	input = input_allocate_device();
	if (!input)
		return -ENOMEM;

	/* stable physical path: "<usb path>/input0" */
	usb_make_path(usb_dev, cdev->phys, sizeof(cdev->phys));
	strlcat(cdev->phys, "/input0", sizeof(cdev->phys));

	input->name = cdev->product_name;
	input->phys = cdev->phys;
	usb_to_input_id(usb_dev, &input->id);
	input->dev.parent = &usb_dev->dev;

	input_set_drvdata(input, cdev);

	/* per-model capability and axis setup */
	switch (cdev->chip.usb_id) {
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL2):
		input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
		input->absbit[0] = BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) |
			BIT_MASK(ABS_Z);
		BUILD_BUG_ON(sizeof(cdev->keycode) < sizeof(keycode_rk2));
		memcpy(cdev->keycode, keycode_rk2, sizeof(keycode_rk2));
		input->keycodemax = ARRAY_SIZE(keycode_rk2);
		input_set_abs_params(input, ABS_X, 0, 4096, 0, 10);
		input_set_abs_params(input, ABS_Y, 0, 4096, 0, 10);
		input_set_abs_params(input, ABS_Z, 0, 4096, 0, 10);
		snd_usb_caiaq_set_auto_msg(cdev, 1, 10, 0);
		break;

	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_RIGKONTROL3):
		input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
		input->absbit[0] = BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) |
			BIT_MASK(ABS_Z);
		BUILD_BUG_ON(sizeof(cdev->keycode) < sizeof(keycode_rk3));
		memcpy(cdev->keycode, keycode_rk3, sizeof(keycode_rk3));
		input->keycodemax = ARRAY_SIZE(keycode_rk3);
		input_set_abs_params(input, ABS_X, 0, 1024, 0, 10);
		input_set_abs_params(input, ABS_Y, 0, 1024, 0, 10);
		input_set_abs_params(input, ABS_Z, 0, 1024, 0, 10);
		snd_usb_caiaq_set_auto_msg(cdev, 1, 10, 0);
		break;

	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_AK1):
		input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
		input->absbit[0] = BIT_MASK(ABS_X);
		BUILD_BUG_ON(sizeof(cdev->keycode) < sizeof(keycode_ak1));
		memcpy(cdev->keycode, keycode_ak1, sizeof(keycode_ak1));
		input->keycodemax = ARRAY_SIZE(keycode_ak1);
		input_set_abs_params(input, ABS_X, 0, 999, 0, 10);
		snd_usb_caiaq_set_auto_msg(cdev, 1, 0, 5);
		break;

	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER):
	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_KORECONTROLLER2):
		input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
		input->absbit[0] = BIT_MASK(ABS_HAT0X) | BIT_MASK(ABS_HAT0Y) |
			BIT_MASK(ABS_HAT1X) | BIT_MASK(ABS_HAT1Y) |
			BIT_MASK(ABS_HAT2X) | BIT_MASK(ABS_HAT2Y) |
			BIT_MASK(ABS_HAT3X) | BIT_MASK(ABS_HAT3Y) |
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) |
			BIT_MASK(ABS_Z);
		/* ABS_MISC lives in a different bitmap word, hence |= */
		input->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC);
		BUILD_BUG_ON(sizeof(cdev->keycode) < sizeof(keycode_kore));
		memcpy(cdev->keycode, keycode_kore, sizeof(keycode_kore));
		input->keycodemax = ARRAY_SIZE(keycode_kore);
		input_set_abs_params(input, ABS_HAT0X, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT0Y, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT1X, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT1Y, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT2X, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT2Y, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT3X, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT3Y, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_X, 0, 4096, 0, 10);
		input_set_abs_params(input, ABS_Y, 0, 4096, 0, 10);
		input_set_abs_params(input, ABS_Z, 0, 4096, 0, 10);
		input_set_abs_params(input, ABS_MISC, 0, 255, 0, 1);
		snd_usb_caiaq_set_auto_msg(cdev, 1, 10, 5);
		break;

	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLX1):
		input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
		input->absbit[0] = BIT_MASK(ABS_HAT0X) | BIT_MASK(ABS_HAT0Y) |
			BIT_MASK(ABS_HAT1X) | BIT_MASK(ABS_HAT1Y) |
			BIT_MASK(ABS_HAT2X) | BIT_MASK(ABS_HAT2Y) |
			BIT_MASK(ABS_HAT3X) | BIT_MASK(ABS_HAT3Y) |
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) |
			BIT_MASK(ABS_Z);
		input->absbit[BIT_WORD(ABS_MISC)] |= BIT_MASK(ABS_MISC);
		BUILD_BUG_ON(sizeof(cdev->keycode) < KONTROLX1_INPUTS);
		/* generic button codes: BTN_MISC, BTN_MISC+1, ... */
		for (i = 0; i < KONTROLX1_INPUTS; i++)
			cdev->keycode[i] = BTN_MISC + i;
		input->keycodemax = KONTROLX1_INPUTS;

		/* analog potentiometers */
		input_set_abs_params(input, ABS_HAT0X, 0, 4096, 0, 10);
		input_set_abs_params(input, ABS_HAT0Y, 0, 4096, 0, 10);
		input_set_abs_params(input, ABS_HAT1X, 0, 4096, 0, 10);
		input_set_abs_params(input, ABS_HAT1Y, 0, 4096, 0, 10);
		input_set_abs_params(input, ABS_HAT2X, 0, 4096, 0, 10);
		input_set_abs_params(input, ABS_HAT2Y, 0, 4096, 0, 10);
		input_set_abs_params(input, ABS_HAT3X, 0, 4096, 0, 10);
		input_set_abs_params(input, ABS_HAT3Y, 0, 4096, 0, 10);

		/* rotary encoders */
		input_set_abs_params(input, ABS_X, 0, 0xf, 0, 1);
		input_set_abs_params(input, ABS_Y, 0, 0xf, 0, 1);
		input_set_abs_params(input, ABS_Z, 0, 0xf, 0, 1);
		input_set_abs_params(input, ABS_MISC, 0, 0xf, 0, 1);

		/* this model reports through the EP4 bulk endpoint */
		cdev->ep4_in_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!cdev->ep4_in_urb) {
			ret = -ENOMEM;
			goto exit_free_idev;
		}

		usb_fill_bulk_urb(cdev->ep4_in_urb, usb_dev,
				  usb_rcvbulkpipe(usb_dev, 0x4),
				  cdev->ep4_in_buf, EP4_BUFSIZE,
				  snd_usb_caiaq_ep4_reply_dispatch, cdev);

		snd_usb_caiaq_set_auto_msg(cdev, 1, 10, 5);
		break;

	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_TRAKTORKONTROLS4):
		input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
		BUILD_BUG_ON(sizeof(cdev->keycode) < KONTROLS4_BUTTONS);
		for (i = 0; i < KONTROLS4_BUTTONS; i++)
			cdev->keycode[i] = KONTROLS4_BUTTON(i);
		input->keycodemax = KONTROLS4_BUTTONS;

		for (i = 0; i < KONTROLS4_AXIS; i++) {
			int axis = KONTROLS4_ABS(i);
			input->absbit[BIT_WORD(axis)] |= BIT_MASK(axis);
		}

		/* 36 analog potentiometers and faders */
		for (i = 0; i < 36; i++)
			input_set_abs_params(input, KONTROLS4_ABS(i), 0, 0xfff, 0, 10);

		/* 2 encoder wheels */
		input_set_abs_params(input, KONTROLS4_ABS(36), 0, 0x3ff, 0, 1);
		input_set_abs_params(input, KONTROLS4_ABS(37), 0, 0x3ff, 0, 1);

		/* 9 rotary encoders */
		for (i = 0; i < 9; i++)
			input_set_abs_params(input, KONTROLS4_ABS(38+i), 0, 0xf, 0, 1);

		/* this model reports through the EP4 bulk endpoint */
		cdev->ep4_in_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!cdev->ep4_in_urb) {
			ret = -ENOMEM;
			goto exit_free_idev;
		}

		usb_fill_bulk_urb(cdev->ep4_in_urb, usb_dev,
				  usb_rcvbulkpipe(usb_dev, 0x4),
				  cdev->ep4_in_buf, EP4_BUFSIZE,
				  snd_usb_caiaq_ep4_reply_dispatch, cdev);

		snd_usb_caiaq_set_auto_msg(cdev, 1, 10, 5);
		break;

	case USB_ID(USB_VID_NATIVEINSTRUMENTS, USB_PID_MASCHINECONTROLLER):
		input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
		input->absbit[0] = BIT_MASK(ABS_HAT0X) | BIT_MASK(ABS_HAT0Y) |
			BIT_MASK(ABS_HAT1X) | BIT_MASK(ABS_HAT1Y) |
			BIT_MASK(ABS_HAT2X) | BIT_MASK(ABS_HAT2Y) |
			BIT_MASK(ABS_HAT3X) | BIT_MASK(ABS_HAT3Y) |
			BIT_MASK(ABS_RX) | BIT_MASK(ABS_RY) |
			BIT_MASK(ABS_RZ);
		BUILD_BUG_ON(sizeof(cdev->keycode) < sizeof(keycode_maschine));
		memcpy(cdev->keycode, keycode_maschine, sizeof(keycode_maschine));
		input->keycodemax = ARRAY_SIZE(keycode_maschine);

		/* pressure-sensitive pads */
		for (i = 0; i < MASCHINE_PADS; i++) {
			input->absbit[0] |= MASCHINE_PAD(i);
			input_set_abs_params(input, MASCHINE_PAD(i), 0, 0xfff, 5, 10);
		}

		input_set_abs_params(input, ABS_HAT0X, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT0Y, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT1X, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT1Y, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT2X, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT2Y, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT3X, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_HAT3Y, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_RX, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_RY, 0, 999, 0, 10);
		input_set_abs_params(input, ABS_RZ, 0, 999, 0, 10);

		/* this model reports through the EP4 bulk endpoint */
		cdev->ep4_in_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!cdev->ep4_in_urb) {
			ret = -ENOMEM;
			goto exit_free_idev;
		}

		usb_fill_bulk_urb(cdev->ep4_in_urb, usb_dev,
				  usb_rcvbulkpipe(usb_dev, 0x4),
				  cdev->ep4_in_buf, EP4_BUFSIZE,
				  snd_usb_caiaq_ep4_reply_dispatch, cdev);
		snd_usb_caiaq_set_auto_msg(cdev, 1, 10, 5);
		break;

	default:
		/* no input methods supported on this device */
		/* note: ret is still 0 here, so this is not treated as an error */
		goto exit_free_idev;
	}

	input->open = snd_usb_caiaq_input_open;
	input->close = snd_usb_caiaq_input_close;
	input->keycode = cdev->keycode;
	input->keycodesize = sizeof(unsigned short);
	for (i = 0; i < input->keycodemax; i++)
		__set_bit(cdev->keycode[i], input->keybit);

	cdev->input_dev = input;

	ret = input_register_device(input);
	if (ret < 0)
		goto exit_free_idev;

	return 0;

exit_free_idev:
	input_free_device(input);
	cdev->input_dev = NULL;
	return ret;
}
/*
 * Tear down the input device and the EP4 read URB.
 *
 * Safe to call for models without an EP4 URB: usb_kill_urb() and
 * usb_free_urb() both tolerate a NULL argument.
 */
void snd_usb_caiaq_input_free(struct snd_usb_caiaqdev *cdev)
{
	if (!cdev)
		return;
	if (!cdev->input_dev)
		return;

	usb_kill_urb(cdev->ep4_in_urb);
	usb_free_urb(cdev->ep4_in_urb);
	cdev->ep4_in_urb = NULL;

	input_unregister_device(cdev->input_dev);
	cdev->input_dev = NULL;
}
| gpl-2.0 |
zombi-x/android_kernel_lge_mako | arch/arm/mach-s3c24xx/clock-s3c2443.c | 4738 | 5846 | /* linux/arch/arm/mach-s3c2443/clock.c
*
* Copyright (c) 2007, 2010 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2443 Clock control support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/serial_core.h>
#include <linux/io.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <mach/regs-s3c2443-clock.h>
#include <plat/cpu-freq.h>
#include <plat/s3c2443.h>
#include <plat/clock.h>
#include <plat/clock-clksrc.h>
#include <plat/cpu.h>
/* We currently have to assume that the system is running
* from the XTPll input, and that all ***REFCLKs are being
* fed from it, as we cannot read the state of OM[4] from
* software.
*
* It would be possible for each board initialisation to
* set the correct muxing at initialisation
*/
/* clock selections */
/* armdiv
*
* this clock is sourced from msysclk and can have a number of
* divider values applied to it to then be fed into armclk.
* The real clock definition is done in s3c2443-clock.c,
* only the armdiv divisor table must be defined here.
*/
/*
 * Map from the CLKDIV0 ARMDIV field value (used as array index) to the
 * actual divisor applied between msysclk and armclk.  Unlisted field
 * values are zero-initialised, i.e. invalid.
 */
static unsigned int armdiv[16] = {
	[S3C2443_CLKDIV0_ARMDIV_1 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 1,
	[S3C2443_CLKDIV0_ARMDIV_2 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 2,
	[S3C2443_CLKDIV0_ARMDIV_3 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 3,
	[S3C2443_CLKDIV0_ARMDIV_4 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 4,
	[S3C2443_CLKDIV0_ARMDIV_6 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 6,
	[S3C2443_CLKDIV0_ARMDIV_8 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 8,
	[S3C2443_CLKDIV0_ARMDIV_12 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 12,
	[S3C2443_CLKDIV0_ARMDIV_16 >> S3C2443_CLKDIV0_ARMDIV_SHIFT]	= 16,
};
/* hsspi
*
* high-speed spi clock, sourced from esysclk
*/
/* High-speed SPI interface clock: esysclk divided by a 2-bit field in
 * CLKDIV1 (bits [5:4]), gated via SCLKCON. */
static struct clksrc_clk clk_hsspi = {
	.clk	= {
		.name		= "hsspi-if",
		.parent		= &clk_esysclk.clk,
		.ctrlbit	= S3C2443_SCLKCON_HSSPICLK,
		.enable		= s3c2443_clkcon_enable_s,
	},
	.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 4 },
};
/* clk_hsmcc_div
*
* this clock is sourced from epll, and is fed through a divider,
* to a mux controlled by sclkcon where either it or a extclk can
* be fed to the hsmmc block
*/
/* Divider stage for the HSMMC clock: esysclk through a 2-bit divider in
 * CLKDIV1 (bits [7:6]).  Gating/mux selection happens in clk_hsmmc. */
static struct clksrc_clk clk_hsmmc_div = {
	.clk	= {
		.name		= "hsmmc-div",
		.devname	= "s3c-sdhci.1",
		.parent		= &clk_esysclk.clk,
	},
	.reg_div = { .reg = S3C2443_CLKDIV1, .size = 2, .shift = 6 },
};
/*
 * Select the HSMMC clock source in SCLKCON.
 *
 * Only clk_epll and clk_ext are valid parents; anything else yields
 * -EINVAL.  The register is only written while the clock is in use
 * (usage > 0), but the software parent is updated unconditionally so
 * the next enable applies the selection.
 */
static int s3c2443_setparent_hsmmc(struct clk *clk, struct clk *parent)
{
	unsigned long sclkcon = __raw_readl(S3C2443_SCLKCON);

	/* clear both source-select bits before choosing one */
	sclkcon &= ~(S3C2443_SCLKCON_HSMMCCLK_EXT |
		     S3C2443_SCLKCON_HSMMCCLK_EPLL);

	if (parent == &clk_epll)
		sclkcon |= S3C2443_SCLKCON_HSMMCCLK_EPLL;
	else if (parent == &clk_ext)
		sclkcon |= S3C2443_SCLKCON_HSMMCCLK_EXT;
	else
		return -EINVAL;

	if (clk->usage > 0)
		__raw_writel(sclkcon, S3C2443_SCLKCON);

	clk->parent = parent;
	return 0;
}
/*
 * Enable callback for clk_hsmmc: re-applies the current parent
 * selection, which writes SCLKCON now that usage is non-zero.  The
 * @enable argument is intentionally unused - disabling is handled by
 * the usage count in s3c2443_setparent_hsmmc().
 */
static int s3c2443_enable_hsmmc(struct clk *clk, int enable)
{
	return s3c2443_setparent_hsmmc(clk, clk->parent);
}
/* HSMMC interface clock: fed from clk_hsmmc_div, with a SCLKCON mux
 * allowing either the EPLL-derived divider output or an external clock
 * (see s3c2443_setparent_hsmmc). */
static struct clk clk_hsmmc = {
	.name		= "hsmmc-if",
	.devname	= "s3c-sdhci.1",
	.parent		= &clk_hsmmc_div.clk,
	.enable		= s3c2443_enable_hsmmc,
	.ops		= &(struct clk_ops) {
		.set_parent	= s3c2443_setparent_hsmmc,
	},
};
/* standard clock definitions */
/* Peripheral clocks registered at init but immediately disabled; their
 * drivers re-enable them on demand.  All are children of pclk and gated
 * through PCLKCON. */
static struct clk init_clocks_off[] = {
	{
		.name		= "sdi",
		.parent		= &clk_p,
		.enable		= s3c2443_clkcon_enable_p,
		.ctrlbit	= S3C2443_PCLKCON_SDI,
	}, {
		.name		= "spi",
		.devname	= "s3c2410-spi.0",
		.parent		= &clk_p,
		.enable		= s3c2443_clkcon_enable_p,
		.ctrlbit	= S3C2443_PCLKCON_SPI0,
	}, {
		.name		= "spi",
		.devname	= "s3c2410-spi.1",
		.parent		= &clk_p,
		.enable		= s3c2443_clkcon_enable_p,
		.ctrlbit	= S3C2443_PCLKCON_SPI1,
	}
};
/* clocks to add straight away */
/* clksrc-style clocks registered (enabled) straight away at init */
static struct clksrc_clk *clksrcs[] __initdata = {
	&clk_hsspi,
	&clk_hsmmc_div,
};

/* plain struct clk instances registered straight away at init */
static struct clk *clks[] __initdata = {
	&clk_hsmmc,
};
/*
 * s3c2443_init_clocks - set up the S3C2443 clock tree
 * @xtal: external crystal rate in Hz, used as the PLL reference
 *
 * Reads the current EPLL configuration, runs the common s3c2443 clock
 * setup with this SoC's armdiv table, then registers the local clocks
 * and disables the ones nothing is using yet.
 */
void __init s3c2443_init_clocks(int xtal)
{
	unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
	int ptr;

	/* derive the EPLL rate from the value the bootloader left in place */
	clk_epll.rate = s3c2443_get_epll(epllcon, xtal);
	clk_epll.parent = &clk_epllref.clk;

	s3c2443_common_init_clocks(xtal, s3c2443_get_mpll,
				   armdiv, ARRAY_SIZE(armdiv),
				   S3C2443_CLKDIV0_ARMDIV_MASK);

	s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));

	for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
		s3c_register_clksrc(clksrcs[ptr], 1);

	/* We must be careful disabling the clocks we are not intending to
	 * be using at boot time, as subsystems such as the LCD which do
	 * their own DMA requests to the bus can cause the system to lockup
	 * if they where in the middle of requesting bus access.
	 *
	 * Disabling the LCD clock if the LCD is active is very dangerous,
	 * and therefore the bootloader should be careful to not enable
	 * the LCD clock if it is not needed.
	 */

	/* install (and disable) the clocks we do not need immediately */
	s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
	s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));

	s3c_pwmclk_init();
}
| gpl-2.0 |
davidmueller13/custom_kernel_lt03lte_aosp_6.0 | drivers/net/can/sja1000/ems_pcmcia.c | 5250 | 8471 | /*
* Copyright (C) 2008 Sebastian Haas (initial chardev implementation)
* Copyright (C) 2010 Markus Plessing <plessing@ems-wuensche.com>
* Rework for mainline by Oliver Hartkopp <socketcan@hartkopp.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the version 2 of the GNU General Public License
* as published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include "sja1000.h"
#define DRV_NAME "ems_pcmcia"
MODULE_AUTHOR("Markus Plessing <plessing@ems-wuensche.com>");
MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-CARD cards");
MODULE_SUPPORTED_DEVICE("EMS CPC-CARD CAN card");
MODULE_LICENSE("GPL v2");
#define EMS_PCMCIA_MAX_CHAN 2
/* Per-card state for one EMS CPC-CARD: up to EMS_PCMCIA_MAX_CHAN SJA1000
 * channels sharing one ioremapped memory window and one IRQ. */
struct ems_pcmcia_card {
	int channels;				/* number of detected CAN channels */
	struct pcmcia_device *pcmcia_dev;	/* owning PCMCIA device */
	struct net_device *net_dev[EMS_PCMCIA_MAX_CHAN]; /* per-channel netdevs, NULL if absent */
	void __iomem *base_addr;		/* mapped card memory window */
};
#define EMS_PCMCIA_CAN_CLOCK (16000000 / 2)
/*
* The board configuration is probably following:
* RX1 is connected to ground.
* TX1 is not connected.
* CLKO is not connected.
* Setting the OCR register to 0xDA is a good idea.
* This means normal output mode , push-pull and the correct polarity.
*/
#define EMS_PCMCIA_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
/*
* In the CDR register, you should set CBP to 1.
* You will probably also want to set the clock divider value to 7
* (meaning direct oscillator output) because the second SJA1000 chip
* is driven by the first one CLKOUT output.
*/
#define EMS_PCMCIA_CDR (CDR_CBP | CDR_CLKOUT_MASK)
#define EMS_PCMCIA_MEM_SIZE 4096 /* Size of the remapped io-memory */
#define EMS_PCMCIA_CAN_BASE_OFFSET 0x100 /* Offset where controllers starts */
#define EMS_PCMCIA_CAN_CTRL_SIZE 0x80 /* Memory size for each controller */
#define EMS_CMD_RESET 0x00 /* Perform a reset of the card */
#define EMS_CMD_MAP 0x03 /* Map CAN controllers into card' memory */
#define EMS_CMD_UMAP 0x02 /* Unmap CAN controllers from card' memory */
/* PCMCIA match table: identify the CPC-Card by its CIS product-ID strings */
static struct pcmcia_device_id ems_pcmcia_tbl[] = {
	PCMCIA_DEVICE_PROD_ID123("EMS_T_W", "CPC-Card", "V2.0", 0xeab1ea23,
				 0xa338573f, 0xe4575800),
	PCMCIA_DEVICE_NULL,
};

MODULE_DEVICE_TABLE(pcmcia, ems_pcmcia_tbl);
/* SJA1000 register read: controllers are memory-mapped, one byte per port */
static u8 ems_pcmcia_read_reg(const struct sja1000_priv *priv, int port)
{
	return readb(priv->reg_base + port);
}
/* SJA1000 register write counterpart of ems_pcmcia_read_reg() */
static void ems_pcmcia_write_reg(const struct sja1000_priv *priv, int port,
				 u8 val)
{
	writeb(val, priv->reg_base + port);
}
/*
 * Shared IRQ handler for the whole card.
 *
 * Polls every detected channel's SJA1000 handler until a full pass
 * makes no progress, so bursts that arrive while we are servicing one
 * channel are not lost.  A missing 0xAA55 signature word means the card
 * was removed; the interrupt is then claimed so the core stops retrying.
 */
static irqreturn_t ems_pcmcia_interrupt(int irq, void *dev_id)
{
	struct ems_pcmcia_card *card = dev_id;
	irqreturn_t rc = IRQ_NONE;
	int progress;

	/* Card not present */
	if (readw(card->base_addr) != 0xAA55)
		return IRQ_HANDLED;

	do {
		int chan;

		progress = 0;

		/* give every channel a chance to drain its controller */
		for (chan = 0; chan < card->channels; chan++) {
			struct net_device *ndev = card->net_dev[chan];

			if (ndev &&
			    sja1000_interrupt(irq, ndev) == IRQ_HANDLED)
				progress = 1;
		}

		if (progress)
			rc = IRQ_HANDLED;
	} while (progress);

	return rc;
}
/*
* Check if a CAN controller is present at the specified location
* by trying to set 'em into the PeliCAN mode
*/
/*
 * Probe for an SJA1000 at priv->reg_base by forcing reset mode and
 * switching the clock-divider register to PeliCAN mode.  A controller
 * that is really there reads the value back; returns 1 if present,
 * 0 otherwise.
 */
static inline int ems_pcmcia_check_chan(struct sja1000_priv *priv)
{
	/* Make sure SJA1000 is in reset mode */
	ems_pcmcia_write_reg(priv, REG_MOD, 1);
	ems_pcmcia_write_reg(priv, REG_CDR, CDR_PELICAN);

	/* only a real controller echoes the mode selection back */
	return ems_pcmcia_read_reg(priv, REG_CDR) == CDR_PELICAN;
}
/*
 * Tear down a card: release the IRQ, unregister and free every channel
 * netdev, unmap the controllers and free the card state.
 *
 * Also used as the error-unwind path of ems_pcmcia_add_card(), and is
 * reachable from ems_pcmcia_remove() after a probe that failed early,
 * so it must tolerate partially (or never) initialised state:
 *  - pdev->priv may still be NULL (probe failed before add_card)
 *  - base_addr may be NULL (ioremap failed)
 *
 * NOTE(review): when invoked from add_card's failure path before
 * request_irq() succeeded, free_irq() releases an IRQ that was never
 * requested - pre-existing behavior, kept as-is here.
 */
static void ems_pcmcia_del_card(struct pcmcia_device *pdev)
{
	struct ems_pcmcia_card *card = pdev->priv;
	struct net_device *dev;
	int i;

	if (!card)
		return;

	free_irq(pdev->irq, card);

	for (i = 0; i < card->channels; i++) {
		dev = card->net_dev[i];
		if (!dev)
			continue;

		printk(KERN_INFO "%s: removing %s on channel #%d\n",
		       DRV_NAME, dev->name, i);
		unregister_sja1000dev(dev);
		free_sja1000dev(dev);
	}

	if (card->base_addr) {
		/* unmap controllers from the card's memory, then the window */
		writeb(EMS_CMD_UMAP, card->base_addr);
		iounmap(card->base_addr);
	}

	kfree(card);
	pdev->priv = NULL;
}
/*
* Probe PCI device for EMS CAN signature and register each available
* CAN channel to SJA1000 Socket-CAN subsystem.
*/
/*
 * Probe the mapped window for the EMS CAN signature and register each
 * available SJA1000 channel with the Socket-CAN subsystem.
 *
 * Fixes over the previous version:
 *  - request_irq() now uses pdev->irq instead of dev->irq: "dev" holds
 *    the last loop iteration's netdev, which has already been freed if
 *    that channel failed detection (use-after-free).
 *  - a card where no channel was detected is now reported as -ENODEV
 *    instead of succeeding with zero channels.
 */
static int __devinit ems_pcmcia_add_card(struct pcmcia_device *pdev,
					 unsigned long base)
{
	struct sja1000_priv *priv;
	struct net_device *dev;
	struct ems_pcmcia_card *card;
	int err, i;

	/* Allocating card structures to hold addresses, ... */
	card = kzalloc(sizeof(struct ems_pcmcia_card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	pdev->priv = card;
	card->channels = 0;

	card->base_addr = ioremap(base, EMS_PCMCIA_MEM_SIZE);
	if (!card->base_addr) {
		err = -ENOMEM;
		goto failure_cleanup;
	}

	/* Check for unique EMS CAN signature */
	if (readw(card->base_addr) != 0xAA55) {
		err = -ENODEV;
		goto failure_cleanup;
	}

	/* Request board reset */
	writeb(EMS_CMD_RESET, card->base_addr);

	/* Make sure CAN controllers are mapped into card's memory space */
	writeb(EMS_CMD_MAP, card->base_addr);

	/* Detect available channels */
	for (i = 0; i < EMS_PCMCIA_MAX_CHAN; i++) {
		dev = alloc_sja1000dev(0);
		if (!dev) {
			err = -ENOMEM;
			goto failure_cleanup;
		}

		card->net_dev[i] = dev;
		priv = netdev_priv(dev);
		priv->priv = card;
		SET_NETDEV_DEV(dev, &pdev->dev);

		priv->irq_flags = IRQF_SHARED;
		dev->irq = pdev->irq;
		priv->reg_base = card->base_addr + EMS_PCMCIA_CAN_BASE_OFFSET +
			(i * EMS_PCMCIA_CAN_CTRL_SIZE);

		/* Check if channel is present */
		if (ems_pcmcia_check_chan(priv)) {
			priv->read_reg = ems_pcmcia_read_reg;
			priv->write_reg = ems_pcmcia_write_reg;
			priv->can.clock.freq = EMS_PCMCIA_CAN_CLOCK;
			priv->ocr = EMS_PCMCIA_OCR;
			priv->cdr = EMS_PCMCIA_CDR;
			priv->flags |= SJA1000_CUSTOM_IRQ_HANDLER;

			/* Register SJA1000 device */
			err = register_sja1000dev(dev);
			if (err) {
				free_sja1000dev(dev);
				goto failure_cleanup;
			}

			card->channels++;

			printk(KERN_INFO "%s: registered %s on channel "
			       "#%d at 0x%p, irq %d\n", DRV_NAME, dev->name,
			       i, priv->reg_base, dev->irq);
		} else {
			/* slot empty: drop the placeholder netdev */
			free_sja1000dev(dev);
			card->net_dev[i] = NULL;
		}
	}

	if (!card->channels) {
		err = -ENODEV;
		goto failure_cleanup;
	}

	/* use pdev->irq: the per-netdev pointer may already be freed */
	err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED,
			  DRV_NAME, card);
	if (!err)
		return 0;

failure_cleanup:
	ems_pcmcia_del_card(pdev);
	return err;
}
/*
* Setup PCMCIA socket and probe for EMS CPC-CARD
*/
/*
 * Setup PCMCIA socket and probe for EMS CPC-CARD.
 *
 * Fix: all failure paths previously returned 0, which left the PCMCIA
 * device bound with no resources allocated; a later ems_pcmcia_remove()
 * would then operate on a NULL pdev->priv.  Failures now return a
 * negative errno so the core never considers the probe successful, and
 * the result of ems_pcmcia_add_card() is propagated to the caller.
 */
static int __devinit ems_pcmcia_probe(struct pcmcia_device *dev)
{
	int csval;

	/* General socket configuration */
	dev->config_flags |= CONF_ENABLE_IRQ;
	dev->config_index = 1;
	dev->config_regs = PRESENT_OPTION;

	/* The io structure describes IO port mapping */
	dev->resource[0]->end = 16;
	dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
	dev->resource[1]->end = 16;
	dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_16;
	dev->io_lines = 5;

	/* Allocate a memory window */
	dev->resource[2]->flags =
		(WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE);
	dev->resource[2]->start = dev->resource[2]->end = 0;

	csval = pcmcia_request_window(dev, dev->resource[2], 0);
	if (csval) {
		dev_err(&dev->dev, "pcmcia_request_window failed (err=%d)\n",
			csval);
		return -ENODEV;
	}

	csval = pcmcia_map_mem_page(dev, dev->resource[2], dev->config_base);
	if (csval) {
		dev_err(&dev->dev, "pcmcia_map_mem_page failed (err=%d)\n",
			csval);
		return -ENODEV;
	}

	csval = pcmcia_enable_device(dev);
	if (csval) {
		dev_err(&dev->dev, "pcmcia_enable_device failed (err=%d)\n",
			csval);
		return -ENODEV;
	}

	return ems_pcmcia_add_card(dev, dev->resource[2]->start);
}
/*
* Release claimed resources
*/
/*
 * Release claimed resources: tear down the card state and then disable
 * the PCMCIA socket configuration.
 */
static void ems_pcmcia_remove(struct pcmcia_device *dev)
{
	ems_pcmcia_del_card(dev);
	pcmcia_disable_device(dev);
}
/* PCMCIA driver description binding the probe/remove callbacks to the
 * CPC-Card ID table */
static struct pcmcia_driver ems_pcmcia_driver = {
	.name = DRV_NAME,
	.probe = ems_pcmcia_probe,
	.remove = ems_pcmcia_remove,
	.id_table = ems_pcmcia_tbl,
};
/* Module entry point: register with the PCMCIA core */
static int __init ems_pcmcia_init(void)
{
	return pcmcia_register_driver(&ems_pcmcia_driver);
}

module_init(ems_pcmcia_init);
/* Module exit point: unregister from the PCMCIA core */
static void __exit ems_pcmcia_exit(void)
{
	pcmcia_unregister_driver(&ems_pcmcia_driver);
}

module_exit(ems_pcmcia_exit);
| gpl-2.0 |
forumber/Temiz_Kernel_G3 | drivers/net/ethernet/intel/ixgb/ixgb_param.c | 5250 | 12777 | /*******************************************************************************
Intel PRO/10GbE Linux driver
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
Linux NICS <linux.nics@intel.com>
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ixgb.h"
/* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define IXGB_MAX_NIC 8
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
#define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET }
#define IXGB_PARAM(X, desc) \
static int __devinitdata X[IXGB_MAX_NIC+1] \
= IXGB_PARAM_INIT; \
static unsigned int num_##X = 0; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
/* Transmit Descriptor Count
*
* Valid Range: 64-4096
*
* Default Value: 256
*/
IXGB_PARAM(TxDescriptors, "Number of transmit descriptors");
/* Receive Descriptor Count
*
* Valid Range: 64-4096
*
* Default Value: 1024
*/
IXGB_PARAM(RxDescriptors, "Number of receive descriptors");
/* User Specified Flow Control Override
*
* Valid Range: 0-3
* - 0 - No Flow Control
* - 1 - Rx only, respond to PAUSE frames but do not generate them
* - 2 - Tx only, generate PAUSE frames but ignore them on receive
* - 3 - Full Flow Control Support
*
* Default Value: 2 - Tx only (silicon bug avoidance)
*/
IXGB_PARAM(FlowControl, "Flow Control setting");
/* XsumRX - Receive Checksum Offload Enable/Disable
*
* Valid Range: 0, 1
* - 0 - disables all checksum offload
* - 1 - enables receive IP/TCP/UDP checksum offload
* on 82597 based NICs
*
* Default Value: 1
*/
IXGB_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
/* Transmit Interrupt Delay in units of 0.8192 microseconds
*
* Valid Range: 0-65535
*
* Default Value: 32
*/
IXGB_PARAM(TxIntDelay, "Transmit Interrupt Delay");
/* Receive Interrupt Delay in units of 0.8192 microseconds
*
* Valid Range: 0-65535
*
* Default Value: 72
*/
IXGB_PARAM(RxIntDelay, "Receive Interrupt Delay");
/* Receive Flow control high threshold (when we send a pause frame)
* (FCRTH)
*
* Valid Range: 1,536 - 262,136 (0x600 - 0x3FFF8, 8 byte granularity)
*
* Default Value: 196,608 (0x30000)
*/
IXGB_PARAM(RxFCHighThresh, "Receive Flow Control High Threshold");
/* Receive Flow control low threshold (when we send a resume frame)
* (FCRTL)
*
* Valid Range: 64 - 262,136 (0x40 - 0x3FFF8, 8 byte granularity)
* must be less than high threshold by at least 8 bytes
*
* Default Value: 163,840 (0x28000)
*/
IXGB_PARAM(RxFCLowThresh, "Receive Flow Control Low Threshold");
/* Flow control request timeout (how long to pause the link partner's tx)
* (PAP 15:0)
*
* Valid Range: 1 - 65535
*
* Default Value: 65535 (0xffff) (we'll send an xon if we recover)
*/
IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout");
/* Interrupt Delay Enable
*
* Valid Range: 0, 1
*
* - 0 - disables transmit interrupt delay
* - 1 - enables transmmit interrupt delay
*
* Default Value: 1
*/
IXGB_PARAM(IntDelayEnable, "Transmit Interrupt Delay Enable");
#define DEFAULT_TIDV 32
#define MAX_TIDV 0xFFFF
#define MIN_TIDV 0
#define DEFAULT_RDTR 72
#define MAX_RDTR 0xFFFF
#define MIN_RDTR 0
#define XSUMRX_DEFAULT OPTION_ENABLED
#define DEFAULT_FCRTL 0x28000
#define DEFAULT_FCRTH 0x30000
#define MIN_FCRTL 0
#define MAX_FCRTL 0x3FFE8
#define MIN_FCRTH 8
#define MAX_FCRTH 0x3FFF0
#define MIN_FCPAUSE 1
#define MAX_FCPAUSE 0xffff
#define DEFAULT_FCPAUSE 0xFFFF /* this may be too long */
/* Description of one module parameter: how to validate it (on/off flag,
 * numeric range, or list of named values) and its default. */
struct ixgb_option {
	enum { enable_option, range_option, list_option } type;
	const char *name;	/* human-readable parameter name for messages */
	const char *err;	/* suffix printed when validation fails */
	int def;		/* default applied when unset or invalid */
	union {			/* validation data, selected by .type */
		struct {	/* range_option info */
			int min;
			int max;
		} r;
		struct {	/* list_option info */
			int nr;	/* number of entries in p */
			const struct ixgb_opt_list {
				int i;		/* accepted value */
				const char *str; /* message printed when chosen */
			} *p;
		} l;
	} arg;
};
/*
 * Validate one module-parameter value against its option descriptor.
 *
 * An unset value silently takes the default.  A valid value is logged
 * and kept (return 0); an invalid one is logged, replaced by the
 * default, and reported with -1.
 */
static int __devinit
ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
{
	if (*value == OPTION_UNSET) {
		*value = opt->def;
		return 0;
	}

	switch (opt->type) {
	case enable_option:
		if (*value == OPTION_ENABLED) {
			pr_info("%s Enabled\n", opt->name);
			return 0;
		}
		if (*value == OPTION_DISABLED) {
			pr_info("%s Disabled\n", opt->name);
			return 0;
		}
		break;
	case range_option:
		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
			pr_info("%s set to %i\n", opt->name, *value);
			return 0;
		}
		break;
	case list_option: {
		int idx;

		for (idx = 0; idx < opt->arg.l.nr; idx++) {
			const struct ixgb_opt_list *ent = &opt->arg.l.p[idx];

			if (*value == ent->i) {
				if (ent->str[0] != '\0')
					pr_info("%s\n", ent->str);
				return 0;
			}
		}
		break;
	}
	default:
		BUG();
	}

	/* fell through every case: the value is out of range/unknown */
	pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err);
	*value = opt->def;
	return -1;
}
/**
* ixgb_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
void __devinit
ixgb_check_options(struct ixgb_adapter *adapter)
{
int bd = adapter->bd_number;
if (bd >= IXGB_MAX_NIC) {
pr_notice("Warning: no configuration for board #%i\n", bd);
pr_notice("Using defaults for all values\n");
}
{ /* Transmit Descriptor Count */
static const struct ixgb_option opt = {
.type = range_option,
.name = "Transmit Descriptors",
.err = "using default of " __MODULE_STRING(DEFAULT_TXD),
.def = DEFAULT_TXD,
.arg = { .r = { .min = MIN_TXD,
.max = MAX_TXD}}
};
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
if (num_TxDescriptors > bd) {
tx_ring->count = TxDescriptors[bd];
ixgb_validate_option(&tx_ring->count, &opt);
} else {
tx_ring->count = opt.def;
}
tx_ring->count = ALIGN(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
}
{ /* Receive Descriptor Count */
static const struct ixgb_option opt = {
.type = range_option,
.name = "Receive Descriptors",
.err = "using default of " __MODULE_STRING(DEFAULT_RXD),
.def = DEFAULT_RXD,
.arg = { .r = { .min = MIN_RXD,
.max = MAX_RXD}}
};
struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
if (num_RxDescriptors > bd) {
rx_ring->count = RxDescriptors[bd];
ixgb_validate_option(&rx_ring->count, &opt);
} else {
rx_ring->count = opt.def;
}
rx_ring->count = ALIGN(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
}
{ /* Receive Checksum Offload Enable */
static const struct ixgb_option opt = {
.type = enable_option,
.name = "Receive Checksum Offload",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (num_XsumRX > bd) {
unsigned int rx_csum = XsumRX[bd];
ixgb_validate_option(&rx_csum, &opt);
adapter->rx_csum = rx_csum;
} else {
adapter->rx_csum = opt.def;
}
}
{ /* Flow Control */
static const struct ixgb_opt_list fc_list[] = {
{ ixgb_fc_none, "Flow Control Disabled" },
{ ixgb_fc_rx_pause, "Flow Control Receive Only" },
{ ixgb_fc_tx_pause, "Flow Control Transmit Only" },
{ ixgb_fc_full, "Flow Control Enabled" },
{ ixgb_fc_default, "Flow Control Hardware Default" }
};
static const struct ixgb_option opt = {
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
.def = ixgb_fc_tx_pause,
.arg = { .l = { .nr = ARRAY_SIZE(fc_list),
.p = fc_list }}
};
if (num_FlowControl > bd) {
unsigned int fc = FlowControl[bd];
ixgb_validate_option(&fc, &opt);
adapter->hw.fc.type = fc;
} else {
adapter->hw.fc.type = opt.def;
}
}
{ /* Receive Flow Control High Threshold */
static const struct ixgb_option opt = {
.type = range_option,
.name = "Rx Flow Control High Threshold",
.err = "using default of " __MODULE_STRING(DEFAULT_FCRTH),
.def = DEFAULT_FCRTH,
.arg = { .r = { .min = MIN_FCRTH,
.max = MAX_FCRTH}}
};
if (num_RxFCHighThresh > bd) {
adapter->hw.fc.high_water = RxFCHighThresh[bd];
ixgb_validate_option(&adapter->hw.fc.high_water, &opt);
} else {
adapter->hw.fc.high_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
pr_info("Ignoring RxFCHighThresh when no RxFC\n");
}
{ /* Receive Flow Control Low Threshold */
static const struct ixgb_option opt = {
.type = range_option,
.name = "Rx Flow Control Low Threshold",
.err = "using default of " __MODULE_STRING(DEFAULT_FCRTL),
.def = DEFAULT_FCRTL,
.arg = { .r = { .min = MIN_FCRTL,
.max = MAX_FCRTL}}
};
if (num_RxFCLowThresh > bd) {
adapter->hw.fc.low_water = RxFCLowThresh[bd];
ixgb_validate_option(&adapter->hw.fc.low_water, &opt);
} else {
adapter->hw.fc.low_water = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
pr_info("Ignoring RxFCLowThresh when no RxFC\n");
}
{ /* Flow Control Pause Time Request*/
static const struct ixgb_option opt = {
.type = range_option,
.name = "Flow Control Pause Time Request",
.err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE),
.def = DEFAULT_FCPAUSE,
.arg = { .r = { .min = MIN_FCPAUSE,
.max = MAX_FCPAUSE}}
};
if (num_FCReqTimeout > bd) {
unsigned int pause_time = FCReqTimeout[bd];
ixgb_validate_option(&pause_time, &opt);
adapter->hw.fc.pause_time = pause_time;
} else {
adapter->hw.fc.pause_time = opt.def;
}
if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
pr_info("Ignoring FCReqTimeout when no RxFC\n");
}
/* high low and spacing check for rx flow control thresholds */
if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
/* high must be greater than low */
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
/* set defaults */
pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n");
adapter->hw.fc.high_water = DEFAULT_FCRTH;
adapter->hw.fc.low_water = DEFAULT_FCRTL;
}
}
{ /* Receive Interrupt Delay */
static const struct ixgb_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
.def = DEFAULT_RDTR,
.arg = { .r = { .min = MIN_RDTR,
.max = MAX_RDTR}}
};
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
ixgb_validate_option(&adapter->rx_int_delay, &opt);
} else {
adapter->rx_int_delay = opt.def;
}
}
{ /* Transmit Interrupt Delay */
static const struct ixgb_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
.def = DEFAULT_TIDV,
.arg = { .r = { .min = MIN_TIDV,
.max = MAX_TIDV}}
};
if (num_TxIntDelay > bd) {
adapter->tx_int_delay = TxIntDelay[bd];
ixgb_validate_option(&adapter->tx_int_delay, &opt);
} else {
adapter->tx_int_delay = opt.def;
}
}
{ /* Transmit Interrupt Delay Enable */
static const struct ixgb_option opt = {
.type = enable_option,
.name = "Tx Interrupt Delay Enable",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if (num_IntDelayEnable > bd) {
unsigned int ide = IntDelayEnable[bd];
ixgb_validate_option(&ide, &opt);
adapter->tx_int_delay_enable = ide;
} else {
adapter->tx_int_delay_enable = opt.def;
}
}
}
| gpl-2.0 |
FAlinux-SoftwareinLife/silfa | OS/kernel/drivers/scsi/mvsas/mv_94xx.c | 5250 | 26625 | /*
* Marvell 88SE94xx hardware specific
*
* Copyright 2007 Red Hat, Inc.
* Copyright 2008 Marvell. <kewei@marvell.com>
* Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of the
* License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*/
#include "mv_sas.h"
#include "mv_94xx.h"
#include "mv_chips.h"
/*
 * mvs_94xx_detect_porttype - classify phy @i as SAS or SATA.
 *
 * Reads the phy status field from VSR_PHY_MODE3 through the
 * vendor-specific register window and updates phy->phy_type.
 */
static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
{
	u32 reg;
	struct mvs_phy *phy = &mvi->phy[i];
	u32 phy_status;

	mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
	reg = mvs_read_port_vsr_data(mvi, i);
	/* phy status lives in bits [21:16] of the mode-3 register */
	phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
	phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	switch (phy_status) {
	case 0x10:
		phy->phy_type |= PORT_TYPE_SAS;
		break;
	case 0x1d:
	default:
		/* any status other than 0x10 is treated as SATA */
		phy->phy_type |= PORT_TYPE_SATA;
		break;
	}
}
/*
 * set_phy_tuning - program per-generation transmitter tuning for one phy.
 *
 * For each of Gen1/Gen2/Gen3 the emphasis enable, emphasis amplitude,
 * amplitude and amplitude-adjust values from @phy_tuning are written to
 * the corresponding pair of vendor-specific setting registers.
 * No-op on A0 silicon, which uses a different register layout.
 */
void set_phy_tuning(struct mvs_info *mvi, int phy_id,
			struct phy_tuning phy_tuning)
{
	u32 tmp, setting_0 = 0, setting_1 = 0;
	u8 i;

	/* Remap information for B0 chip:
	 *
	 * R0Ch -> R118h[15:0] (Adapted DFE F3 - F5 coefficient)
	 * R0Dh -> R118h[31:16] (Generation 1 Setting 0)
	 * R0Eh -> R11Ch[15:0]  (Generation 1 Setting 1)
	 * R0Fh -> R11Ch[31:16] (Generation 2 Setting 0)
	 * R10h -> R120h[15:0]  (Generation 2 Setting 1)
	 * R11h -> R120h[31:16] (Generation 3 Setting 0)
	 * R12h -> R124h[15:0]  (Generation 3 Setting 1)
	 * R13h -> R124h[31:16] (Generation 4 Setting 0 (Reserved))
	 */

	/* A0 has a different set of registers */
	if (mvi->pdev->revision == VANIR_A0_REV)
		return;

	for (i = 0; i < 3; i++) {
		/* loop 3 times, set Gen 1, Gen 2, Gen 3 */
		switch (i) {
		case 0:
			setting_0 = GENERATION_1_SETTING;
			setting_1 = GENERATION_1_2_SETTING;
			break;
		case 1:
			setting_0 = GENERATION_1_2_SETTING;
			setting_1 = GENERATION_2_3_SETTING;
			break;
		case 2:
			setting_0 = GENERATION_2_3_SETTING;
			setting_1 = GENERATION_3_4_SETTING;
			break;
		}

		/* Set:
		 *
		 * Transmitter Emphasis Enable
		 * Transmitter Emphasis Amplitude
		 * Transmitter Amplitude
		 */
		mvs_write_port_vsr_addr(mvi, phy_id, setting_0);
		tmp = mvs_read_port_vsr_data(mvi, phy_id);
		/* fields occupy bits 0xFBE of the upper half-word */
		tmp &= ~(0xFBE << 16);
		tmp |= (((phy_tuning.trans_emp_en << 11) |
			(phy_tuning.trans_emp_amp << 7) |
			(phy_tuning.trans_amp << 1)) << 16);
		mvs_write_port_vsr_data(mvi, phy_id, tmp);

		/* Set Transmitter Amplitude Adjust */
		mvs_write_port_vsr_addr(mvi, phy_id, setting_1);
		tmp = mvs_read_port_vsr_data(mvi, phy_id);
		tmp &= ~(0xC000);
		tmp |= (phy_tuning.trans_amp_adj << 14);
		mvs_write_port_vsr_data(mvi, phy_id, tmp);
	}
}
/*
 * set_phy_ffe_tuning - program the FFE (feed-forward equalizer) settings.
 *
 * Writes the FFE resistor/capacitor selections from @ffe and forces a
 * fixed DFE configuration through several vendor-specific registers.
 * No-op on A0/B0 silicon.
 */
void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
				struct ffe_control ffe)
{
	u32 tmp;

	/* Don't run this if A0/B0 */
	if ((mvi->pdev->revision == VANIR_A0_REV)
		|| (mvi->pdev->revision == VANIR_B0_REV))
		return;

	/* FFE Resistor and Capacitor */
	/* R10Ch DFE Resolution Control/Squelch and FFE Setting
	 *
	 * FFE_FORCE            [7]
	 * FFE_RES_SEL          [6:4]
	 * FFE_CAP_SEL          [3:0]
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_FFE_CONTROL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0xFF;

	/* Read from HBA_Info_Page */
	tmp |= ((0x1 << 7) |
		(ffe.ffe_rss_sel << 4) |
		(ffe.ffe_cap_sel << 0));

	mvs_write_port_vsr_data(mvi, phy_id, tmp);

	/* R064h PHY Mode Register 1
	 *
	 * DFE_DIS		18
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0x40001;
	/* Hard coding */
	/* No defines in HBA_Info_Page */
	tmp |= (0 << 18);
	mvs_write_port_vsr_data(mvi, phy_id, tmp);

	/* R110h DFE F0-F1 Coefficient Control/DFE Update Control
	 *
	 * DFE_UPDATE_EN        [11:6]
	 * DFE_FX_FORCE         [5:0]
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_DFE_UPDATE_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0xFFF;
	/* Hard coding */
	/* No defines in HBA_Info_Page */
	tmp |= ((0x3F << 6) | (0x0 << 0));
	mvs_write_port_vsr_data(mvi, phy_id, tmp);

	/* R1A0h Interface and Digital Reference Clock Control/Reserved_50h
	 *
	 * FFE_TRAIN_EN         3
	 */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_REF_CLOCK_CRTL);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp &= ~0x8;
	/* Hard coding */
	/* No defines in HBA_Info_Page */
	tmp |= (0 << 3);
	mvs_write_port_vsr_data(mvi, phy_id, tmp);
}
/*Notice: this function must be called when phy is disabled*/
/*
 * set_phy_rate - configure the maximum negotiable link rate for a phy.
 * @rate: 0 = 1.5 Gbps only, 1 = up to 3.0 Gbps, 2 (or other) = up to 6.0 Gbps.
 *
 * Rebuilds VSR_PHY_MODE2 from scratch, preserving only the current
 * disable_phy bit. Per the comment above the original definition, the
 * phy must be disabled when this is called.
 */
void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
{
	union reg_phy_cfg phy_cfg, phy_cfg_tmp;

	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
	phy_cfg_tmp.v = mvs_read_port_vsr_data(mvi, phy_id);
	phy_cfg.v = 0;
	/* keep whatever enable/disable state the phy is currently in */
	phy_cfg.u.disable_phy = phy_cfg_tmp.u.disable_phy;
	phy_cfg.u.sas_support = 1;
	phy_cfg.u.sata_support = 1;
	phy_cfg.u.sata_host_mode = 1;

	switch (rate) {
	case 0x0:
		/* support 1.5 Gbps */
		phy_cfg.u.speed_support = 1;
		phy_cfg.u.snw_3_support = 0;
		phy_cfg.u.tx_lnk_parity = 1;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x30;
		break;
	case 0x1:
		/* support 1.5, 3.0 Gbps */
		phy_cfg.u.speed_support = 3;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x3c;
		phy_cfg.u.tx_lgcl_lnk_rate = 0x08;
		break;
	case 0x2:
	default:
		/* support 1.5, 3.0, 6.0 Gbps */
		phy_cfg.u.speed_support = 7;
		phy_cfg.u.snw_3_support = 1;
		phy_cfg.u.tx_lnk_parity = 1;
		phy_cfg.u.tx_spt_phs_lnk_rate = 0x3f;
		phy_cfg.u.tx_lgcl_lnk_rate = 0x09;
		break;
	}
	mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
}
/*
 * mvs_94xx_config_reg_from_hba - apply per-phy tuning from the HBA info page.
 *
 * Fields that read back as all-ones (0xFF.. — i.e. unprogrammed flash,
 * presumably; TODO confirm) are replaced by revision-specific defaults
 * before the tuning, FFE and rate settings are pushed to the hardware.
 */
static void __devinit
mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
{
	u32 temp;
	temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
	if (temp == 0xFFFFFFFFL) {
		/* unprogrammed entry: fall back to fixed defaults */
		mvi->hba_info_param.phy_tuning[phy_id].trans_emp_amp = 0x6;
		mvi->hba_info_param.phy_tuning[phy_id].trans_amp = 0x1A;
		mvi->hba_info_param.phy_tuning[phy_id].trans_amp_adj = 0x3;
	}

	temp = (u8)(*(u8 *)&mvi->hba_info_param.ffe_ctl[phy_id]);
	if (temp == 0xFFL) {
		/* FFE defaults differ by silicon revision */
		switch (mvi->pdev->revision) {
		case VANIR_A0_REV:
		case VANIR_B0_REV:
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0x7;
			break;
		case VANIR_C0_REV:
		case VANIR_C1_REV:
		case VANIR_C2_REV:
		default:
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_rss_sel = 0x7;
			mvi->hba_info_param.ffe_ctl[phy_id].ffe_cap_sel = 0xC;
			break;
		}
	}

	temp = (u8)(*(u8 *)&mvi->hba_info_param.phy_rate[phy_id]);
	if (temp == 0xFFL)
		/*set default phy_rate = 6Gbps*/
		mvi->hba_info_param.phy_rate[phy_id] = 0x2;

	set_phy_tuning(mvi, phy_id,
			mvi->hba_info_param.phy_tuning[phy_id]);
	set_phy_ffe_tuning(mvi, phy_id,
			mvi->hba_info_param.ffe_ctl[phy_id]);
	set_phy_rate(mvi, phy_id,
			mvi->hba_info_param.phy_rate[phy_id]);
}
/* Enable command transmission for one phy by setting its bit in MVS_PCS. */
static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
	void __iomem *regs = mvi->regs;
	u32 pcs;

	pcs = mr32(MVS_PCS);
	pcs |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
	mw32(MVS_PCS, pcs);
}
/*
 * mvs_94xx_phy_reset - reset a phy.
 * @hard: MVS_PHY_TUNE to pulse the SATA tuning bits only, non-zero for a
 *        hard reset (polled until the hardware clears PHY_RST_HARD, up to
 *        5000 * 10us), zero for a soft reset (fire and forget).
 */
static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
{
	u32 tmp;
	u32 delay = 5000;
	if (hard == MVS_PHY_TUNE) {
		/* tuning path: toggle bits in PHYR_SATA_CTL and return */
		mvs_write_port_cfg_addr(mvi, phy_id, PHYR_SATA_CTL);
		tmp = mvs_read_port_cfg_data(mvi, phy_id);
		mvs_write_port_cfg_data(mvi, phy_id, tmp|0x20000000);
		mvs_write_port_cfg_data(mvi, phy_id, tmp|0x100000);
		return;
	}

	/* clear any pending "ready change" event before resetting */
	tmp = mvs_read_port_irq_stat(mvi, phy_id);
	tmp &= ~PHYEV_RDY_CH;
	mvs_write_port_irq_stat(mvi, phy_id, tmp);
	if (hard) {
		tmp = mvs_read_phy_ctl(mvi, phy_id);
		tmp |= PHY_RST_HARD;
		mvs_write_phy_ctl(mvi, phy_id, tmp);
		/* hardware clears PHY_RST_HARD when the reset completes */
		do {
			tmp = mvs_read_phy_ctl(mvi, phy_id);
			udelay(10);
			delay--;
		} while ((tmp & PHY_RST_HARD) && delay);
		if (!delay)
			mv_dprintk("phy hard reset failed.\n");
	} else {
		tmp = mvs_read_phy_ctl(mvi, phy_id);
		tmp |= PHY_RST;
		mvs_write_phy_ctl(mvi, phy_id, tmp);
	}
}
/* Disable a phy by setting the disable bit (bit 23) in VSR_PHY_MODE2. */
static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
{
	u32 mode2;

	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
	mode2 = mvs_read_port_vsr_data(mvi, phy_id);
	mode2 |= 0x00800000;
	mvs_write_port_vsr_data(mvi, phy_id, mode2);
}
/*
 * mvs_94xx_phy_enable - enable a phy, applying revision-specific
 * workaround register writes first (A0 and B0 silicon need extra
 * vendor-register programming before the phy is brought up).
 */
static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
{
	u32 tmp;
	u8 revision = 0;

	revision = mvi->pdev->revision;
	if (revision == VANIR_A0_REV) {
		mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
		mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
	}
	if (revision == VANIR_B0_REV) {
		mvs_write_port_vsr_addr(mvi, phy_id, CMD_APP_MEM_CTL);
		mvs_write_port_vsr_data(mvi, phy_id, 0x08001006);
		mvs_write_port_vsr_addr(mvi, phy_id, CMD_HOST_RD_DATA);
		mvs_write_port_vsr_data(mvi, phy_id, 0x0000705f);
	}

	/* set bit 0 and clear the disable bit (bit 23) in one write */
	mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
	tmp = mvs_read_port_vsr_data(mvi, phy_id);
	tmp |= bit(0);
	mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
}
/*
 * mvs_94xx_init - one-time hardware initialization for a 94xx host.
 *
 * Sequence: chip reset, global phy/port configuration (with A0/B0
 * revision workarounds), per-phy bring-up, ring/DMA address programming,
 * endianness setup, interrupt coalescing and interrupt unmasking.
 * Register write order is significant throughout. Returns 0.
 */
static int __devinit mvs_94xx_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;
	u8 revision;

	revision = mvi->pdev->revision;
	mvs_show_pcie_usage(mvi);
	if (mvi->flags & MVF_FLAG_SOC) {
		/* SoC variant: power on but keep phys disabled for now */
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_PHY_DSBL;
		mw32(MVS_PHY_CTL, tmp);
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(MVS_CTL) & 0xFFFF;
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(MVS_CTL, cctl | CCTL_RST);

	if (mvi->flags & MVF_FLAG_SOC) {
		/* enable phys and pulse link reset on the SoC variant */
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_COM_ON;
		tmp &= ~PCTL_PHY_DSBL;
		tmp |= PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
		tmp &= ~PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
	}

	/* disable Multiplexing, enable phy implemented */
	mw32(MVS_PORTS_IMP, 0xFF);

	if (revision == VANIR_A0_REV) {
		mw32(MVS_PA_VSR_ADDR, CMD_CMWK_OOB_DET);
		mw32(MVS_PA_VSR_PORT, 0x00018080);
	}
	mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE2);
	if (revision == VANIR_A0_REV || revision == VANIR_B0_REV)
		/* set 6G/3G/1.5G, multiplexing, without SSC */
		mw32(MVS_PA_VSR_PORT, 0x0084d4fe);
	else
		/* set 6G/3G/1.5G, multiplexing, with and without SSC */
		mw32(MVS_PA_VSR_PORT, 0x0084fffe);

	if (revision == VANIR_B0_REV) {
		/* B0 workaround registers */
		mw32(MVS_PA_VSR_ADDR, CMD_APP_MEM_CTL);
		mw32(MVS_PA_VSR_PORT, 0x08001006);
		mw32(MVS_PA_VSR_ADDR, CMD_HOST_RD_DATA);
		mw32(MVS_PA_VSR_PORT, 0x0000705f);
	}

	/* reset control */
	mw32(MVS_PCS, 0);		/* MVS_PCS */
	mw32(MVS_STP_REG_SET_0, 0);
	mw32(MVS_STP_REG_SET_1, 0);

	/* init phys */
	mvs_phy_hacks(mvi);

	/* disable non data frame retry */
	tmp = mvs_cr32(mvi, CMD_SAS_CTL1);
	if ((revision == VANIR_A0_REV) ||
		(revision == VANIR_B0_REV) ||
		(revision == VANIR_C0_REV)) {
		tmp &= ~0xffff;
		tmp |= 0x007f;
		mvs_cw32(mvi, CMD_SAS_CTL1, tmp);
	}

	/* set LED blink when IO*/
	mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
	tmp = mr32(MVS_PA_VSR_PORT);
	tmp &= 0xFFFF00FF;
	tmp |= 0x00003300;
	mw32(MVS_PA_VSR_PORT, tmp);

	/* program the DMA ring base addresses (64-bit split low/high) */
	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(MVS_TX_LO, mvi->tx_dma);
	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
	mw32(MVS_RX_LO, mvi->rx_dma);
	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* bring up each phy: disable, configure, enable, hard reset */
		mvs_94xx_phy_disable(mvi, i);
		/* set phy local SAS address */
		mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
				cpu_to_le64(mvi->phy[i].dev_sas_addr));

		mvs_94xx_enable_xmt(mvi, i);
		mvs_94xx_config_reg_from_hba(mvi, i);
		mvs_94xx_phy_enable(mvi, i);

		mvs_94xx_phy_reset(mvi, i, PHY_RST_HARD);
		msleep(500);
		mvs_94xx_detect_porttype(mvi, i);
	}

	if (mvi->flags & MVF_FLAG_SOC) {
		/* set select registers */
		writel(0x0E008000, regs + 0x000);
		writel(0x59000008, regs + 0x004);
		writel(0x20, regs + 0x008);
		writel(0x20, regs + 0x00c);
		writel(0x20, regs + 0x010);
		writel(0x20, regs + 0x014);
		writel(0x20, regs + 0x018);
		writel(0x20, regs + 0x01c);
	}
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
	}

	/* little endian for open address and command table, etc. */
	cctl = mr32(MVS_CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(MVS_CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;
	tmp &= ~PCS_SELF_CLEAR;
	mw32(MVS_PCS, tmp);
	/*
	 * the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	tmp = 0;
	if (MVS_CHIP_SLOT_SZ > 0x1ff)
		mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
	else
		mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);

	/* default interrupt coalescing time is 128us */
	tmp = 0x10000 | interrupt_coalescing;
	mw32(MVS_INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(MVS_TX_CFG, 0);
	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
		PCS_CMD_EN | PCS_CMD_STOP_ERR);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
		CINT_DMA_PCIE | CINT_NON_SPEC_NCQ_ERROR);
	tmp |= CINT_PHY_MASK;
	mw32(MVS_INT_MASK, tmp);

	tmp = mvs_cr32(mvi, CMD_LINK_TIMER);
	tmp |= 0xFFFF0000;
	mvs_cw32(mvi, CMD_LINK_TIMER, tmp);

	/* tune STP performance */
	tmp = 0x003F003F;
	mvs_cw32(mvi, CMD_PL_TIMER, tmp);

	/* This can improve expander large block size seq write performance */
	tmp = mvs_cr32(mvi, CMD_PORT_LAYER_TIMER1);
	tmp |= 0xFFFF007F;
	mvs_cw32(mvi, CMD_PORT_LAYER_TIMER1, tmp);

	/* change the connection open-close behavior (bit 9)
	 * set bit8 to 1 for performance tuning */
	tmp = mvs_cr32(mvi, CMD_SL_MODE0);
	tmp |= 0x00000300;

	/* set bit0 to 0 to enable retry for no_dest reject case */
	tmp &= 0xFFFFFFFE;
	mvs_cw32(mvi, CMD_SL_MODE0, tmp);

	/* Enable SRS interrupt */
	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);

	return 0;
}
/*
 * Map BAR 2 and derive the extended and core register windows from it.
 * Returns 0 on success, -1 if the mapping failed.
 */
static int mvs_94xx_ioremap(struct mvs_info *mvi)
{
	if (mvs_ioremap(mvi, 2, -1))
		return -1;

	mvi->regs_ex = mvi->regs + 0x10200;
	mvi->regs += 0x20000;
	/* the second host on a dual-host chip sits 0x4000 higher */
	if (mvi->id == 1)
		mvi->regs += 0x4000;
	return 0;
}
/* Undo the window offsets applied by mvs_94xx_ioremap() and unmap BAR 2. */
static void mvs_94xx_iounmap(struct mvs_info *mvi)
{
	if (!mvi->regs)
		return;

	mvi->regs -= 0x20000;
	if (mvi->id == 1)
		mvi->regs -= 0x4000;
	mvs_iounmap(mvi->regs);
}
/*
 * mvs_94xx_interrupt_enable - unmask the SAS A/B interrupt sources.
 *
 * NOTE(review): the same value is also written to MVS_GBL_INT_STAT and
 * to four extended registers at offsets 0x0C..0x18 — presumably
 * additional per-queue mask registers; confirm against the 88SE94xx
 * datasheet before changing.
 */
static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;
	u32 tmp;
	tmp = mr32(MVS_GBL_CTL);
	tmp |= (IRQ_SAS_A | IRQ_SAS_B);
	mw32(MVS_GBL_INT_STAT, tmp);
	writel(tmp, regs + 0x0C);
	writel(tmp, regs + 0x10);
	writel(tmp, regs + 0x14);
	writel(tmp, regs + 0x18);
	mw32(MVS_GBL_CTL, tmp);
}
/*
 * mvs_94xx_interrupt_disable - mask the SAS A/B interrupt sources.
 * Mirror image of mvs_94xx_interrupt_enable(); see the note there about
 * the extra register writes at offsets 0x0C..0x18.
 */
static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex;
	u32 tmp;
	tmp = mr32(MVS_GBL_CTL);
	tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
	mw32(MVS_GBL_INT_STAT, tmp);
	writel(tmp, regs + 0x0C);
	writel(tmp, regs + 0x10);
	writel(tmp, regs + 0x14);
	writel(tmp, regs + 0x18);
	mw32(MVS_GBL_CTL, tmp);
}
/*
 * Return the raw global interrupt status if either SAS-A or SAS-B is
 * pending, otherwise 0. SoC variants have no global status register and
 * always report 0.
 */
static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
{
	void __iomem *regs = mvi->regs_ex;
	u32 stat;

	if (mvi->flags & MVF_FLAG_SOC)
		return 0;

	stat = mr32(MVS_GBL_INT_STAT);
	return (stat & (IRQ_SAS_A | IRQ_SAS_B)) ? stat : 0;
}
/*
 * Interrupt service routine: if the interrupt belongs to this host
 * (SAS-A for host 0, SAS-B for host 1), acknowledge the completion
 * interrupt and run the full handler under the host lock.
 */
static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
{
	void __iomem *regs = mvi->regs;
	int ours = (mvi->id == 0 && (stat & IRQ_SAS_A)) ||
		   (mvi->id == 1 && (stat & IRQ_SAS_B));

	if (ours) {
		mw32_f(MVS_INT_STAT, CINT_DONE);
		spin_lock(&mvi->lock);
		mvs_int_full(mvi);
		spin_unlock(&mvi->lock);
	}
	return IRQ_HANDLED;
}
/*
 * mvs_94xx_command_active - if @slot_idx is still marked active in the
 * COMMAND_ACTIVE register bank, force-clear it and spin until the
 * hardware reports it inactive.
 */
static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
{
	u32 tmp;

	tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3));
	/*
	 * Bug fix: this test previously used the logical operator "&&",
	 * which evaluated true whenever *any* slot bit was set in the
	 * register. Only this slot's own bit must be tested.
	 */
	if (tmp & (1 << (slot_idx % 32))) {
		mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx);
		mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3),
			1 << (slot_idx % 32));
		/* wait for the hardware to drop the active bit */
		do {
			tmp = mvs_cr32(mvi,
				MVS_COMMAND_ACTIVE + (slot_idx >> 3));
		} while (tmp & 1 << (slot_idx % 32));
	}
}
/*
 * mvs_94xx_clear_srs_irq - acknowledge SATA register-set interrupts.
 * @reg_set: which register set to clear (0-63); ignored when @clear_all.
 * @clear_all: non-zero to ack every pending bit in both SRS status words.
 *
 * Register sets 0-31 live in MVS_INT_STAT_SRS_0, 32-63 in _SRS_1.
 */
void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (clear_all) {
		/* write-1-to-clear every pending bit in both words */
		tmp = mr32(MVS_INT_STAT_SRS_0);
		if (tmp) {
			mv_dprintk("check SRS 0 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_0, tmp);
		}
		tmp = mr32(MVS_INT_STAT_SRS_1);
		if (tmp) {
			mv_dprintk("check SRS 1 %08X.\n", tmp);
			mw32(MVS_INT_STAT_SRS_1, tmp);
		}
	} else {
		if (reg_set > 31)
			tmp = mr32(MVS_INT_STAT_SRS_1);
		else
			tmp = mr32(MVS_INT_STAT_SRS_0);

		if (tmp & (1 << (reg_set % 32))) {
			mv_dprintk("register set 0x%x was stopped.\n", reg_set);
			/* clear only this register set's bit */
			if (reg_set > 31)
				mw32(MVS_INT_STAT_SRS_1, 1 << (reg_set % 32));
			else
				mw32(MVS_INT_STAT_SRS_0, 1 << (reg_set % 32));
		}
	}
}
/*
 * mvs_94xx_issue_stop - stop command issuing on all ports.
 *
 * Clears all SRS interrupts, acknowledges CINT_CI_STOP, then sets the
 * upper command-issue-stop byte (0xFF00) in MVS_PCS.
 * NOTE(review): @type and @tfs are unused here — the signature matches
 * the mvs_dispatch callback, and this chip stops all ports at once.
 */
static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
			u32 tfs)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;
	mvs_94xx_clear_srs_irq(mvi, 0, 1);

	tmp = mr32(MVS_INT_STAT);
	mw32(MVS_INT_STAT, tmp | CINT_CI_STOP);
	tmp = mr32(MVS_PCS) | 0xFF00;
	mw32(MVS_PCS, tmp);
}
/*
 * mvs_94xx_non_spec_ncq_error - handle a non-specific NCQ error.
 *
 * Reads both 32-bit error bitmaps (register sets 0-31 and 32-63),
 * releases the outstanding tasks of every device whose register set has
 * an error bit set, then acknowledges the bitmaps by writing them back.
 */
static void mvs_94xx_non_spec_ncq_error(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 err_0, err_1;
	u8 i;
	struct mvs_device *device;

	err_0 = mr32(MVS_NON_NCQ_ERR_0);
	err_1 = mr32(MVS_NON_NCQ_ERR_1);

	mv_dprintk("non specific ncq error err_0:%x,err_1:%x.\n",
			err_0, err_1);
	for (i = 0; i < 32; i++) {
		if (err_0 & bit(i)) {
			device = mvs_find_dev_by_reg_set(mvi, i);
			if (device)
				mvs_release_task(mvi, device->sas_device);
		}
		if (err_1 & bit(i)) {
			/* second word covers register sets 32-63 */
			device = mvs_find_dev_by_reg_set(mvi, i+32);
			if (device)
				mvs_release_task(mvi, device->sas_device);
		}
	}

	/* write-1-to-clear acknowledgement */
	mw32(MVS_NON_NCQ_ERR_0, err_0);
	mw32(MVS_NON_NCQ_ERR_1, err_1);
}
/*
 * Release a SATA register set: drop it from the bookkeeping mask, push
 * the half of the 64-bit mask containing it back to the hardware, and
 * mark the caller's slot unmapped.
 */
static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	void __iomem *regs = mvi->regs;
	u8 reg_set = *tfs;

	if (reg_set == MVS_ID_NOT_MAPPED)
		return;

	mvi->sata_reg_set &= ~bit(reg_set);
	if (reg_set < 32)
		w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
	else
		w_reg_set_enable(reg_set, (u32)(mvi->sata_reg_set >> 32));

	*tfs = MVS_ID_NOT_MAPPED;
}
/*
 * mvs_94xx_assign_reg_set - allocate a free SATA register set for *tfs.
 *
 * Finds the first clear bit in the 64-bit allocation mask, claims it,
 * and programs the matching half of the mask into the hardware.
 * Returns 0 on success (or if *tfs is already mapped), otherwise
 * MVS_ID_NOT_MAPPED.
 * NOTE(review): the i >= 32 branch writes the upper mask word, i >= 0
 * the lower; a negative return from mv_ffc64() (presumably "no free
 * bit" — confirm in mv_94xx.h) falls through to the failure return.
 */
static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
{
	int i;
	void __iomem *regs = mvi->regs;

	/* already assigned: nothing to do */
	if (*tfs != MVS_ID_NOT_MAPPED)
		return 0;

	i = mv_ffc64(mvi->sata_reg_set);
	if (i >= 32) {
		mvi->sata_reg_set |= bit(i);
		w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
		*tfs = i;
		return 0;
	} else if (i >= 0) {
		mvi->sata_reg_set |= bit(i);
		w_reg_set_enable(i, (u32)mvi->sata_reg_set);
		*tfs = i;
		return 0;
	}
	return MVS_ID_NOT_MAPPED;
}
/*
 * mvs_94xx_make_prd - build the hardware PRD (scatter/gather) table.
 *
 * For each DMA-mapped sg entry, write its bus address and length into
 * consecutive mvs_prd slots. The length is staged in a struct
 * mvs_prd_imt and copied as a raw u32 (type-punned — both im_len fields
 * share one 32-bit word in the hardware layout).
 */
static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
{
	int i;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd = prd;
	struct mvs_prd_imt im_len;
	*(u32 *)&im_len = 0;
	for_each_sg(scatter, sg, nr, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		im_len.len = sg_dma_len(sg);
		buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
		buf_prd++;
	}
}
/* Report whether out-of-band signalling completed: 1 if phy ready, else 0. */
static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
{
	u32 ctl = mvs_read_phy_ctl(mvi, i);

	return (ctl & PHY_READY_MASK) ? 1 : 0;
}
/*
 * mvs_94xx_get_dev_identify_frame - read the 28-byte local IDENTIFY
 * frame for @port_id out of the CONFIG_ID_FRAME registers.
 *
 * NOTE(review): the swab here is cpu_to_le32() applied to a register
 * *read* — on big-endian hosts le32_to_cpu() would be the conventional
 * direction (the two are byte-identical operations, so behavior is the
 * same); confirm intent before "fixing".
 */
static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
					struct sas_identify_frame *id)
{
	int i;
	u32 id_frame[7];

	for (i = 0; i < 7; i++) {
		mvs_write_port_cfg_addr(mvi, port_id,
					CONFIG_ID_FRAME0 + i * 4);
		id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
	}
	memcpy(id, id_frame, 28);
}
/*
 * mvs_94xx_get_att_identify_frame - read the 28-byte IDENTIFY frame of
 * the *attached* device from the CONFIG_ATT_ID_FRAME registers.
 * See the endian note on mvs_94xx_get_dev_identify_frame().
 */
static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
					struct sas_identify_frame *id)
{
	int i;
	u32 id_frame[7];

	for (i = 0; i < 7; i++) {
		mvs_write_port_cfg_addr(mvi, port_id,
					CONFIG_ATT_ID_FRAME0 + i * 4);
		id_frame[i] = cpu_to_le32(mvs_read_port_cfg_data(mvi, port_id));
		mv_dprintk("94xx phy %d atta frame %d %x.\n",
			port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
	}
	memcpy(id, id_frame, 28);
}
/*
 * Translate a SAS identify frame into the controller's 32-bit
 * device-info word: device type in the low bits, initiator/target
 * capability flags in the middle, phy id in the top byte.
 */
static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
{
	u32 info = id->dev_type;

	if (id->stp_iport)
		info |= PORT_DEV_STP_INIT;
	if (id->smp_iport)
		info |= PORT_DEV_SMP_INIT;
	if (id->ssp_iport)
		info |= PORT_DEV_SSP_INIT;
	if (id->stp_tport)
		info |= PORT_DEV_STP_TRGT;
	if (id->smp_tport)
		info |= PORT_DEV_SMP_TRGT;
	if (id->ssp_tport)
		info |= PORT_DEV_SSP_TRGT;

	/* phy id occupies bits 31:24 */
	info |= (u32)id->phy_id << 24;
	return info;
}
/* On 94xx the attached-device info word uses the same encoding as the
 * local device info, so just reuse the builder. */
static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
{
	return mvs_94xx_make_dev_info(id);
}
/*
 * mvs_94xx_fix_phy_info - populate libsas phy info after link-up.
 *
 * Extracts the negotiated link rate from the cached phy status, reads
 * the identify frames, fills in device/attached-device info words and
 * finally sets the spin-up bit for the port.
 */
static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
				struct sas_identify_frame *id)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
	sas_phy->linkrate =
		(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
	/* +0x8 maps the hardware rate code onto the SAS_LINK_RATE_*
	 * enum values — presumably 0x8 == SAS_LINK_RATE_1_5_GBPS;
	 * TODO confirm against include/scsi/sas.h */
	sas_phy->linkrate += 0x8;
	mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
	mvs_94xx_get_dev_identify_frame(mvi, i, id);
	phy->dev_info = mvs_94xx_make_dev_info(id);

	if (phy->phy_type & PORT_TYPE_SAS) {
		mvs_94xx_get_att_identify_frame(mvi, i, id);
		phy->att_dev_info = mvs_94xx_make_att_info(id);
		phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
	} else {
		/* SATA attachment: synthesize an STP-target info word */
		phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
	}

	/* enable spin up bit */
	mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
	mvs_write_port_cfg_data(mvi, i, 0x04);
}
/*
 * mvs_94xx_phy_set_link_rate - program the max link rate and reset phy.
 *
 * The 2-bit rate field at bits 13:12 of the phy control register is
 * updated from rates->maximum_linkrate (relative to 1.5 Gbps); note
 * that when maximum equals 1.5 Gbps, lrmax is 0 and the field is left
 * unchanged. A hard reset then renegotiates the link.
 */
void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
			struct sas_phy_linkrates *rates)
{
	u32 lrmax = 0;
	u32 tmp;

	tmp = mvs_read_phy_ctl(mvi, phy_id);
	lrmax = (rates->maximum_linkrate - SAS_LINK_RATE_1_5_GBPS) << 12;

	if (lrmax) {
		tmp &= ~(0x3 << 12);
		tmp |= lrmax;
	}
	mvs_write_phy_ctl(mvi, phy_id, tmp);
	mvs_94xx_phy_reset(mvi, phy_id, PHY_RST_HARD);
}
/*
 * Clear any commands still marked active by pulsing each STP
 * register-set enable word to zero and restoring its previous value.
 */
static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 saved;

	saved = mr32(MVS_STP_REG_SET_0);
	mw32(MVS_STP_REG_SET_0, 0);
	mw32(MVS_STP_REG_SET_0, saved);

	saved = mr32(MVS_STP_REG_SET_1);
	mw32(MVS_STP_REG_SET_1, 0);
	mw32(MVS_STP_REG_SET_1, saved);
}
/* Read the SPI data register. The SPI registers sit at the start of
 * BAR 2, so the 0x10200 offset applied in mvs_94xx_ioremap() is
 * subtracted back out here. */
u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	return mr32(SPI_RD_DATA_REG_94XX);
}
/* Write @data to the SPI data register (same base rebasing as
 * mvs_94xx_spi_read_data()). */
void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	 mw32(SPI_RD_DATA_REG_94XX, data);
}
/*
 * Assemble a SPI control word from opcode, direction, length and
 * optional address, programming the address register when one is given.
 * Always returns 0; the result is stored through @dwCmd.
 */
int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
				u32      *dwCmd,
				u8       cmd,
				u8       read,
				u8       length,
				u32      addr
				)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	u32 ctrl = ((u32)cmd << 8) | ((u32)length << 4);

	if (read)
		ctrl |= SPI_CTRL_READ_94XX;

	/* MV_MAX_U32 means "command takes no address phase" */
	if (addr != MV_MAX_U32) {
		mw32(SPI_ADDR_REG_94XX, addr & 0x0003FFFFL);
		ctrl |= SPI_ADDR_VLD_94XX;
	}

	*dwCmd = ctrl;
	return 0;
}
/* Kick off a SPI transaction by writing the prepared control word with
 * the start bit set. Completion is polled separately by
 * mvs_94xx_spi_waitdataready(). Always returns 0. */
int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
	return 0;
}
/*
 * Poll until the controller clears the SpiStart bit, sleeping 10 ms
 * between polls. Returns 0 when the transaction finished, -1 after
 * @timeout unsuccessful polls.
 */
int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
{
	void __iomem *regs = mvi->regs_ex - 0x10200;
	u32 iter;

	for (iter = 0; iter < timeout; iter++) {
		if (!(mr32(SPI_CTRL_REG_94XX) & SPI_CTRL_SpiStart_94XX))
			return 0;
		msleep(10);
	}
	return -1;
}
/*
 * mvs_94xx_fix_dma - pad the PRD table out to MAX_SG_ENTRY entries
 * (A0/B0 silicon only; later revisions do not need the workaround).
 *
 * Entries from @from onward are pointed at a driver-owned bulk bounce
 * buffer (chosen by @phy_mask), and the final entry is turned into a
 * chained entry that points back at its predecessor.
 */
void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
				int buf_len, int from, void *prd)
{
	int i;
	struct mvs_prd *buf_prd = prd;
	dma_addr_t buf_dma;
	struct mvs_prd_imt im_len;

	*(u32 *)&im_len = 0;
	buf_prd += from;

#define PRD_CHAINED_ENTRY 0x01
	if ((mvi->pdev->revision == VANIR_A0_REV) ||
			(mvi->pdev->revision == VANIR_B0_REV))
		/* phys 0-3 use one bounce buffer, phys 4-7 the other */
		buf_dma = (phy_mask <= 0x08) ?
				mvi->bulk_buffer_dma : mvi->bulk_buffer_dma1;
	else
		return;

	for (i = from; i < MAX_SG_ENTRY; i++, ++buf_prd) {
		if (i == MAX_SG_ENTRY - 1) {
			/* last entry chains back to the previous PRD */
			buf_prd->addr = cpu_to_le64(virt_to_phys(buf_prd - 1));
			im_len.len = 2;
			im_len.misc_ctl = PRD_CHAINED_ENTRY;
		} else {
			buf_prd->addr = cpu_to_le64(buf_dma);
			im_len.len = buf_len;
		}
		buf_prd->im_len = cpu_to_le32(*(u32 *)&im_len);
	}
}
/*
 * Reconfigure interrupt coalescing at runtime. @time == 0 disables
 * coalescing entirely; otherwise the coalescing count is set to the
 * slot count (capped at the hardware maximum of 0x1ff, since the full
 * 0x200 would wrap the field to zero) and the timeout to @time.
 */
static void mvs_94xx_tune_interrupt(struct mvs_info *mvi, u32 time)
{
	void __iomem *regs = mvi->regs;

	if (time == 0) {
		mw32(MVS_INT_COAL, 0);
		mw32(MVS_INT_COAL_TMOUT, 0x10000);
		return;
	}

	if (MVS_CHIP_SLOT_SZ > 0x1ff)
		mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
	else
		mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);

	mw32(MVS_INT_COAL_TMOUT, 0x10000 | time);
}
/*
 * Dispatch table binding the generic mvsas core to the 94xx-specific
 * implementations. Entries are positional — the field order is defined
 * by struct mvs_dispatch in mv_sas.h; the NULL slots are callbacks this
 * chip does not implement.
 */
const struct mvs_dispatch mvs_94xx_dispatch = {
	"mv94xx",
	mvs_94xx_init,
	NULL,				/* not implemented for 94xx */
	mvs_94xx_ioremap,
	mvs_94xx_iounmap,
	mvs_94xx_isr,
	mvs_94xx_isr_status,
	mvs_94xx_interrupt_enable,
	mvs_94xx_interrupt_disable,
	mvs_read_phy_ctl,
	mvs_write_phy_ctl,
	mvs_read_port_cfg_data,
	mvs_write_port_cfg_data,
	mvs_write_port_cfg_addr,
	mvs_read_port_vsr_data,
	mvs_write_port_vsr_data,
	mvs_write_port_vsr_addr,
	mvs_read_port_irq_stat,
	mvs_write_port_irq_stat,
	mvs_read_port_irq_mask,
	mvs_write_port_irq_mask,
	mvs_94xx_command_active,
	mvs_94xx_clear_srs_irq,
	mvs_94xx_issue_stop,
	mvs_start_delivery,
	mvs_rx_update,
	mvs_int_full,
	mvs_94xx_assign_reg_set,
	mvs_94xx_free_reg_set,
	mvs_get_prd_size,
	mvs_get_prd_count,
	mvs_94xx_make_prd,
	mvs_94xx_detect_porttype,
	mvs_94xx_oob_done,
	mvs_94xx_fix_phy_info,
	NULL,				/* not implemented for 94xx */
	mvs_94xx_phy_set_link_rate,
	mvs_hw_max_link_rate,
	mvs_94xx_phy_disable,
	mvs_94xx_phy_enable,
	mvs_94xx_phy_reset,
	NULL,				/* not implemented for 94xx */
	mvs_94xx_clear_active_cmds,
	mvs_94xx_spi_read_data,
	mvs_94xx_spi_write_data,
	mvs_94xx_spi_buildcmd,
	mvs_94xx_spi_issuecmd,
	mvs_94xx_spi_waitdataready,
	mvs_94xx_fix_dma,
	mvs_94xx_tune_interrupt,
	mvs_94xx_non_spec_ncq_error,
};
| gpl-2.0 |
boa19861105/LeeDrOiD-Hima-M9 | drivers/staging/media/as102/as10x_cmd_stream.c | 7554 | 5401 | /*
* Abilis Systems Single DVB-T Receiver
* Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include "as102_drv.h"
#include "as10x_cmd.h"
/**
* as10x_cmd_add_PID_filter - send add filter command to AS10x
* @adap: pointer to AS10x bus adapter
* @filter: TSFilter filter for DVB-T
*
* Return 0 on success or negative value in case of error.
*/
int as10x_cmd_add_PID_filter(struct as10x_bus_adapter_t *adap,
			     struct as10x_ts_filter *filter)
{
	int error;
	struct as10x_cmd_t *pcmd, *prsp;

	ENTER();

	pcmd = adap->cmd;
	prsp = adap->rsp;

	/* prepare command */
	as10x_cmd_build(pcmd, (++adap->cmd_xid),
			sizeof(pcmd->body.add_pid_filter.req));

	/* fill command */
	pcmd->body.add_pid_filter.req.proc_id =
		cpu_to_le16(CONTROL_PROC_SETFILTER);
	pcmd->body.add_pid_filter.req.pid = cpu_to_le16(filter->pid);
	pcmd->body.add_pid_filter.req.stream_type = filter->type;

	/* indexes 0-15 are used as-is; anything else is sent as 0xFF
	 * (presumably "let the firmware pick" — confirm with firmware doc) */
	if (filter->idx < 16)
		pcmd->body.add_pid_filter.req.idx = filter->idx;
	else
		pcmd->body.add_pid_filter.req.idx = 0xFF;

	/* send command */
	if (adap->ops->xfer_cmd) {
		error = adap->ops->xfer_cmd(adap, (uint8_t *) pcmd,
				sizeof(pcmd->body.add_pid_filter.req)
				+ HEADER_SIZE, (uint8_t *) prsp,
				sizeof(prsp->body.add_pid_filter.rsp)
				+ HEADER_SIZE);
	} else {
		error = AS10X_CMD_ERROR;
	}

	if (error < 0)
		goto out;

	/* parse response */
	error = as10x_rsp_parse(prsp, CONTROL_PROC_SETFILTER_RSP);

	if (error == 0) {
		/* Response OK -> get response data: the firmware reports
		 * back which filter slot it actually assigned */
		filter->idx = prsp->body.add_pid_filter.rsp.filter_id;
	}

out:
	LEAVE();
	return error;
}
/**
* as10x_cmd_del_PID_filter - Send delete filter command to AS10x
* @adap: pointer to AS10x bus adapte
* @pid_value: PID to delete
*
* Return 0 on success or negative value in case of error.
*/
int as10x_cmd_del_PID_filter(struct as10x_bus_adapter_t *adap,
			     uint16_t pid_value)
{
	int error;
	struct as10x_cmd_t *pcmd, *prsp;

	ENTER();

	pcmd = adap->cmd;
	prsp = adap->rsp;

	/* prepare command */
	as10x_cmd_build(pcmd, (++adap->cmd_xid),
			sizeof(pcmd->body.del_pid_filter.req));

	/* fill command: filters are removed by PID, not by index */
	pcmd->body.del_pid_filter.req.proc_id =
		cpu_to_le16(CONTROL_PROC_REMOVEFILTER);
	pcmd->body.del_pid_filter.req.pid = cpu_to_le16(pid_value);

	/* send command */
	if (adap->ops->xfer_cmd) {
		error = adap->ops->xfer_cmd(adap, (uint8_t *) pcmd,
				sizeof(pcmd->body.del_pid_filter.req)
				+ HEADER_SIZE, (uint8_t *) prsp,
				sizeof(prsp->body.del_pid_filter.rsp)
				+ HEADER_SIZE);
	} else {
		error = AS10X_CMD_ERROR;
	}

	if (error < 0)
		goto out;

	/* parse response */
	error = as10x_rsp_parse(prsp, CONTROL_PROC_REMOVEFILTER_RSP);

out:
	LEAVE();
	return error;
}
/**
* as10x_cmd_start_streaming - Send start streaming command to AS10x
* @adap: pointer to AS10x bus adapter
*
* Return 0 on success or negative value in case of error.
*/
int as10x_cmd_start_streaming(struct as10x_bus_adapter_t *adap)
{
	int error;
	struct as10x_cmd_t *pcmd, *prsp;

	ENTER();

	pcmd = adap->cmd;
	prsp = adap->rsp;

	/* prepare command */
	as10x_cmd_build(pcmd, (++adap->cmd_xid),
			sizeof(pcmd->body.start_streaming.req));

	/* fill command: START_STREAMING carries no parameters */
	pcmd->body.start_streaming.req.proc_id =
		cpu_to_le16(CONTROL_PROC_START_STREAMING);

	/* send command */
	if (adap->ops->xfer_cmd) {
		error = adap->ops->xfer_cmd(adap, (uint8_t *) pcmd,
				sizeof(pcmd->body.start_streaming.req)
				+ HEADER_SIZE, (uint8_t *) prsp,
				sizeof(prsp->body.start_streaming.rsp)
				+ HEADER_SIZE);
	} else {
		error = AS10X_CMD_ERROR;
	}

	if (error < 0)
		goto out;

	/* parse response */
	error = as10x_rsp_parse(prsp, CONTROL_PROC_START_STREAMING_RSP);

out:
	LEAVE();
	return error;
}
/**
* as10x_cmd_stop_streaming - Send stop streaming command to AS10x
* @adap: pointer to AS10x bus adapter
*
* Return 0 on success or negative value in case of error.
*/
int as10x_cmd_stop_streaming(struct as10x_bus_adapter_t *adap)
{
	/*
	 * Bug fix: "error" was declared int8_t although it receives int
	 * return codes from xfer_cmd() and as10x_rsp_parse() and is
	 * itself returned as int — any code outside [-128, 127] would be
	 * truncated. Use int, matching the sibling commands above.
	 */
	int error;
	struct as10x_cmd_t *pcmd, *prsp;

	ENTER();

	pcmd = adap->cmd;
	prsp = adap->rsp;

	/* prepare command */
	as10x_cmd_build(pcmd, (++adap->cmd_xid),
			sizeof(pcmd->body.stop_streaming.req));

	/* fill command: STOP_STREAMING carries no parameters */
	pcmd->body.stop_streaming.req.proc_id =
		cpu_to_le16(CONTROL_PROC_STOP_STREAMING);

	/* send command */
	if (adap->ops->xfer_cmd) {
		error = adap->ops->xfer_cmd(adap, (uint8_t *) pcmd,
				sizeof(pcmd->body.stop_streaming.req)
				+ HEADER_SIZE, (uint8_t *) prsp,
				sizeof(prsp->body.stop_streaming.rsp)
				+ HEADER_SIZE);
	} else {
		error = AS10X_CMD_ERROR;
	}

	if (error < 0)
		goto out;

	/* parse response */
	error = as10x_rsp_parse(prsp, CONTROL_PROC_STOP_STREAMING_RSP);

out:
	LEAVE();
	return error;
}
| gpl-2.0 |
Jovy23/M919_Kernel | fs/ufs/util.c | 8322 | 6213 | /*
* linux/fs/ufs/util.c
*
* Copyright (C) 1998
* Daniel Pirkl <daniel.pirkl@email.cz>
* Charles University, Faculty of Mathematics and Physics
*/
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include "ufs_fs.h"
#include "ufs.h"
#include "swab.h"
#include "util.h"
/*
 * Allocate a ufs_buffer_head and read @size bytes' worth of fragments
 * starting at @fragment.  Returns NULL if @size is not fragment-aligned,
 * spans too many fragments, or on allocation/read failure.
 */
struct ufs_buffer_head * _ubh_bread_ (struct ufs_sb_private_info * uspi,
        struct super_block *sb, u64 fragment, u64 size)
{
        struct ufs_buffer_head *ubh;
        unsigned i, j;
        u64 count = 0;

        /* size must be a whole number of fragments */
        if (size & ~uspi->s_fmask)
                return NULL;
        count = size >> uspi->s_fshift;
        if (count > UFS_MAXFRAG)
                return NULL;

        /*
         * Idiomatic allocation: no cast of kmalloc()'s void * and
         * "sizeof *ubh" keeps the size tied to the variable's type.
         */
        ubh = kmalloc(sizeof *ubh, GFP_NOFS);
        if (!ubh)
                return NULL;
        ubh->fragment = fragment;
        ubh->count = count;
        for (i = 0; i < count; i++)
                if (!(ubh->bh[i] = sb_bread(sb, fragment + i)))
                        goto failed;
        /* NULL-terminate the unused slots */
        for (; i < UFS_MAXFRAG; i++)
                ubh->bh[i] = NULL;
        return ubh;
failed:
        /* release the buffers read so far, then the head itself */
        for (j = 0; j < i; j++)
                brelse (ubh->bh[j]);
        kfree(ubh);
        return NULL;
}
/*
 * Read the fragments backing the superblock's private buffer head.
 * Unlike _ubh_bread_() this fills the uspi-embedded ufs_buffer_head
 * rather than allocating a fresh one.  Returns NULL on any failure.
 */
struct ufs_buffer_head * ubh_bread_uspi (struct ufs_sb_private_info * uspi,
        struct super_block *sb, u64 fragment, u64 size)
{
        unsigned i, j;
        u64 count = 0;

        /* size must be a whole number of fragments */
        if (size & ~uspi->s_fmask)
                return NULL;
        count = size >> uspi->s_fshift;
        /*
         * count is unsigned, so the old "count <= 0" could only ever mean
         * "count == 0"; say so explicitly (also silences -Wtype-limits).
         */
        if (count == 0 || count > UFS_MAXFRAG)
                return NULL;
        USPI_UBH(uspi)->fragment = fragment;
        USPI_UBH(uspi)->count = count;
        for (i = 0; i < count; i++)
                if (!(USPI_UBH(uspi)->bh[i] = sb_bread(sb, fragment + i)))
                        goto failed;
        /* NULL-terminate the unused slots */
        for (; i < UFS_MAXFRAG; i++)
                USPI_UBH(uspi)->bh[i] = NULL;
        return USPI_UBH(uspi);
failed:
        /* release the buffers read so far */
        for (j = 0; j < i; j++)
                brelse (USPI_UBH(uspi)->bh[j]);
        return NULL;
}
/* Release every buffer of @ubh and free the head itself. */
void ubh_brelse (struct ufs_buffer_head * ubh)
{
        unsigned n;

        if (!ubh)
                return;
        for (n = 0; n < ubh->count; n++)
                brelse(ubh->bh[n]);
        kfree(ubh);
}
/*
 * Release the buffers of the uspi-embedded buffer head, clearing each
 * slot afterwards.  The embedded head itself is not freed.
 */
void ubh_brelse_uspi (struct ufs_sb_private_info * uspi)
{
        unsigned n;

        if (!USPI_UBH(uspi))
                return;
        for (n = 0; n < USPI_UBH(uspi)->count; n++) {
                brelse(USPI_UBH(uspi)->bh[n]);
                USPI_UBH(uspi)->bh[n] = NULL;
        }
}
/* Mark every buffer backing @ubh dirty. */
void ubh_mark_buffer_dirty (struct ufs_buffer_head * ubh)
{
        unsigned n;

        if (!ubh)
                return;
        for (n = 0; n < ubh->count; n++)
                mark_buffer_dirty(ubh->bh[n]);
}
/* Set (@flag non-zero) or clear the uptodate bit on all of @ubh's buffers. */
void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
{
        unsigned n;

        if (!ubh)
                return;
        for (n = 0; n < ubh->count; n++) {
                if (flag)
                        set_buffer_uptodate(ubh->bh[n]);
                else
                        clear_buffer_uptodate(ubh->bh[n]);
        }
}
/* Write out all dirty buffers of @ubh and wait for the I/O to complete. */
void ubh_sync_block(struct ufs_buffer_head *ubh)
{
        unsigned n;

        if (!ubh)
                return;

        /* queue all the writes first ... */
        for (n = 0; n < ubh->count; n++)
                write_dirty_buffer(ubh->bh[n], WRITE);

        /* ... then wait for each of them to finish */
        for (n = 0; n < ubh->count; n++)
                wait_on_buffer(ubh->bh[n]);
}
/* Forget (discard without writeback) every non-NULL buffer of @ubh. */
void ubh_bforget (struct ufs_buffer_head * ubh)
{
        unsigned n;

        if (!ubh)
                return;
        for (n = 0; n < ubh->count; n++) {
                if (ubh->bh[n])
                        bforget(ubh->bh[n]);
        }
}
/* Return non-zero if any buffer of @ubh is dirty (0 for a NULL head). */
int ubh_buffer_dirty (struct ufs_buffer_head * ubh)
{
        unsigned n;
        unsigned dirty = 0;

        if (ubh) {
                for (n = 0; n < ubh->count; n++)
                        dirty |= buffer_dirty(ubh->bh[n]);
        }
        return dirty;
}
/*
 * Copy up to @size bytes out of @ubh's fragment buffers into @mem.
 * @mem advances one full fragment per buffer, so a short final chunk
 * leaves a gap just as the original layout requires.
 */
void _ubh_ubhcpymem_(struct ufs_sb_private_info * uspi,
        unsigned char * mem, struct ufs_buffer_head * ubh, unsigned size)
{
        unsigned chunk, bhno = 0;

        /* never read past the buffers actually held by @ubh */
        if (size > (ubh->count << uspi->s_fshift))
                size = ubh->count << uspi->s_fshift;

        while (size) {
                chunk = min_t(unsigned int, size, uspi->s_fsize);
                memcpy(mem, ubh->bh[bhno]->b_data, chunk);
                mem += uspi->s_fsize;
                size -= chunk;
                bhno++;
        }
}
/*
 * Copy up to @size bytes from @mem into @ubh's fragment buffers —
 * the mirror image of _ubh_ubhcpymem_().
 */
void _ubh_memcpyubh_(struct ufs_sb_private_info * uspi,
        struct ufs_buffer_head * ubh, unsigned char * mem, unsigned size)
{
        unsigned chunk, bhno = 0;

        /* never write past the buffers actually held by @ubh */
        if (size > (ubh->count << uspi->s_fshift))
                size = ubh->count << uspi->s_fshift;

        while (size) {
                chunk = min_t(unsigned int, size, uspi->s_fsize);
                memcpy(ubh->bh[bhno]->b_data, mem, chunk);
                mem += uspi->s_fsize;
                size -= chunk;
                bhno++;
        }
}
/*
 * Decode the device number stored in a UFS special inode, taking the
 * filesystem flavour into account.
 */
dev_t
ufs_get_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi)
{
        unsigned flavour = UFS_SB(sb)->s_flags & UFS_ST_MASK;
        __u32 raw;
        dev_t dev;

        /* SUNx86 keeps the device number in the second data word */
        if (flavour == UFS_ST_SUNx86)
                raw = fs32_to_cpu(sb, ufsi->i_u1.i_data[1]);
        else
                raw = fs32_to_cpu(sb, ufsi->i_u1.i_data[0]);

        switch (flavour) {
        case UFS_ST_SUNx86:
        case UFS_ST_SUN:
                /* an all-zero or all-one high half marks the old 16-bit
                 * encoding; otherwise it is a SysV major/minor pair */
                if ((raw & 0xffff0000) == 0 ||
                    (raw & 0xffff0000) == 0xffff0000)
                        dev = old_decode_dev(raw & 0x7fff);
                else
                        dev = MKDEV(sysv_major(raw), sysv_minor(raw));
                break;
        default:
                dev = old_decode_dev(raw);
                break;
        }
        return dev;
}
/*
 * Encode @dev into a UFS special inode, taking the filesystem flavour
 * into account — the inverse of ufs_get_inode_dev().
 */
void
ufs_set_inode_dev(struct super_block *sb, struct ufs_inode_info *ufsi, dev_t dev)
{
        unsigned flavour = UFS_SB(sb)->s_flags & UFS_ST_MASK;
        __u32 raw;

        switch (flavour) {
        case UFS_ST_SUNx86:
        case UFS_ST_SUN:
                raw = sysv_encode_dev(dev);
                /* small numbers keep the old 16-bit encoding */
                if ((raw & 0xffff8000) == 0) {
                        raw = old_encode_dev(dev);
                }
                break;
        default:
                raw = old_encode_dev(dev);
                break;
        }

        /* SUNx86 keeps the device number in the second data word */
        if (flavour == UFS_ST_SUNx86)
                ufsi->i_u1.i_data[1] = cpu_to_fs32(sb, raw);
        else
                ufsi->i_u1.i_data[0] = cpu_to_fs32(sb, raw);
}
/**
 * ufs_get_locked_page() - locate, pin and lock a pagecache page; if it is
 * not resident, read it from disk first.
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Returns the locked, referenced page on success, NULL if truncation
 * raced with the lookup, or an ERR_PTR(-EIO) on read failure.  The
 * caller must unlock and release the page when done.
 */
struct page *ufs_get_locked_page(struct address_space *mapping,
                                 pgoff_t index)
{
        struct page *page;

        page = find_lock_page(mapping, index);
        if (!page) {
                /* not cached: read it in (returns an unlocked page) */
                page = read_mapping_page(mapping, index, NULL);

                if (IS_ERR(page)) {
                        printk(KERN_ERR "ufs_change_blocknr: "
                               "read_mapping_page error: ino %lu, index: %lu\n",
                               mapping->host->i_ino, index);
                        goto out;
                }

                lock_page(page);

                /* recheck under the page lock: a NULL mapping means the
                 * page was removed from the cache while we slept */
                if (unlikely(page->mapping == NULL)) {
                        /* Truncate got there first */
                        unlock_page(page);
                        page_cache_release(page);
                        page = NULL;
                        goto out;
                }

                /* the read may have failed after the page was inserted */
                if (!PageUptodate(page) || PageError(page)) {
                        unlock_page(page);
                        page_cache_release(page);

                        printk(KERN_ERR "ufs_change_blocknr: "
                               "can not read page: ino %lu, index: %lu\n",
                               mapping->host->i_ino, index);

                        page = ERR_PTR(-EIO);
                }
        }
out:
        return page;
}
| gpl-2.0 |
ruleless/linux | arch/sh/kernel/dma-nommu.c | 8834 | 1912 | /*
* DMA mapping support for platforms lacking IOMMUs.
*
* Copyright (C) 2009 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/dma-mapping.h>
#include <linux/io.h>
/*
 * Map a single page for DMA: with no IOMMU the bus address is just the
 * physical address, so only a cache sync is needed.
 */
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        dma_addr_t dma = page_to_phys(page) + offset;

        WARN_ON(size == 0);

        dma_cache_sync(dev, page_address(page) + offset, size, dir);

        return dma;
}
/*
 * Map a scatterlist for DMA: identity mapping (bus address == physical
 * address) plus a cache sync per entry.
 */
static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir,
                        struct dma_attrs *attrs)
{
        struct scatterlist *cur;
        int i;

        WARN_ON(nents == 0 || sg[0].length == 0);

        for_each_sg(sg, cur, nents, i) {
                BUG_ON(!sg_page(cur));

                dma_cache_sync(dev, sg_virt(cur), cur->length, dir);

                cur->dma_address = sg_phys(cur);
                cur->dma_length = cur->length;
        }

        return nents;
}
#ifdef CONFIG_DMA_NONCOHERENT
/* Sync a single mapped region for the device (non-coherent platforms). */
static void nommu_sync_single(struct device *dev, dma_addr_t addr,
                              size_t size, enum dma_data_direction dir)
{
        dma_cache_sync(dev, phys_to_virt(addr), size, dir);
}
/* Sync every scatterlist entry for the device (non-coherent platforms). */
static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
                          int nelems, enum dma_data_direction dir)
{
        struct scatterlist *cur;
        int i;

        for_each_sg(sg, cur, nelems, i)
                dma_cache_sync(dev, sg_virt(cur), cur->length, dir);
}
#endif
/*
 * DMA operations for IOMMU-less platforms: bus addresses are physical
 * addresses, so mapping reduces to a cache sync (see nommu_map_page()).
 */
struct dma_map_ops nommu_dma_ops = {
        .alloc                  = dma_generic_alloc_coherent,
        .free                   = dma_generic_free_coherent,
        .map_page               = nommu_map_page,
        .map_sg                 = nommu_map_sg,
#ifdef CONFIG_DMA_NONCOHERENT
        /* non-coherent CPUs must also sync caches before device access */
        .sync_single_for_device = nommu_sync_single,
        .sync_sg_for_device     = nommu_sync_sg,
#endif
        .is_phys                = 1, /* DMA addresses are physical addresses */
};
/*
 * Install the no-IOMMU DMA ops as the fallback, unless another
 * initialiser has already registered an ops table.
 */
void __init no_iommu_init(void)
{
        if (!dma_ops)
                dma_ops = &nommu_dma_ops;
}
| gpl-2.0 |
jsgage/android_kernel_ba2x | net/rxrpc/rxkad.c | 9090 | 27965 | /* Kerberos-based RxRPC security
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <keys/rxrpc-type.h>
#define rxrpc_debug rxkad_debug
#include "ar-internal.h"
#define RXKAD_VERSION 2
#define MAXKRB5TICKETLEN 1024
#define RXKAD_TKT_TYPE_KERBEROS_V5 256
#define ANAME_SZ 40 /* size of authentication name */
#define INST_SZ 40 /* size of principal's instance */
#define REALM_SZ 40 /* size of principal's auth domain */
#define SNAME_SZ 40 /* size of service name */
unsigned rxrpc_debug;
module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "rxkad debugging mask");
struct rxkad_level1_hdr {
__be32 data_size; /* true data size (excluding padding) */
};
struct rxkad_level2_hdr {
__be32 data_size; /* true data size (excluding padding) */
__be32 checksum; /* decrypted data checksum */
};
MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos 4)");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
/*
* this holds a pinned cipher so that keventd doesn't get called by the cipher
* alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
* packets
*/
static struct crypto_blkcipher *rxkad_ci;
static DEFINE_MUTEX(rxkad_ci_mutex);
/*
 * initialise connection security: pin the pcbc(fcrypt) cipher, key it
 * with the session key, and set the per-packet header sizing that the
 * negotiated security level requires
 */
static int rxkad_init_connection_security(struct rxrpc_connection *conn)
{
        struct crypto_blkcipher *ci;
        struct rxrpc_key_token *token;
        int ret;

        _enter("{%d},{%x}", conn->debug_id, key_serial(conn->key));

        token = conn->key->payload.data;
        conn->security_ix = token->security_index;

        ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(ci)) {
                _debug("no cipher");
                ret = PTR_ERR(ci);
                goto error;
        }

        if (crypto_blkcipher_setkey(ci, token->kad->session_key,
                                    sizeof(token->kad->session_key)) < 0)
                BUG();

        switch (conn->security_level) {
        case RXRPC_SECURITY_PLAIN:
                break;
        case RXRPC_SECURITY_AUTH:
                conn->size_align = 8;
                conn->security_size = sizeof(struct rxkad_level1_hdr);
                conn->header_size += sizeof(struct rxkad_level1_hdr);
                break;
        case RXRPC_SECURITY_ENCRYPT:
                conn->size_align = 8;
                conn->security_size = sizeof(struct rxkad_level2_hdr);
                conn->header_size += sizeof(struct rxkad_level2_hdr);
                break;
        default:
                /* BUG FIX: the allocated cipher used to leak on this
                 * path; release it before bailing out */
                crypto_free_blkcipher(ci);
                ret = -EKEYREJECTED;
                goto error;
        }

        conn->cipher = ci;
        ret = 0;
error:
        _leave(" = %d", ret);
        return ret;
}
/*
 * prime the encryption state with the invariant parts of a connection's
 * description (epoch, cid, security index); the tail of the ciphertext
 * seeds conn->csum_iv, the IV later used for per-packet checksums
 */
static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
{
        struct rxrpc_key_token *token;
        struct blkcipher_desc desc;
        struct scatterlist sg[2];
        struct rxrpc_crypt iv;
        struct {
                __be32 x[4];
        } tmpbuf __attribute__((aligned(16))); /* must all be in same page */

        _enter("");

        /* unkeyed connections have nothing to prime */
        if (!conn->key)
                return;

        token = conn->key->payload.data;
        /* the session key doubles as the initial IV */
        memcpy(&iv, token->kad->session_key, sizeof(iv));

        desc.tfm = conn->cipher;
        desc.info = iv.x;
        desc.flags = 0;

        tmpbuf.x[0] = conn->epoch;
        tmpbuf.x[1] = conn->cid;
        tmpbuf.x[2] = 0;
        tmpbuf.x[3] = htonl(conn->security_ix);

        /* encrypt tmpbuf in place (source and destination alias) */
        sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
        sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
        crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

        memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv));
        ASSERTCMP(conn->csum_iv.n[0], ==, tmpbuf.x[2]);

        _leave("");
}
/*
 * partially encrypt a packet (level 1 security): only the first eight
 * bytes of the security header (length word + first data word) are
 * encrypted, in place, into sechdr
 */
static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
                                    struct sk_buff *skb,
                                    u32 data_size,
                                    void *sechdr)
{
        struct rxrpc_skb_priv *sp;
        struct blkcipher_desc desc;
        struct rxrpc_crypt iv;
        struct scatterlist sg[2];
        struct {
                struct rxkad_level1_hdr hdr;
                __be32  first;  /* first four bytes of data and padding */
        } tmpbuf __attribute__((aligned(8))); /* must all be in same page */
        u16 check;

        sp = rxrpc_skb(skb);

        _enter("");

        /* fold a checksum of seq^callNumber into the top 16 bits of the
         * length word */
        check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
        data_size |= (u32) check << 16;

        tmpbuf.hdr.data_size = htonl(data_size);
        memcpy(&tmpbuf.first, sechdr + 4, sizeof(tmpbuf.first));

        /* start the encryption afresh */
        memset(&iv, 0, sizeof(iv));
        desc.tfm = call->conn->cipher;
        desc.info = iv.x;
        desc.flags = 0;

        sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
        sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
        crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

        memcpy(sechdr, &tmpbuf, sizeof(tmpbuf));

        _leave(" = 0");
        return 0;
}
/*
 * wholly encrypt a packet (level 2 security): encrypt the security
 * header into sechdr, then encrypt the whole skb payload in place
 */
static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
                                       struct sk_buff *skb,
                                       u32 data_size,
                                       void *sechdr)
{
        const struct rxrpc_key_token *token;
        struct rxkad_level2_hdr rxkhdr
                __attribute__((aligned(8))); /* must be all on one page */
        struct rxrpc_skb_priv *sp;
        struct blkcipher_desc desc;
        struct rxrpc_crypt iv;
        struct scatterlist sg[16];
        struct sk_buff *trailer;
        unsigned len;
        u16 check;
        int nsg;

        sp = rxrpc_skb(skb);

        _enter("");

        /* a checksum of seq^callNumber goes in the top 16 bits of the
         * length word */
        check = ntohl(sp->hdr.seq ^ sp->hdr.callNumber);

        rxkhdr.data_size = htonl(data_size | (u32) check << 16);
        rxkhdr.checksum = 0;

        /* encrypt from the session key */
        token = call->conn->key->payload.data;
        memcpy(&iv, token->kad->session_key, sizeof(iv));
        desc.tfm = call->conn->cipher;
        desc.info = iv.x;
        desc.flags = 0;

        /* encrypt the security header into sechdr */
        sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
        sg_init_one(&sg[1], &rxkhdr, sizeof(rxkhdr));
        crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr));

        /* we want to encrypt the skbuff in-place */
        nsg = skb_cow_data(skb, 0, &trailer);
        if (nsg < 0 || nsg > 16)
                return -ENOMEM;

        /* round the length up to the cipher's alignment */
        len = data_size + call->conn->size_align - 1;
        len &= ~(call->conn->size_align - 1);

        sg_init_table(sg, nsg);
        skb_to_sgvec(skb, sg, 0, len);
        crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);

        _leave(" = 0");
        return 0;
}
/*
 * checksum an RxRPC packet header, then apply the connection's security
 * level (plain / auth / encrypt) to the payload
 */
static int rxkad_secure_packet(const struct rxrpc_call *call,
                               struct sk_buff *skb,
                               size_t data_size,
                               void *sechdr)
{
        struct rxrpc_skb_priv *sp;
        struct blkcipher_desc desc;
        struct rxrpc_crypt iv;
        struct scatterlist sg[2];
        struct {
                __be32 x[2];
        } tmpbuf __attribute__((aligned(8))); /* must all be in same page */
        __be32 x;
        u32 y;
        int ret;

        sp = rxrpc_skb(skb);

        _enter("{%d{%x}},{#%u},%zu,",
               call->debug_id, key_serial(call->conn->key), ntohl(sp->hdr.seq),
               data_size);

        /* unsecured connection: nothing to do */
        if (!call->conn->cipher)
                return 0;

        ret = key_validate(call->conn->key);
        if (ret < 0)
                return ret;

        /* continue encrypting from where we left off */
        memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
        desc.tfm = call->conn->cipher;
        desc.info = iv.x;
        desc.flags = 0;

        /* calculate the security checksum over (callNumber, channel|seq) */
        x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
        x |= sp->hdr.seq & cpu_to_be32(0x3fffffff);
        tmpbuf.x[0] = sp->hdr.callNumber;
        tmpbuf.x[1] = x;

        /* encrypt tmpbuf in place (source and destination alias) */
        sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
        sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
        crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

        /* the checksum is the middle 16 bits of the second ciphertext word */
        y = ntohl(tmpbuf.x[1]);
        y = (y >> 16) & 0xffff;
        if (y == 0)
                y = 1; /* zero checksums are not permitted */
        sp->hdr.cksum = htons(y);

        switch (call->conn->security_level) {
        case RXRPC_SECURITY_PLAIN:
                ret = 0;
                break;
        case RXRPC_SECURITY_AUTH:
                ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr);
                break;
        case RXRPC_SECURITY_ENCRYPT:
                ret = rxkad_secure_packet_encrypt(call, skb, data_size,
                                                  sechdr);
                break;
        default:
                ret = -EPERM;
                break;
        }

        _leave(" = %d [set %hx]", ret, y);
        return ret;
}
/*
 * decrypt partial encryption on a packet (level 1 security): decrypt the
 * first eight bytes in place, verify the embedded checksum and trim the
 * packet down to the true data size
 */
static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
                                    struct sk_buff *skb,
                                    u32 *_abort_code)
{
        struct rxkad_level1_hdr sechdr;
        struct rxrpc_skb_priv *sp;
        struct blkcipher_desc desc;
        struct rxrpc_crypt iv;
        struct scatterlist sg[16];
        struct sk_buff *trailer;
        u32 data_size, buf;
        u16 check;
        int nsg;

        _enter("");

        sp = rxrpc_skb(skb);

        /* we want to decrypt the skbuff in-place */
        nsg = skb_cow_data(skb, 0, &trailer);
        if (nsg < 0 || nsg > 16)
                goto nomem;

        sg_init_table(sg, nsg);
        skb_to_sgvec(skb, sg, 0, 8);

        /* start the decryption afresh */
        memset(&iv, 0, sizeof(iv));
        desc.tfm = call->conn->cipher;
        desc.info = iv.x;
        desc.flags = 0;

        crypto_blkcipher_decrypt_iv(&desc, sg, sg, 8);

        /* remove the decrypted packet length */
        if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
                goto datalen_error;
        if (!skb_pull(skb, sizeof(sechdr)))
                BUG();

        buf = ntohl(sechdr.data_size);
        data_size = buf & 0xffff;

        /* the top 16 bits hold the sender's checksum of seq^callNumber */
        check = buf >> 16;
        check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
        check &= 0xffff;
        if (check != 0) {
                *_abort_code = RXKADSEALEDINCON;
                goto protocol_error;
        }

        /* shorten the packet to remove the padding */
        if (data_size > skb->len)
                goto datalen_error;
        else if (data_size < skb->len)
                skb->len = data_size;

        _leave(" = 0 [dlen=%x]", data_size);
        return 0;

datalen_error:
        *_abort_code = RXKADDATALEN;
protocol_error:
        _leave(" = -EPROTO");
        return -EPROTO;

nomem:
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
/*
 * wholly decrypt a packet (level 2 security): decrypt the entire payload
 * in place, verify the embedded checksum and trim the padding
 */
static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
                                       struct sk_buff *skb,
                                       u32 *_abort_code)
{
        const struct rxrpc_key_token *token;
        struct rxkad_level2_hdr sechdr;
        struct rxrpc_skb_priv *sp;
        struct blkcipher_desc desc;
        struct rxrpc_crypt iv;
        struct scatterlist _sg[4], *sg;
        struct sk_buff *trailer;
        u32 data_size, buf;
        u16 check;
        int nsg;

        _enter(",{%d}", skb->len);

        sp = rxrpc_skb(skb);

        /* we want to decrypt the skbuff in-place */
        nsg = skb_cow_data(skb, 0, &trailer);
        if (nsg < 0)
                goto nomem;

        /* fall back to a heap-allocated scatterlist for heavily
         * fragmented skbs */
        sg = _sg;
        if (unlikely(nsg > 4)) {
                sg = kmalloc(sizeof(*sg) * nsg, GFP_NOIO);
                if (!sg)
                        goto nomem;
        }

        sg_init_table(sg, nsg);
        skb_to_sgvec(skb, sg, 0, skb->len);

        /* decrypt from the session key */
        token = call->conn->key->payload.data;
        memcpy(&iv, token->kad->session_key, sizeof(iv));
        desc.tfm = call->conn->cipher;
        desc.info = iv.x;
        desc.flags = 0;

        crypto_blkcipher_decrypt_iv(&desc, sg, sg, skb->len);
        if (sg != _sg)
                kfree(sg);

        /* remove the decrypted packet length */
        if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
                goto datalen_error;
        if (!skb_pull(skb, sizeof(sechdr)))
                BUG();

        buf = ntohl(sechdr.data_size);
        data_size = buf & 0xffff;

        /* the top 16 bits hold the sender's checksum of seq^callNumber */
        check = buf >> 16;
        check ^= ntohl(sp->hdr.seq ^ sp->hdr.callNumber);
        check &= 0xffff;
        if (check != 0) {
                *_abort_code = RXKADSEALEDINCON;
                goto protocol_error;
        }

        /* shorten the packet to remove the padding */
        if (data_size > skb->len)
                goto datalen_error;
        else if (data_size < skb->len)
                skb->len = data_size;

        _leave(" = 0 [dlen=%x]", data_size);
        return 0;

datalen_error:
        *_abort_code = RXKADDATALEN;
protocol_error:
        _leave(" = -EPROTO");
        return -EPROTO;

nomem:
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
/*
 * verify the security on a received packet: recompute and compare the
 * header checksum, then apply the level-specific payload verification
 */
static int rxkad_verify_packet(const struct rxrpc_call *call,
                               struct sk_buff *skb,
                               u32 *_abort_code)
{
        struct blkcipher_desc desc;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_crypt iv;
        struct scatterlist sg[2];
        struct {
                __be32 x[2];
        } tmpbuf __attribute__((aligned(8))); /* must all be in same page */
        __be32 x;
        __be16 cksum;
        u32 y;
        int ret;

        sp = rxrpc_skb(skb);

        _enter("{%d{%x}},{#%u}",
               call->debug_id, key_serial(call->conn->key),
               ntohl(sp->hdr.seq));

        /* unsecured connection: nothing to check */
        if (!call->conn->cipher)
                return 0;

        if (sp->hdr.securityIndex != RXRPC_SECURITY_RXKAD) {
                *_abort_code = RXKADINCONSISTENCY;
                _leave(" = -EPROTO [not rxkad]");
                return -EPROTO;
        }

        /* continue encrypting from where we left off */
        memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
        desc.tfm = call->conn->cipher;
        desc.info = iv.x;
        desc.flags = 0;

        /* validate the security checksum: same construction as the
         * sender's in rxkad_secure_packet() */
        x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
        x |= sp->hdr.seq & cpu_to_be32(0x3fffffff);
        tmpbuf.x[0] = call->call_id;
        tmpbuf.x[1] = x;

        /* encrypt tmpbuf in place (source and destination alias) */
        sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
        sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
        crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));

        y = ntohl(tmpbuf.x[1]);
        y = (y >> 16) & 0xffff;
        if (y == 0)
                y = 1; /* zero checksums are not permitted */
        cksum = htons(y);

        if (sp->hdr.cksum != cksum) {
                *_abort_code = RXKADSEALEDINCON;
                _leave(" = -EPROTO [csum failed]");
                return -EPROTO;
        }

        switch (call->conn->security_level) {
        case RXRPC_SECURITY_PLAIN:
                ret = 0;
                break;
        case RXRPC_SECURITY_AUTH:
                ret = rxkad_verify_packet_auth(call, skb, _abort_code);
                break;
        case RXRPC_SECURITY_ENCRYPT:
                ret = rxkad_verify_packet_encrypt(call, skb, _abort_code);
                break;
        default:
                ret = -ENOANO;
                break;
        }

        _leave(" = %d", ret);
        return ret;
}
/*
 * issue a CHALLENGE packet carrying a freshly generated nonce; the peer
 * must echo it back incremented in its RESPONSE
 */
static int rxkad_issue_challenge(struct rxrpc_connection *conn)
{
        struct rxkad_challenge challenge;
        struct rxrpc_header hdr;
        struct msghdr msg;
        struct kvec iov[2];
        size_t len;
        int ret;

        _enter("{%d,%x}", conn->debug_id, key_serial(conn->key));

        ret = key_validate(conn->key);
        if (ret < 0)
                return ret;

        get_random_bytes(&conn->security_nonce, sizeof(conn->security_nonce));

        challenge.version       = htonl(2);
        challenge.nonce         = htonl(conn->security_nonce);
        challenge.min_level     = htonl(0);
        challenge.__padding     = 0;

        msg.msg_name            = &conn->trans->peer->srx.transport.sin;
        msg.msg_namelen         = sizeof(conn->trans->peer->srx.transport.sin);
        msg.msg_control         = NULL;
        msg.msg_controllen      = 0;
        msg.msg_flags           = 0;

        hdr.epoch               = conn->epoch;
        hdr.cid                 = conn->cid;
        hdr.callNumber          = 0;
        hdr.seq                 = 0;
        hdr.type                = RXRPC_PACKET_TYPE_CHALLENGE;
        hdr.flags               = conn->out_clientflag;
        hdr.userStatus          = 0;
        hdr.securityIndex       = conn->security_ix;
        hdr._rsvd               = 0;
        hdr.serviceId           = conn->service_id;

        iov[0].iov_base = &hdr;
        iov[0].iov_len  = sizeof(hdr);
        iov[1].iov_base = &challenge;
        iov[1].iov_len  = sizeof(challenge);

        len = iov[0].iov_len + iov[1].iov_len;

        /* assign the serial number last, just before transmission */
        hdr.serial = htonl(atomic_inc_return(&conn->serial));
        _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));

        ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
        if (ret < 0) {
                _debug("sendmsg failed: %d", ret);
                return -EAGAIN;
        }

        _leave(" = 0");
        return 0;
}
/*
 * send a Kerberos security response: transmit header + response block +
 * the raw kerberos ticket as a three-element iovec
 */
static int rxkad_send_response(struct rxrpc_connection *conn,
                               struct rxrpc_header *hdr,
                               struct rxkad_response *resp,
                               const struct rxkad_key *s2)
{
        struct msghdr msg;
        struct kvec iov[3];
        size_t len;
        int ret;

        _enter("");

        msg.msg_name            = &conn->trans->peer->srx.transport.sin;
        msg.msg_namelen         = sizeof(conn->trans->peer->srx.transport.sin);
        msg.msg_control         = NULL;
        msg.msg_controllen      = 0;
        msg.msg_flags           = 0;

        /* rewrite the challenge's header in place as a RESPONSE */
        hdr->epoch              = conn->epoch;
        hdr->seq                = 0;
        hdr->type               = RXRPC_PACKET_TYPE_RESPONSE;
        hdr->flags              = conn->out_clientflag;
        hdr->userStatus         = 0;
        hdr->_rsvd              = 0;

        iov[0].iov_base = hdr;
        iov[0].iov_len  = sizeof(*hdr);
        iov[1].iov_base = resp;
        iov[1].iov_len  = sizeof(*resp);
        iov[2].iov_base = (void *) s2->ticket;
        iov[2].iov_len  = s2->ticket_len;

        len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;

        /* assign the serial number last, just before transmission */
        hdr->serial = htonl(atomic_inc_return(&conn->serial));
        _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));

        ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
        if (ret < 0) {
                _debug("sendmsg failed: %d", ret);
                return -EAGAIN;
        }

        _leave(" = 0");
        return 0;
}
/*
 * calculate the response checksum: a multiplicative hash over the raw
 * bytes of the response packet, seeded with 1000003, stored big-endian
 * in the encrypted section
 */
static void rxkad_calc_response_checksum(struct rxkad_response *response)
{
        const u8 *b = (const u8 *) response;
        u32 csum = 1000003;
        unsigned int i;

        for (i = 0; i < sizeof(*response); i++)
                csum = csum * 0x10204081 + b[i];

        response->encrypted.checksum = htonl(csum);
}
/*
 * load a scatterlist with a potentially split-page buffer: if the buffer
 * straddles a page boundary it is described by two sg entries, otherwise
 * by one
 */
static void rxkad_sg_set_buf2(struct scatterlist sg[2],
                              void *buf, size_t buflen)
{
        int nsg = 1;

        sg_init_table(sg, 2);

        sg_set_buf(&sg[0], buf, buflen);
        if (sg[0].offset + buflen > PAGE_SIZE) {
                /* the buffer was split over two pages */
                sg[0].length = PAGE_SIZE - sg[0].offset;
                sg_set_buf(&sg[1], buf + sg[0].length, buflen - sg[0].length);
                nsg++;
        }

        sg_mark_end(&sg[nsg - 1]);

        ASSERTCMP(sg[0].length + sg[1].length, ==, buflen);
}
/*
 * encrypt the response packet's "encrypted" section in place with the
 * session key from the rxkad key material
 */
static void rxkad_encrypt_response(struct rxrpc_connection *conn,
                                   struct rxkad_response *resp,
                                   const struct rxkad_key *s2)
{
        struct blkcipher_desc desc;
        struct rxrpc_crypt iv;
        struct scatterlist sg[2];

        /* continue encrypting from where we left off */
        memcpy(&iv, s2->session_key, sizeof(iv));
        desc.tfm = conn->cipher;
        desc.info = iv.x;
        desc.flags = 0;

        /* the section may straddle a page boundary, hence buf2 */
        rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
        crypto_blkcipher_encrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
}
/*
 * respond to a challenge packet: validate the challenge, then build,
 * checksum, encrypt and transmit the RESPONSE
 */
static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
                                      struct sk_buff *skb,
                                      u32 *_abort_code)
{
        const struct rxrpc_key_token *token;
        struct rxkad_challenge challenge;
        struct rxkad_response resp
                __attribute__((aligned(8))); /* must be aligned for crypto */
        struct rxrpc_skb_priv *sp;
        u32 version, nonce, min_level, abort_code;
        int ret;

        _enter("{%d,%x}", conn->debug_id, key_serial(conn->key));

        if (!conn->key) {
                _leave(" = -EPROTO [no key]");
                return -EPROTO;
        }

        ret = key_validate(conn->key);
        if (ret < 0) {
                *_abort_code = RXKADEXPIRED;
                return ret;
        }

        abort_code = RXKADPACKETSHORT;
        sp = rxrpc_skb(skb);
        if (skb_copy_bits(skb, 0, &challenge, sizeof(challenge)) < 0)
                goto protocol_error;

        version = ntohl(challenge.version);
        nonce = ntohl(challenge.nonce);
        min_level = ntohl(challenge.min_level);

        _proto("Rx CHALLENGE %%%u { v=%u n=%u ml=%u }",
               ntohl(sp->hdr.serial), version, nonce, min_level);

        abort_code = RXKADINCONSISTENCY;
        if (version != RXKAD_VERSION)
                goto protocol_error;

        /* refuse if the peer demands a higher security level than ours */
        abort_code = RXKADLEVELFAIL;
        if (conn->security_level < min_level)
                goto protocol_error;

        token = conn->key->payload.data;

        /* build the response packet */
        memset(&resp, 0, sizeof(resp));

        resp.version = RXKAD_VERSION;
        resp.encrypted.epoch = conn->epoch;
        resp.encrypted.cid = conn->cid;
        resp.encrypted.securityIndex = htonl(conn->security_ix);
        /* snapshot the call IDs on all four channels */
        resp.encrypted.call_id[0] =
                (conn->channels[0] ? conn->channels[0]->call_id : 0);
        resp.encrypted.call_id[1] =
                (conn->channels[1] ? conn->channels[1]->call_id : 0);
        resp.encrypted.call_id[2] =
                (conn->channels[2] ? conn->channels[2]->call_id : 0);
        resp.encrypted.call_id[3] =
                (conn->channels[3] ? conn->channels[3]->call_id : 0);
        resp.encrypted.inc_nonce = htonl(nonce + 1);
        resp.encrypted.level = htonl(conn->security_level);
        resp.kvno = htonl(token->kad->kvno);
        resp.ticket_len = htonl(token->kad->ticket_len);

        /* calculate the response checksum and then do the encryption */
        rxkad_calc_response_checksum(&resp);
        rxkad_encrypt_response(conn, &resp, token->kad);
        return rxkad_send_response(conn, &sp->hdr, &resp, token->kad);

protocol_error:
        *_abort_code = abort_code;
        _leave(" = -EPROTO [%d]", abort_code);
        return -EPROTO;
}
/*
 * decrypt the kerberos IV ticket in the response: recover the session
 * key and expiry time, validating the ticket's structure and timestamps
 * along the way; on failure the appropriate rxkad abort code is written
 * to *_abort_code
 */
static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
                                void *ticket, size_t ticket_len,
                                struct rxrpc_crypt *_session_key,
                                time_t *_expiry,
                                u32 *_abort_code)
{
        struct blkcipher_desc desc;
        struct rxrpc_crypt iv, key;
        struct scatterlist sg[1];
        struct in_addr addr;
        unsigned life;
        time_t issue, now;
        bool little_endian;
        int ret;
        u8 *p, *q, *name, *end;

        _enter("{%d},{%x}", conn->debug_id, key_serial(conn->server_key));

        *_expiry = 0;

        ret = key_validate(conn->server_key);
        if (ret < 0) {
                switch (ret) {
                case -EKEYEXPIRED:
                        *_abort_code = RXKADEXPIRED;
                        goto error;
                default:
                        *_abort_code = RXKADNOAUTH;
                        goto error;
                }
        }

        ASSERT(conn->server_key->payload.data != NULL);
        ASSERTCMP((unsigned long) ticket & 7UL, ==, 0);

        memcpy(&iv, &conn->server_key->type_data, sizeof(iv));

        /* decrypt the whole ticket in place with the server key */
        desc.tfm = conn->server_key->payload.data;
        desc.info = iv.x;
        desc.flags = 0;

        sg_init_one(&sg[0], ticket, ticket_len);
        crypto_blkcipher_decrypt_iv(&desc, sg, sg, ticket_len);

        p = ticket;
        end = p + ticket_len;

/* extract a bounded, printable, NUL-terminated string from the ticket;
 * jumps to bad_ticket on malformed input and leaves p just past the NUL */
#define Z(size)						\
({							\
	u8 *__str = p;					\
	q = memchr(p, 0, end - p);			\
	if (!q || q - p > (size))			\
		goto bad_ticket;			\
	for (; p < q; p++)				\
		if (!isprint(*p))			\
			goto bad_ticket;		\
	p++;						\
	__str;						\
})

        /* extract the ticket flags */
        _debug("KIV FLAGS: %x", *p);
        little_endian = *p & 1;
        p++;

        /* extract the authentication name */
        name = Z(ANAME_SZ);
        _debug("KIV ANAME: %s", name);

        /* extract the principal's instance */
        name = Z(INST_SZ);
        _debug("KIV INST : %s", name);

        /* extract the principal's authentication domain */
        name = Z(REALM_SZ);
        _debug("KIV REALM: %s", name);

        /* make sure the fixed-size tail (addr + key + life + stamp) fits */
        if (end - p < 4 + 8 + 4 + 2)
                goto bad_ticket;

        /* get the IPv4 address of the entity that requested the ticket */
        memcpy(&addr, p, sizeof(addr));
        p += 4;
        _debug("KIV ADDR : %pI4", &addr);

        /* get the session key from the ticket */
        memcpy(&key, p, sizeof(key));
        p += 8;
        _debug("KIV KEY : %08x %08x", ntohl(key.n[0]), ntohl(key.n[1]));
        memcpy(_session_key, &key, sizeof(key));

        /* get the ticket's lifetime (stored in 5-minute units) */
        life = *p++ * 5 * 60;
        _debug("KIV LIFE : %u", life);

        /* get the issue time of the ticket; its byte order depends on the
         * flag bit extracted above */
        if (little_endian) {
                __le32 stamp;
                memcpy(&stamp, p, 4);
                issue = le32_to_cpu(stamp);
        } else {
                __be32 stamp;
                memcpy(&stamp, p, 4);
                issue = be32_to_cpu(stamp);
        }
        p += 4;
        now = get_seconds();
        _debug("KIV ISSUE: %lx [%lx]", issue, now);

        /* check the ticket is in date */
        if (issue > now) {
                *_abort_code = RXKADNOAUTH;
                ret = -EKEYREJECTED;
                goto error;
        }

        if (issue < now - life) {
                *_abort_code = RXKADEXPIRED;
                ret = -EKEYEXPIRED;
                goto error;
        }

        *_expiry = issue + life;

        /* get the service name */
        name = Z(SNAME_SZ);
        _debug("KIV SNAME: %s", name);

        /* get the service instance name */
        name = Z(INST_SZ);
        _debug("KIV SINST: %s", name);

        ret = 0;
error:
        _leave(" = %d", ret);
        return ret;

bad_ticket:
        *_abort_code = RXKADBADTICKET;
        ret = -EBADMSG;
        goto error;
}
/*
 * decrypt the response packet's "encrypted" section in place with the
 * session key recovered from the kerberos ticket; uses the module-wide
 * pinned cipher (rxkad_ci) serialised by rxkad_ci_mutex
 */
static void rxkad_decrypt_response(struct rxrpc_connection *conn,
                                   struct rxkad_response *resp,
                                   const struct rxrpc_crypt *session_key)
{
        struct blkcipher_desc desc;
        struct scatterlist sg[2];
        struct rxrpc_crypt iv;

        _enter(",,%08x%08x",
               ntohl(session_key->n[0]), ntohl(session_key->n[1]));

        ASSERT(rxkad_ci != NULL);

        /* the shared cipher must be re-keyed for each response, hence
         * the mutex */
        mutex_lock(&rxkad_ci_mutex);
        if (crypto_blkcipher_setkey(rxkad_ci, session_key->x,
                                    sizeof(*session_key)) < 0)
                BUG();

        memcpy(&iv, session_key, sizeof(iv));
        desc.tfm = rxkad_ci;
        desc.info = iv.x;
        desc.flags = 0;

        /* the section may straddle a page boundary, hence buf2 */
        rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
        crypto_blkcipher_decrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
        mutex_unlock(&rxkad_ci_mutex);

        _leave("");
}
/*
* verify a response
*/
static int rxkad_verify_response(struct rxrpc_connection *conn,
				 struct sk_buff *skb,
				 u32 *_abort_code)
{
	struct rxkad_response response
		__attribute__((aligned(8))); /* must be aligned for crypto */
	struct rxrpc_skb_priv *sp;
	struct rxrpc_crypt session_key;
	time_t expiry;
	void *ticket;
	u32 abort_code, version, kvno, ticket_len, level;
	__be32 csum;
	int ret;

	_enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));

	/* pull the fixed-size response record off the front of the packet;
	 * abort_code is kept up to date so the error paths report the phase
	 * that failed */
	abort_code = RXKADPACKETSHORT;
	if (skb_copy_bits(skb, 0, &response, sizeof(response)) < 0)
		goto protocol_error;
	if (!pskb_pull(skb, sizeof(response)))
		BUG();

	version = ntohl(response.version);
	ticket_len = ntohl(response.ticket_len);
	kvno = ntohl(response.kvno);
	sp = rxrpc_skb(skb);
	_proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }",
	       ntohl(sp->hdr.serial), version, kvno, ticket_len);

	/* sanity-check the plaintext header fields before touching the
	 * ticket */
	abort_code = RXKADINCONSISTENCY;
	if (version != RXKAD_VERSION)
		goto protocol_error;

	abort_code = RXKADTICKETLEN;
	if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN)
		goto protocol_error;

	abort_code = RXKADUNKNOWNKEY;
	if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5)
		goto protocol_error;

	/* extract the kerberos ticket and decrypt and decode it */
	ticket = kmalloc(ticket_len, GFP_NOFS);
	if (!ticket)
		return -ENOMEM;

	abort_code = RXKADPACKETSHORT;
	if (skb_copy_bits(skb, 0, ticket, ticket_len) < 0)
		goto protocol_error_free;

	/* rxkad_decrypt_ticket() fills in session_key/expiry and sets
	 * abort_code itself on failure */
	ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key,
				   &expiry, &abort_code);
	if (ret < 0) {
		*_abort_code = abort_code;
		kfree(ticket);
		return ret;
	}

	/* use the session key from inside the ticket to decrypt the
	 * response */
	rxkad_decrypt_response(conn, &response, &session_key);

	/* the decrypted section must identify this very connection */
	abort_code = RXKADSEALEDINCON;
	if (response.encrypted.epoch != conn->epoch)
		goto protocol_error_free;
	if (response.encrypted.cid != conn->cid)
		goto protocol_error_free;
	if (ntohl(response.encrypted.securityIndex) != conn->security_ix)
		goto protocol_error_free;

	/* verify the embedded checksum, which is computed over the response
	 * with the checksum field zeroed */
	csum = response.encrypted.checksum;
	response.encrypted.checksum = 0;
	rxkad_calc_response_checksum(&response);
	if (response.encrypted.checksum != csum)
		goto protocol_error_free;

	/* the supplied call IDs must each fit in a signed int */
	if (ntohl(response.encrypted.call_id[0]) > INT_MAX ||
	    ntohl(response.encrypted.call_id[1]) > INT_MAX ||
	    ntohl(response.encrypted.call_id[2]) > INT_MAX ||
	    ntohl(response.encrypted.call_id[3]) > INT_MAX)
		goto protocol_error_free;

	/* the nonce must have been incremented exactly once since the
	 * challenge was issued */
	abort_code = RXKADOUTOFSEQUENCE;
	if (response.encrypted.inc_nonce != htonl(conn->security_nonce + 1))
		goto protocol_error_free;

	/* adopt the security level the client selected, if it's valid */
	abort_code = RXKADLEVELFAIL;
	level = ntohl(response.encrypted.level);
	if (level > RXRPC_SECURITY_ENCRYPT)
		goto protocol_error_free;
	conn->security_level = level;

	/* create a key to hold the security data and expiration time - after
	 * this the connection security can be handled in exactly the same way
	 * as for a client connection */
	ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno);
	if (ret < 0) {
		kfree(ticket);
		return ret;
	}

	kfree(ticket);
	_leave(" = 0");
	return 0;

protocol_error_free:
	kfree(ticket);
protocol_error:
	*_abort_code = abort_code;
	_leave(" = -EPROTO [%d]", abort_code);
	return -EPROTO;
}
/*
* clear the connection security
*/
static void rxkad_clear(struct rxrpc_connection *conn)
{
_enter("");
if (conn->cipher)
crypto_free_blkcipher(conn->cipher);
}
/*
* RxRPC Kerberos-based security
*/
/* security-type ops table registered with the rxrpc core by rxkad_init() */
static struct rxrpc_security rxkad = {
	.owner				= THIS_MODULE,
	.name				= "rxkad",
	.security_index			= RXRPC_SECURITY_RXKAD,
	.init_connection_security	= rxkad_init_connection_security,
	.prime_packet_security		= rxkad_prime_packet_security,
	.secure_packet			= rxkad_secure_packet,
	.verify_packet			= rxkad_verify_packet,
	.issue_challenge		= rxkad_issue_challenge,
	.respond_to_challenge		= rxkad_respond_to_challenge,
	.verify_response		= rxkad_verify_response,
	.clear				= rxkad_clear,		/* per-connection teardown */
};
/*
 * initialise the rxkad security module
 * - allocates the shared fcrypt cipher and registers the ops table
 * - returns 0 or a negative errno
 */
static __init int rxkad_init(void)
{
	int ret;

	_enter("");

	/* pin the cipher we need so that the crypto layer doesn't invoke
	 * keventd to go get it */
	rxkad_ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(rxkad_ci))
		return PTR_ERR(rxkad_ci);

	ret = rxrpc_register_security(&rxkad);
	if (ret < 0)
		/* don't leak the pinned cipher if registration fails */
		crypto_free_blkcipher(rxkad_ci);
	return ret;
}
module_init(rxkad_init);
/*
 * clean up the rxkad security module
 */
static __exit void rxkad_exit(void)
{
	_enter("");

	/* unregister before freeing so nothing can still be using rxkad_ci */
	rxrpc_unregister_security(&rxkad);
	crypto_free_blkcipher(rxkad_ci);
}
module_exit(rxkad_exit);
| gpl-2.0 |
vuanhduy/odroidxu-3.4.y | arch/frv/kernel/gdb-stub.c | 9090 | 56303 | /* gdb-stub.c: FRV GDB stub
*
* Copyright (C) 2003,4 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* - Derived from Linux/MIPS version, Copyright (C) 1995 Andreas Busse
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* To enable debugger support, two things need to happen. One, a
* call to set_debug_traps() is necessary in order to allow any breakpoints
* or error conditions to be properly intercepted and reported to gdb.
* Two, a breakpoint needs to be generated to begin communication. This
* is most easily accomplished by a call to breakpoint(). Breakpoint()
* simulates a breakpoint by executing a BREAK instruction.
*
*
* The following gdb commands are supported:
*
* command function Return value
*
* g return the value of the CPU registers hex data or ENN
* G set the value of the CPU registers OK or ENN
*
* mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
* MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
*
* c Resume at current address SNN ( signal NN)
* cAA..AA Continue at address AA..AA SNN
*
* s Step one instruction SNN
* sAA..AA Step one instruction from AA..AA SNN
*
* k kill
*
* ? What was the last sigval ? SNN (signal NN)
*
* bBB..BB Set baud rate to BB..BB OK or BNN, then sets
* baud rate
*
* All commands and responses are sent with a packet which includes a
* checksum. A packet consists of
*
* $<packet info>#<checksum>.
*
* where
* <packet info> :: <characters representing the command or response>
* <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>>
*
* When a packet is received, it is first acknowledged with either '+' or '-'.
* '+' indicates a successful transfer. '-' indicates a failed transfer.
*
* Example:
*
* Host: Reply:
* $m0,10#2a +$00010203040506070809101112131415#42
*
*
* ==============
* MORE EXAMPLES:
* ==============
*
* For reference -- the following are the steps that one
* company took (RidgeRun Inc) to get remote gdb debugging
* going. In this scenario the host machine was a PC and the
* target platform was a Galileo EVB64120A MIPS evaluation
* board.
*
* Step 1:
* First download gdb-5.0.tar.gz from the internet.
* and then build/install the package.
*
* Example:
* $ tar zxf gdb-5.0.tar.gz
* $ cd gdb-5.0
* $ ./configure --target=frv-elf
* $ make
* $ frv-elf-gdb
*
* Step 2:
* Configure linux for remote debugging and build it.
*
* Example:
* $ cd ~/linux
* $ make menuconfig <go to "Kernel Hacking" and turn on remote debugging>
* $ make vmlinux
*
* Step 3:
* Download the kernel to the remote target and start
* the kernel running. It will promptly halt and wait
* for the host gdb session to connect. It does this
* since the "Kernel Hacking" option has defined
* CONFIG_REMOTE_DEBUG which in turn enables your calls
* to:
* set_debug_traps();
* breakpoint();
*
* Step 4:
* Start the gdb session on the host.
*
* Example:
* $ frv-elf-gdb vmlinux
* (gdb) set remotebaud 115200
* (gdb) target remote /dev/ttyS1
* ...at this point you are connected to
* the remote target and can use gdb
* in the normal fashion. Setting
* breakpoints, single stepping,
* printing variables, etc.
*
*/
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/nmi.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/gdb-stub.h>
#define LEDS(x) do { /* *(u32*)0xe1200004 = ~(x); mb(); */ } while(0)
#undef GDBSTUB_DEBUG_PROTOCOL
extern void debug_to_serial(const char *p, int n);
extern void gdbstub_console_write(struct console *co, const char *p, unsigned n);
extern volatile uint32_t __break_error_detect[3]; /* ESFR1, ESR15, EAR15 */
/* snapshot of one address-mapping register pair: L holds the xAMLR value,
 * P the xAMPR value (filled in by gdbstub_get_mmu_state()) */
struct __debug_amr {
	unsigned long L, P;
} __attribute__((aligned(8)));
/* snapshot of the MMU state, captured by gdbstub_get_mmu_state() so that
 * the remote gdb can inspect it as ordinary memory */
struct __debug_mmu {
	struct {
		unsigned long hsr0, pcsr, esr0, ear0, epcr0;
#ifdef CONFIG_MMU
		unsigned long tplr, tppr, tpxr, cxnr;
#endif
	} regs;

	struct __debug_amr	iamr[16];	/* insn address-mapping regs */
	struct __debug_amr	damr[16];	/* data address-mapping regs */
#ifdef CONFIG_MMU
	struct __debug_amr	tlb[64*2];
#endif
};
static struct __debug_mmu __debug_mmu;
/*
* BUFMAX defines the maximum number of characters in inbound/outbound buffers
* at least NUMREGBYTES*2 are needed for register packets
*/
#define BUFMAX 2048
#define BREAK_INSN 0x801000c0 /* use "break" as bkpt */
static const char gdbstub_banner[] = "Linux/FR-V GDB Stub (c) RedHat 2003\n";
volatile u8 gdbstub_rx_buffer[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
volatile u32 gdbstub_rx_inp = 0;
volatile u32 gdbstub_rx_outp = 0;
volatile u8 gdbstub_rx_overflow = 0;
u8 gdbstub_rx_unget = 0;
/* set with GDB whilst running to permit step through exceptions */
extern volatile u32 __attribute__((section(".bss"))) gdbstub_trace_through_exceptions;
static char input_buffer[BUFMAX];
static char output_buffer[BUFMAX];
/* labels for the words of the exception frame, in the order they are
 * dumped by gdbstub_show_regs() */
static const char *regnames[] = {
	"PSR ", "ISR ", "CCR ", "CCCR",
	"LR  ", "LCR ", "PC  ", "_stt",
	"sys ", "GR8*", "GNE0", "GNE1",
	"IACH", "IACL",
	"TBR ", "SP  ", "FP  ", "GR3 ",
	"GR4 ", "GR5 ", "GR6 ", "GR7 ",
	"GR8 ", "GR9 ", "GR10", "GR11",
	"GR12", "GR13", "GR14", "GR15",
	"GR16", "GR17", "GR18", "GR19",
	"GR20", "GR21", "GR22", "GR23",
	"GR24", "GR25", "GR26", "GR27",
	"EFRM", "CURR", "GR30", "BFRM"
};
/* record of a planted software breakpoint, so the overwritten instructions
 * can be put back when it is cleared */
struct gdbstub_bkpt {
	unsigned long addr;		/* address of breakpoint (0 = slot free) */
	unsigned len;			/* size of breakpoint */
	uint32_t originsns[7];		/* original instructions */
};

static struct gdbstub_bkpt gdbstub_bkpts[256];
/*
* local prototypes
*/
static void gdbstub_recv_packet(char *buffer);
static int gdbstub_send_packet(char *buffer);
static int gdbstub_compute_signal(unsigned long tbr);
static int hex(unsigned char ch);
static int hexToInt(char **ptr, unsigned long *intValue);
static unsigned char *mem2hex(const void *mem, char *buf, int count, int may_fault);
static char *hex2mem(const char *buf, void *_mem, int count);
/*
* Convert ch from a hex digit to an int
*/
/*
 * decode a single hex digit
 * - returns its value 0-15, or -1 if ch is not a hex digit
 */
static int hex(unsigned char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}
/*
 * printf-style output straight to the debug serial port
 * - uses a static staging buffer, so this is not re-entrant
 */
void gdbstub_printk(const char *fmt, ...)
{
	static char obuf[1024];
	va_list va;
	int n;

	/* format into the staging buffer, then push it out the serial port */
	va_start(va, fmt);
	n = vsnprintf(obuf, sizeof(obuf), fmt, va);
	va_end(va);

	debug_to_serial(obuf, n);
}
/* minimal strcpy clone; copies up to and including the NUL and returns dst */
static inline char *gdbstub_strcpy(char *dst, const char *src)
{
	char *out = dst;

	while ((*out = *src) != '\0') {
		out++;
		src++;
	}
	return dst;
}
/* flush the data cache and invalidate the instruction cache, presumably so
 * that instructions written by the stub (e.g. planted breakpoints) take
 * effect (NOTE(review): dcef/icei addressing-mode details per FR-V ISA) */
static void gdbstub_purge_cache(void)
{
	asm volatile("	dcef	@(gr0,gr0),#1	\n"
		     "	icei	@(gr0,gr0),#1	\n"
		     "	membar			\n"
		     "	bar			\n"
		     );
}
/*****************************************************************************/
/*
* scan for the sequence $<data>#<checksum>
*/
static void gdbstub_recv_packet(char *buffer)
{
	unsigned char checksum;
	unsigned char xmitcsum;
	unsigned char ch;
	int count, i, ret, error;

	for (;;) {
		/* wait around for the start character, ignore all other characters */
		do {
			gdbstub_rx_char(&ch, 0);
		} while (ch != '$');

		checksum = 0;
		xmitcsum = -1;
		count = 0;
		error = 0;

		/* now, read until a # or end of buffer is found */
		while (count < BUFMAX) {
			ret = gdbstub_rx_char(&ch, 0);
			if (ret < 0)
				error = ret;

			if (ch == '#')
				break;
			checksum += ch;
			buffer[count] = ch;
			count++;
		}

		/* serial error mid-packet: NAK so the remote retransmits */
		if (error == -EIO) {
			gdbstub_proto("### GDB Rx Error - Skipping packet ###\n");
			gdbstub_proto("### GDB Tx NAK\n");
			gdbstub_tx_char('-');
			continue;
		}

		/* drop oversized or otherwise errored packets and wait for
		 * the next '$' */
		if (count >= BUFMAX || error)
			continue;

		buffer[count] = 0;

		/* read the checksum (two hex digits following the '#') */
		ret = gdbstub_rx_char(&ch, 0);
		if (ret < 0)
			error = ret;
		xmitcsum = hex(ch) << 4;

		ret = gdbstub_rx_char(&ch, 0);
		if (ret < 0)
			error = ret;
		xmitcsum |= hex(ch);

		if (error) {
			if (error == -EIO)
				gdbstub_proto("### GDB Rx Error - Skipping packet\n");
			gdbstub_proto("### GDB Tx NAK\n");
			gdbstub_tx_char('-');
			continue;
		}

		/* check the checksum: modulo-256 sum of the payload bytes */
		if (checksum != xmitcsum) {
			gdbstub_proto("### GDB Tx NAK\n");
			gdbstub_tx_char('-');	/* failed checksum */
			continue;
		}

		gdbstub_proto("### GDB Rx '$%s#%02x' ###\n", buffer, checksum);
		gdbstub_proto("### GDB Tx ACK\n");
		gdbstub_tx_char('+'); /* successful transfer */

		/* if a sequence char is present, reply the sequence ID */
		if (buffer[2] == ':') {
			gdbstub_tx_char(buffer[0]);
			gdbstub_tx_char(buffer[1]);

			/* remove sequence chars from buffer */
			count = 0;
			while (buffer[count]) count++;
			for (i=3; i <= count; i++)
				buffer[i - 3] = buffer[i];
		}

		break;
	}

} /* end gdbstub_recv_packet() */
/*****************************************************************************/
/*
* send the packet in buffer.
* - return 0 if successfully ACK'd
* - return 1 if abandoned due to new incoming packet
*/
static int gdbstub_send_packet(char *buffer)
{
	unsigned char checksum;
	int count;
	unsigned char ch;

	/* $<packet info>#<checksum> */
	gdbstub_proto("### GDB Tx '%s' ###\n", buffer);

	do {
		gdbstub_tx_char('$');
		checksum = 0;
		count = 0;

		/* send the payload, accumulating the modulo-256 checksum */
		while ((ch = buffer[count]) != 0) {
			gdbstub_tx_char(ch);
			checksum += ch;
			count += 1;
		}

		gdbstub_tx_char('#');
		gdbstub_tx_char(hex_asc_hi(checksum));
		gdbstub_tx_char(hex_asc_lo(checksum));

	/* retransmit until the remote replies '+' (ACK) or '$' (it has
	 * started a new packet); anything else - including '-' NAK - means
	 * resend */
	} while (gdbstub_rx_char(&ch,0),
#ifdef GDBSTUB_DEBUG_PROTOCOL
		 ch=='-' && (gdbstub_proto("### GDB Rx NAK\n"),0),
		 ch!='-' && ch!='+' && (gdbstub_proto("### GDB Rx ??? %02x\n",ch),0),
#endif
		 ch!='+' && ch!='$');

	if (ch=='+') {
		gdbstub_proto("### GDB Rx ACK\n");
		return 0;
	}

	/* '$' seen: push it back so the caller can service the new packet */
	gdbstub_proto("### GDB Tx Abandoned\n");
	gdbstub_rx_unget = ch;
	return 1;

} /* end gdbstub_send_packet() */
/*
* While we find nice hex chars, build an int.
* Return number of chars processed.
*/
/*
 * While we find nice hex chars, build an int.
 * - advances *ptr past the digits consumed and stores the value in *_value
 * - returns the number of characters processed
 */
static int hexToInt(char **ptr, unsigned long *_value)
{
	int count = 0;

	*_value = 0;

	for (;;) {
		unsigned char c = (unsigned char) **ptr;
		int digit;

		if (c >= '0' && c <= '9')
			digit = c - '0';
		else if (c >= 'a' && c <= 'f')
			digit = c - 'a' + 10;
		else if (c >= 'A' && c <= 'F')
			digit = c - 'A' + 10;
		else
			break;	/* NUL or any non-hex char terminates */

		*_value = (*_value << 4) | (digit & 0xf);
		count++;
		(*ptr)++;
	}

	return count;
}
/*****************************************************************************/
/*
* probe an address to see whether it maps to anything
*/
static inline int gdbstub_addr_probe(const void *vaddr)
{
#ifdef CONFIG_MMU
	unsigned long paddr;

	/* look up the real address with the lrad insn and test the valid
	 * bit in the result */
	asm("lrad %1,%0,#1,#0,#0" : "=r"(paddr) : "r"(vaddr));

	if (!(paddr & xAMPRx_V))
		return 0;
#endif

	/* no-MMU kernels consider everything mapped */
	return 1;
} /* end gdbstub_addr_probe() */
#ifdef CONFIG_MMU
static unsigned long __saved_dampr, __saved_damlr;
/* walk the page tables for vaddr; returns the pte value, or 0 if the page
 * is absent */
static inline unsigned long gdbstub_virt_to_pte(unsigned long vaddr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long val, dampr5;

	/* the pgd base is whatever DAMLR3 currently maps */
	pgd = (pgd_t *) __get_DAMLR(3) + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);

	if (pmd_bad(*pmd) || !pmd_present(*pmd))
		return 0;

	/* make sure dampr5 maps to the correct pmd */
	dampr5 = __get_DAMPR(5);
	val = pmd_val(*pmd);
	__set_DAMPR(5, val | xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | xAMPRx_V);

	/* now its safe to access pmd */
	pte = (pte_t *)__get_DAMLR(5) + __pte_index(vaddr);

	if (pte_present(*pte))
		val = pte_val(*pte);
	else
		val = 0;

	/* restore original dampr5 */
	__set_DAMPR(5, dampr5);

	return val;
}
#endif
/* make vaddr accessible, temporarily pointing DAMR2 at its page if it isn't
 * currently mapped; returns true on success.  Pair with
 * gdbstub_addr_unmap(). */
static inline int gdbstub_addr_map(const void *vaddr)
{
#ifdef CONFIG_MMU
	unsigned long pte;

	/* save DAMR2 so gdbstub_addr_unmap() can restore it */
	__saved_dampr = __get_DAMPR(2);
	__saved_damlr = __get_DAMLR(2);
#endif
	/* already mapped: nothing more to do */
	if (gdbstub_addr_probe(vaddr))
		return 1;
#ifdef CONFIG_MMU
	/* not mapped: install a manual mapping through DAMR2 */
	pte = gdbstub_virt_to_pte((unsigned long) vaddr);
	if (pte) {
		__set_DAMPR(2, pte);
		__set_DAMLR(2, (unsigned long) vaddr & PAGE_MASK);
		return 1;
	}
#endif
	return 0;
}
/* undo any temporary DAMR2 mapping installed by gdbstub_addr_map() */
static inline void gdbstub_addr_unmap(void)
{
#ifdef CONFIG_MMU
	__set_DAMPR(2, __saved_dampr);
	__set_DAMLR(2, __saved_damlr);
#endif
}
/*
* access potentially dodgy memory through a potentially dodgy pointer
*/
/* read one 32-bit word from a potentially dodgy address; returns true on
 * success, false if unmappable or if the access faulted (non-zero BRR) */
static inline int gdbstub_read_dword(const void *addr, uint32_t *_res)
{
	unsigned long brr;
	uint32_t res;

	if (!gdbstub_addr_map(addr))
		return 0;

	/* clear BRR, do the load, then read BRR back to detect a fault */
	asm volatile("	movgs	gr0,brr	\n"
		     "	ld%I2	%M2,%0	\n"
		     "	movsg	brr,%1	\n"
		     : "=r"(res), "=r"(brr)
		     : "m"(*(uint32_t *) addr));

	*_res = res;
	gdbstub_addr_unmap();
	return likely(!brr);
}
/* write one 32-bit word to a potentially dodgy address; returns true on
 * success, false if unmappable or if the access faulted (non-zero BRR) */
static inline int gdbstub_write_dword(void *addr, uint32_t val)
{
	unsigned long brr;

	if (!gdbstub_addr_map(addr))
		return 0;

	/* clear BRR, do the store, then read BRR back to detect a fault */
	asm volatile("	movgs	gr0,brr	\n"
		     "	st%I2	%1,%M2	\n"
		     "	movsg	brr,%0	\n"
		     : "=r"(brr)
		     : "r"(val), "m"(*(uint32_t *) addr));

	gdbstub_addr_unmap();
	return likely(!brr);
}
/* 16-bit variant of gdbstub_read_dword() */
static inline int gdbstub_read_word(const void *addr, uint16_t *_res)
{
	unsigned long brr;
	uint16_t res;

	if (!gdbstub_addr_map(addr))
		return 0;

	asm volatile("	movgs	gr0,brr	\n"
		     "	lduh%I2	%M2,%0	\n"
		     "	movsg	brr,%1	\n"
		     : "=r"(res), "=r"(brr)
		     : "m"(*(uint16_t *) addr));

	*_res = res;
	gdbstub_addr_unmap();
	return likely(!brr);
}
/* 16-bit variant of gdbstub_write_dword() */
static inline int gdbstub_write_word(void *addr, uint16_t val)
{
	unsigned long brr;

	if (!gdbstub_addr_map(addr))
		return 0;

	asm volatile("	movgs	gr0,brr	\n"
		     "	sth%I2	%1,%M2	\n"
		     "	movsg	brr,%0	\n"
		     : "=r"(brr)
		     : "r"(val), "m"(*(uint16_t *) addr));

	gdbstub_addr_unmap();
	return likely(!brr);
}
/* 8-bit variant of gdbstub_read_dword() */
static inline int gdbstub_read_byte(const void *addr, uint8_t *_res)
{
	unsigned long brr;
	uint8_t res;

	if (!gdbstub_addr_map(addr))
		return 0;

	asm volatile("	movgs	gr0,brr	\n"
		     "	ldub%I2	%M2,%0	\n"
		     "	movsg	brr,%1	\n"
		     : "=r"(res), "=r"(brr)
		     : "m"(*(uint8_t *) addr));

	*_res = res;
	gdbstub_addr_unmap();
	return likely(!brr);
}
/* 8-bit variant of gdbstub_write_dword() */
static inline int gdbstub_write_byte(void *addr, uint8_t val)
{
	unsigned long brr;

	if (!gdbstub_addr_map(addr))
		return 0;

	asm volatile("	movgs	gr0,brr	\n"
		     "	stb%I2	%1,%M2	\n"
		     "	movsg	brr,%0	\n"
		     : "=r"(brr)
		     : "r"(val), "m"(*(uint8_t *) addr));

	gdbstub_addr_unmap();
	return likely(!brr);
}
/* forward console output to the remote gdb as hex-encoded 'O' packets */
static void __gdbstub_console_write(struct console *co, const char *p, unsigned n)
{
	char outbuf[26];
	int qty;

	outbuf[0] = 'O';

	while (n > 0) {
		qty = 1;

		while (n > 0 && qty < 20) {
			/* NOTE(review): mem2hex is called with a count of 2
			 * but qty/p only advance by one byte; the surplus
			 * digits get overwritten by the next iteration or
			 * cut off by the NUL below - confirm intended */
			mem2hex(p, outbuf + qty, 2, 0);
			qty += 2;

			if (*p == 0x0a) {
				/* append a hex-encoded CR after each LF */
				outbuf[qty++] = '0';
				outbuf[qty++] = 'd';
			}

			p++;
			n--;
		}

		outbuf[qty] = 0;
		gdbstub_send_packet(outbuf);
	}
}
#if 0
void debug_to_serial(const char *p, int n)
{
gdbstub_console_write(NULL,p,n);
}
#endif
#ifdef CONFIG_GDB_CONSOLE
/* console device that routes kernel console output to the attached gdb */
static struct console gdbstub_console = {
	.name	= "gdb",
	.write	= gdbstub_console_write,	/* in break.S */
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
};
#endif
/*****************************************************************************/
/*
* Convert the memory pointed to by mem into hex, placing result in buf.
* - if successful, return a pointer to the last char put in buf (NUL)
* - in case of mem fault, return NULL
* may_fault is non-zero if we are reading from arbitrary memory, but is currently
* not used.
*/
static unsigned char *mem2hex(const void *_mem, char *buf, int count, int may_fault)
{
	const uint8_t *mem = _mem;
	uint8_t ch[4] __attribute__((aligned(4)));

	/* leading byte to bring the pointer to 2-byte alignment */
	if ((uint32_t)mem&1 && count>=1) {
		if (!gdbstub_read_byte(mem,ch))
			return NULL;
		buf = hex_byte_pack(buf, ch[0]);
		mem++;
		count--;
	}

	/* leading halfword to bring the pointer to 4-byte alignment */
	if ((uint32_t)mem&3 && count>=2) {
		if (!gdbstub_read_word(mem,(uint16_t *)ch))
			return NULL;
		buf = hex_byte_pack(buf, ch[0]);
		buf = hex_byte_pack(buf, ch[1]);
		mem += 2;
		count -= 2;
	}

	/* bulk of the transfer as aligned 32-bit loads */
	while (count>=4) {
		if (!gdbstub_read_dword(mem,(uint32_t *)ch))
			return NULL;
		buf = hex_byte_pack(buf, ch[0]);
		buf = hex_byte_pack(buf, ch[1]);
		buf = hex_byte_pack(buf, ch[2]);
		buf = hex_byte_pack(buf, ch[3]);
		mem += 4;
		count -= 4;
	}

	/* trailing halfword */
	if (count>=2) {
		if (!gdbstub_read_word(mem,(uint16_t *)ch))
			return NULL;
		buf = hex_byte_pack(buf, ch[0]);
		buf = hex_byte_pack(buf, ch[1]);
		mem += 2;
		count -= 2;
	}

	/* trailing byte */
	if (count>=1) {
		if (!gdbstub_read_byte(mem,ch))
			return NULL;
		buf = hex_byte_pack(buf, ch[0]);
	}

	*buf = 0;

	return buf;
} /* end mem2hex() */
/*****************************************************************************/
/*
* convert the hex array pointed to by buf into binary to be placed in mem
* return a pointer to the character AFTER the last byte of buffer consumed
*/
static char *hex2mem(const char *buf, void *_mem, int count)
{
	uint8_t *mem = _mem;
	union {
		uint32_t l;
		uint16_t w;
		uint8_t b[4];
	} ch;

	/* leading byte to bring the pointer to 2-byte alignment */
	if ((u32)mem&1 && count>=1) {
		ch.b[0]  = hex(*buf++) << 4;
		ch.b[0] |= hex(*buf++);
		if (!gdbstub_write_byte(mem,ch.b[0]))
			return NULL;
		mem++;
		count--;
	}

	/* leading halfword to bring the pointer to 4-byte alignment */
	if ((u32)mem&3 && count>=2) {
		ch.b[0]  = hex(*buf++) << 4;
		ch.b[0] |= hex(*buf++);
		ch.b[1]  = hex(*buf++) << 4;
		ch.b[1] |= hex(*buf++);
		if (!gdbstub_write_word(mem,ch.w))
			return NULL;
		mem += 2;
		count -= 2;
	}

	/* bulk of the transfer as aligned 32-bit stores */
	while (count>=4) {
		ch.b[0]  = hex(*buf++) << 4;
		ch.b[0] |= hex(*buf++);
		ch.b[1]  = hex(*buf++) << 4;
		ch.b[1] |= hex(*buf++);
		ch.b[2]  = hex(*buf++) << 4;
		ch.b[2] |= hex(*buf++);
		ch.b[3]  = hex(*buf++) << 4;
		ch.b[3] |= hex(*buf++);
		if (!gdbstub_write_dword(mem,ch.l))
			return NULL;
		mem += 4;
		count -= 4;
	}

	/* trailing halfword */
	if (count>=2) {
		ch.b[0]  = hex(*buf++) << 4;
		ch.b[0] |= hex(*buf++);
		ch.b[1]  = hex(*buf++) << 4;
		ch.b[1] |= hex(*buf++);
		if (!gdbstub_write_word(mem,ch.w))
			return NULL;
		mem += 2;
		count -= 2;
	}

	/* trailing byte */
	if (count>=1) {
		ch.b[0]  = hex(*buf++) << 4;
		ch.b[0] |= hex(*buf++);
		if (!gdbstub_write_byte(mem,ch.b[0]))
			return NULL;
	}

	return (char *) buf;
} /* end hex2mem() */
/*****************************************************************************/
/*
* This table contains the mapping between FRV TBR.TT exception codes,
* and signals, which are primarily what GDB understands. It also
* indicates which hardware traps we need to commandeer when
* initializing the stub.
*/
static const struct brr_to_sig_map {
	unsigned long	brr_mask;	/* BRR bitmask */
	unsigned long	tbr_tt;		/* TBR.TT code (in BRR.EBTT) */
	unsigned int	signo;		/* Signal that we map this into */
} brr_to_sig_map[] = {
	/* scanned in order by gdbstub_compute_signal(); first match wins */
	{ BRR_EB,	TBR_TT_INSTR_ACC_ERROR,	SIGSEGV		},
	{ BRR_EB,	TBR_TT_ILLEGAL_INSTR,	SIGILL		},
	{ BRR_EB,	TBR_TT_PRIV_INSTR,	SIGILL		},
	{ BRR_EB,	TBR_TT_MP_EXCEPTION,	SIGFPE		},
	{ BRR_EB,	TBR_TT_DATA_ACC_ERROR,	SIGSEGV		},
	{ BRR_EB,	TBR_TT_DATA_STR_ERROR,	SIGSEGV		},
	{ BRR_EB,	TBR_TT_DIVISION_EXCEP,	SIGFPE		},
	{ BRR_EB,	TBR_TT_COMPOUND_EXCEP,	SIGSEGV		},
	{ BRR_EB,	TBR_TT_INTERRUPT_13,	SIGALRM		},	/* watchdog */
	{ BRR_EB,	TBR_TT_INTERRUPT_14,	SIGINT		},	/* GDB serial */
	{ BRR_EB,	TBR_TT_INTERRUPT_15,	SIGQUIT		},	/* NMI */
	{ BRR_CB,	0,			SIGUSR1		},
	{ BRR_TB,	0,			SIGUSR2		},
	{ BRR_DBNEx,	0,			SIGTRAP		},
	{ BRR_DBx,	0,			SIGTRAP		},	/* h/w watchpoint */
	{ BRR_IBx,	0,			SIGTRAP		},	/* h/w breakpoint */
	{ BRR_CBB,	0,			SIGTRAP		},
	{ BRR_SB,	0,			SIGTRAP		},
	{ BRR_ST,	0,			SIGTRAP		},	/* single step */
	{ 0,		0,			SIGHUP		}	/* default */
};
/*****************************************************************************/
/*
* convert the FRV BRR register contents into a UNIX signal number
*/
static inline int gdbstub_compute_signal(unsigned long brr)
{
	const struct brr_to_sig_map *m;
	unsigned long tbr_tt = (brr & BRR_EBTT) >> 12;

	/* scan for the first entry whose BRR bit is set in brr and whose
	 * TBR.TT code (if any) matches; the { 0, 0, SIGHUP } terminator
	 * serves as the default */
	for (m = brr_to_sig_map; m->brr_mask != 0; m++) {
		if (!(m->brr_mask & brr))
			continue;
		if (m->tbr_tt && m->tbr_tt != tbr_tt)
			continue;
		break;
	}

	return m->signo;
} /* end gdbstub_compute_signal() */
/*****************************************************************************/
/*
* set a software breakpoint or a hardware breakpoint or watchpoint
*/
static int gdbstub_set_breakpoint(unsigned long type, unsigned long addr, unsigned long len)
{
	unsigned long tmp;
	int bkpt, loop, xloop;

	union {
		struct {
			unsigned long mask0, mask1;
		};
		uint8_t bytes[8];
	} dbmr;

	//gdbstub_printk("setbkpt(%ld,%08lx,%ld)\n", type, addr, len);

	switch (type) {
		/* set software breakpoint */
	case 0:
		/* must be word-aligned and cover at most 7 instructions
		 * (the size of the originsns[] save area) */
		if (addr & 3 || len > 7*4)
			return -EINVAL;

		/* find a free save slot, scanning from the top */
		for (bkpt = 255; bkpt >= 0; bkpt--)
			if (!gdbstub_bkpts[bkpt].addr)
				break;

		if (bkpt < 0)
			return -ENOSPC;

		/* save the original instructions... */
		for (loop = 0; loop < len/4; loop++)
			if (!gdbstub_read_dword(&((uint32_t *) addr)[loop],
						&gdbstub_bkpts[bkpt].originsns[loop]))
				return -EFAULT;

		/* ...then replace them with BREAK instructions */
		for (loop = 0; loop < len/4; loop++)
			if (!gdbstub_write_dword(&((uint32_t *) addr)[loop],
						 BREAK_INSN)
			    ) {
				/* need to undo the changes if possible */
				for (xloop = 0; xloop < loop; xloop++)
					gdbstub_write_dword(&((uint32_t *) addr)[xloop],
							    gdbstub_bkpts[bkpt].originsns[xloop]);
				return -EFAULT;
			}

		gdbstub_bkpts[bkpt].addr = addr;
		gdbstub_bkpts[bkpt].len = len;

#if 0
		gdbstub_printk("Set BKPT[%02x]: %08lx #%d {%04x, %04x} -> { %04x, %04x }\n",
			       bkpt,
			       gdbstub_bkpts[bkpt].addr,
			       gdbstub_bkpts[bkpt].len,
			       gdbstub_bkpts[bkpt].originsns[0],
			       gdbstub_bkpts[bkpt].originsns[1],
			       ((uint32_t *) addr)[0],
			       ((uint32_t *) addr)[1]
			       );
#endif

		return 0;

		/* set hardware breakpoint */
	case 1:
		if (addr & 3 || len != 4)
			return -EINVAL;

		/* claim the first disabled IBAR (instruction breakpoint)
		 * register, mirroring the setting in __debug_regs */
		if (!(__debug_regs->dcr & DCR_IBE0)) {
			//gdbstub_printk("set h/w break 0: %08lx\n", addr);
			__debug_regs->dcr |= DCR_IBE0;
			__debug_regs->ibar[0] = addr;
			asm volatile("movgs %0,ibar0" : : "r"(addr));
			return 0;
		}

		if (!(__debug_regs->dcr & DCR_IBE1)) {
			//gdbstub_printk("set h/w break 1: %08lx\n", addr);
			__debug_regs->dcr |= DCR_IBE1;
			__debug_regs->ibar[1] = addr;
			asm volatile("movgs %0,ibar1" : : "r"(addr));
			return 0;
		}

		if (!(__debug_regs->dcr & DCR_IBE2)) {
			//gdbstub_printk("set h/w break 2: %08lx\n", addr);
			__debug_regs->dcr |= DCR_IBE2;
			__debug_regs->ibar[2] = addr;
			asm volatile("movgs %0,ibar2" : : "r"(addr));
			return 0;
		}

		if (!(__debug_regs->dcr & DCR_IBE3)) {
			//gdbstub_printk("set h/w break 3: %08lx\n", addr);
			__debug_regs->dcr |= DCR_IBE3;
			__debug_regs->ibar[3] = addr;
			asm volatile("movgs %0,ibar3" : : "r"(addr));
			return 0;
		}

		return -ENOSPC;

		/* set data read/write/access watchpoint */
	case 2:
	case 3:
	case 4:
		/* the watched bytes must all lie in one 8-byte DBAR unit */
		if ((addr & ~7) != ((addr + len - 1) & ~7))
			return -EINVAL;

		/* build the byte mask (presumably 0 = watch this byte,
		 * 0xff = ignore - per the FR-V DBMR semantics) */
		tmp = addr & 7;

		memset(dbmr.bytes, 0xff, sizeof(dbmr.bytes));
		for (loop = 0; loop < len; loop++)
			dbmr.bytes[tmp + loop] = 0;

		addr &= ~7;

		/* claim the first free DBAR (data breakpoint) register */
		if (!(__debug_regs->dcr & (DCR_DRBE0|DCR_DWBE0))) {
			//gdbstub_printk("set h/w watchpoint 0 type %ld: %08lx\n", type, addr);
			tmp = type==2 ? DCR_DWBE0 : type==3 ? DCR_DRBE0 : DCR_DRBE0|DCR_DWBE0;

			__debug_regs->dcr |= tmp;
			__debug_regs->dbar[0] = addr;
			__debug_regs->dbmr[0][0] = dbmr.mask0;
			__debug_regs->dbmr[0][1] = dbmr.mask1;
			__debug_regs->dbdr[0][0] = 0;
			__debug_regs->dbdr[0][1] = 0;

			asm volatile("	movgs	%0,dbar0	\n"
				     "	movgs	%1,dbmr00	\n"
				     "	movgs	%2,dbmr01	\n"
				     "	movgs	gr0,dbdr00	\n"
				     "	movgs	gr0,dbdr01	\n"
				     : : "r"(addr), "r"(dbmr.mask0), "r"(dbmr.mask1));
			return 0;
		}

		if (!(__debug_regs->dcr & (DCR_DRBE1|DCR_DWBE1))) {
			//gdbstub_printk("set h/w watchpoint 1 type %ld: %08lx\n", type, addr);
			tmp = type==2 ? DCR_DWBE1 : type==3 ? DCR_DRBE1 : DCR_DRBE1|DCR_DWBE1;

			__debug_regs->dcr |= tmp;
			__debug_regs->dbar[1] = addr;
			__debug_regs->dbmr[1][0] = dbmr.mask0;
			__debug_regs->dbmr[1][1] = dbmr.mask1;
			__debug_regs->dbdr[1][0] = 0;
			__debug_regs->dbdr[1][1] = 0;

			asm volatile("	movgs	%0,dbar1	\n"
				     "	movgs	%1,dbmr10	\n"
				     "	movgs	%2,dbmr11	\n"
				     "	movgs	gr0,dbdr10	\n"
				     "	movgs	gr0,dbdr11	\n"
				     : : "r"(addr), "r"(dbmr.mask0), "r"(dbmr.mask1));
			return 0;
		}

		return -ENOSPC;

	default:
		return -EINVAL;
	}
} /* end gdbstub_set_breakpoint() */
/*****************************************************************************/
/*
* clear a breakpoint or watchpoint
*/
int gdbstub_clear_breakpoint(unsigned long type, unsigned long addr, unsigned long len)
{
	unsigned long tmp;
	int bkpt, loop;

	union {
		struct {
			unsigned long mask0, mask1;
		};
		uint8_t bytes[8];
	} dbmr;

	//gdbstub_printk("clearbkpt(%ld,%08lx,%ld)\n", type, addr, len);

	switch (type) {
		/* clear software breakpoint */
	case 0:
		/* find the save slot matching both address and length */
		for (bkpt = 255; bkpt >= 0; bkpt--)
			if (gdbstub_bkpts[bkpt].addr == addr && gdbstub_bkpts[bkpt].len == len)
				break;

		if (bkpt < 0)
			return -ENOENT;

		/* mark the slot free, then put the saved instructions back */
		gdbstub_bkpts[bkpt].addr = 0;

		for (loop = 0; loop < len/4; loop++)
			if (!gdbstub_write_dword(&((uint32_t *) addr)[loop],
						 gdbstub_bkpts[bkpt].originsns[loop]))
				return -EFAULT;
		return 0;

		/* clear hardware breakpoint */
	case 1:
		if (addr & 3 || len != 4)
			return -EINVAL;

#define __get_ibar(X) ({ unsigned long x; asm volatile("movsg ibar"#X",%0" : "=r"(x)); x; })

		/* disable whichever IBAR register held this address */
		if (__debug_regs->dcr & DCR_IBE0 && __get_ibar(0) == addr) {
			//gdbstub_printk("clear h/w break 0: %08lx\n", addr);
			__debug_regs->dcr &= ~DCR_IBE0;
			__debug_regs->ibar[0] = 0;
			asm volatile("movgs gr0,ibar0");
			return 0;
		}

		if (__debug_regs->dcr & DCR_IBE1 && __get_ibar(1) == addr) {
			//gdbstub_printk("clear h/w break 1: %08lx\n", addr);
			__debug_regs->dcr &= ~DCR_IBE1;
			__debug_regs->ibar[1] = 0;
			asm volatile("movgs gr0,ibar1");
			return 0;
		}

		if (__debug_regs->dcr & DCR_IBE2 && __get_ibar(2) == addr) {
			//gdbstub_printk("clear h/w break 2: %08lx\n", addr);
			__debug_regs->dcr &= ~DCR_IBE2;
			__debug_regs->ibar[2] = 0;
			asm volatile("movgs gr0,ibar2");
			return 0;
		}

		if (__debug_regs->dcr & DCR_IBE3 && __get_ibar(3) == addr) {
			//gdbstub_printk("clear h/w break 3: %08lx\n", addr);
			__debug_regs->dcr &= ~DCR_IBE3;
			__debug_regs->ibar[3] = 0;
			asm volatile("movgs gr0,ibar3");
			return 0;
		}

		return -EINVAL;

		/* clear data read/write/access watchpoint */
	case 2:
	case 3:
	case 4:
		if ((addr & ~7) != ((addr + len - 1) & ~7))
			return -EINVAL;

		/* rebuild the byte mask so an exact match can be required */
		tmp = addr & 7;

		memset(dbmr.bytes, 0xff, sizeof(dbmr.bytes));
		for (loop = 0; loop < len; loop++)
			dbmr.bytes[tmp + loop] = 0;

		addr &= ~7;

#define __get_dbar(X) ({ unsigned long x; asm volatile("movsg dbar"#X",%0" : "=r"(x)); x; })
#define __get_dbmr0(X) ({ unsigned long x; asm volatile("movsg dbmr"#X"0,%0" : "=r"(x)); x; })
#define __get_dbmr1(X) ({ unsigned long x; asm volatile("movsg dbmr"#X"1,%0" : "=r"(x)); x; })

		/* consider DBAR 0 */
		tmp = type==2 ? DCR_DWBE0 : type==3 ? DCR_DRBE0 : DCR_DRBE0|DCR_DWBE0;

		if ((__debug_regs->dcr & (DCR_DRBE0|DCR_DWBE0)) != tmp ||
		    __get_dbar(0) != addr ||
		    __get_dbmr0(0) != dbmr.mask0 ||
		    __get_dbmr1(0) != dbmr.mask1)
			goto skip_dbar0;

		//gdbstub_printk("clear h/w watchpoint 0 type %ld: %08lx\n", type, addr);
		__debug_regs->dcr &= ~(DCR_DRBE0|DCR_DWBE0);
		__debug_regs->dbar[0] = 0;
		__debug_regs->dbmr[0][0] = 0;
		__debug_regs->dbmr[0][1] = 0;
		__debug_regs->dbdr[0][0] = 0;
		__debug_regs->dbdr[0][1] = 0;

		asm volatile("	movgs	gr0,dbar0	\n"
			     "	movgs	gr0,dbmr00	\n"
			     "	movgs	gr0,dbmr01	\n"
			     "	movgs	gr0,dbdr00	\n"
			     "	movgs	gr0,dbdr01	\n");
		return 0;

	skip_dbar0:
		/* consider DBAR 1 */
		tmp = type==2 ? DCR_DWBE1 : type==3 ? DCR_DRBE1 : DCR_DRBE1|DCR_DWBE1;

		if ((__debug_regs->dcr & (DCR_DRBE1|DCR_DWBE1)) != tmp ||
		    __get_dbar(1) != addr ||
		    __get_dbmr0(1) != dbmr.mask0 ||
		    __get_dbmr1(1) != dbmr.mask1)
			goto skip_dbar1;

		//gdbstub_printk("clear h/w watchpoint 1 type %ld: %08lx\n", type, addr);
		__debug_regs->dcr &= ~(DCR_DRBE1|DCR_DWBE1);
		__debug_regs->dbar[1] = 0;
		__debug_regs->dbmr[1][0] = 0;
		__debug_regs->dbmr[1][1] = 0;
		__debug_regs->dbdr[1][0] = 0;
		__debug_regs->dbdr[1][1] = 0;

		asm volatile("	movgs	gr0,dbar1	\n"
			     "	movgs	gr0,dbmr10	\n"
			     "	movgs	gr0,dbmr11	\n"
			     "	movgs	gr0,dbdr10	\n"
			     "	movgs	gr0,dbdr11	\n");
		return 0;

	skip_dbar1:
		return -ENOSPC;

	default:
		return -EINVAL;
	}
} /* end gdbstub_clear_breakpoint() */
/*****************************************************************************/
/*
* check a for an internal software breakpoint, and wind the PC back if necessary
*/
static void gdbstub_check_breakpoint(void)
{
	unsigned long bkpt_addr = __debug_frame->pc - 4;
	int slot;

	/* was the preceding instruction one of our planted breakpoints? */
	slot = 255;
	while (slot >= 0 && gdbstub_bkpts[slot].addr != bkpt_addr)
		slot--;

	/* if so, rewind the PC so the original instruction gets executed
	 * once the breakpoint is removed */
	if (slot >= 0)
		__debug_frame->pc = bkpt_addr;

	//gdbstub_printk("alter pc [%d] %08lx\n", slot, __debug_frame->pc);

} /* end gdbstub_check_breakpoint() */
/*****************************************************************************/
/*
*
*/
/* dump the exception frame registers over the debug serial port */
static void __maybe_unused gdbstub_show_regs(void)
{
	unsigned long *reg;
	int loop;

	gdbstub_printk("\n");

	gdbstub_printk("Frame: @%p [%s]\n",
		       __debug_frame,
		       __debug_frame->psr & PSR_S ? "kernel" : "user");

	/* print the frame as labelled words, five per line, using the
	 * regnames[] table for the labels */
	reg = (unsigned long *) __debug_frame;
	for (loop = 0; loop < NR_PT_REGS; loop++) {
		printk("%s %08lx", regnames[loop + 0], reg[loop + 0]);

		if (loop == NR_PT_REGS - 1 || loop % 5 == 4)
			printk("\n");
		else
			printk(" | ");
	}

	gdbstub_printk("Process %s (pid: %d)\n", current->comm, current->pid);
} /* end gdbstub_show_regs() */
/*****************************************************************************/
/*
* dump debugging regs
*/
static void __maybe_unused gdbstub_dump_debugregs(void)
{
	/* debug control and break status */
	gdbstub_printk("DCR    %08lx  ", __debug_status.dcr);
	gdbstub_printk("BRR    %08lx\n", __debug_status.brr);

	/* instruction breakpoint address registers */
	gdbstub_printk("IBAR0  %08lx  ", __get_ibar(0));
	gdbstub_printk("IBAR1  %08lx  ", __get_ibar(1));
	gdbstub_printk("IBAR2  %08lx  ", __get_ibar(2));
	gdbstub_printk("IBAR3  %08lx\n", __get_ibar(3));

	/* data breakpoint address and byte-mask registers */
	gdbstub_printk("DBAR0  %08lx  ", __get_dbar(0));
	gdbstub_printk("DBMR00 %08lx  ", __get_dbmr0(0));
	gdbstub_printk("DBMR01 %08lx\n", __get_dbmr1(0));

	gdbstub_printk("DBAR1  %08lx  ", __get_dbar(1));
	gdbstub_printk("DBMR10 %08lx  ", __get_dbmr0(1));
	gdbstub_printk("DBMR11 %08lx\n", __get_dbmr1(1));

	gdbstub_printk("\n");
} /* end gdbstub_dump_debugregs() */
/*****************************************************************************/
/*
* dump the MMU state into a structure so that it can be accessed with GDB
*/
/*
 * dump the MMU state into a structure so that it can be accessed with GDB
 * - snapshots the control registers, all 16 instruction and 16 data AMR
 *   pairs and (with CONFIG_MMU) both 64-line TLB ways into __debug_mmu
 */
void gdbstub_get_mmu_state(void)
{
	/* capture the MMU control registers */
	asm volatile("movsg hsr0,%0" : "=r"(__debug_mmu.regs.hsr0));
	asm volatile("movsg pcsr,%0" : "=r"(__debug_mmu.regs.pcsr));
	asm volatile("movsg esr0,%0" : "=r"(__debug_mmu.regs.esr0));
	asm volatile("movsg ear0,%0" : "=r"(__debug_mmu.regs.ear0));
	asm volatile("movsg epcr0,%0" : "=r"(__debug_mmu.regs.epcr0));

	/* read the protection / SAT registers */
	/* (the __get_IAMLR/__get_IAMPR accessors need constant indices, hence
	 * the unrolled sequence rather than a loop) */
	__debug_mmu.iamr[0].L = __get_IAMLR(0);
	__debug_mmu.iamr[0].P = __get_IAMPR(0);
	__debug_mmu.iamr[1].L = __get_IAMLR(1);
	__debug_mmu.iamr[1].P = __get_IAMPR(1);
	__debug_mmu.iamr[2].L = __get_IAMLR(2);
	__debug_mmu.iamr[2].P = __get_IAMPR(2);
	__debug_mmu.iamr[3].L = __get_IAMLR(3);
	__debug_mmu.iamr[3].P = __get_IAMPR(3);
	__debug_mmu.iamr[4].L = __get_IAMLR(4);
	__debug_mmu.iamr[4].P = __get_IAMPR(4);
	__debug_mmu.iamr[5].L = __get_IAMLR(5);
	__debug_mmu.iamr[5].P = __get_IAMPR(5);
	__debug_mmu.iamr[6].L = __get_IAMLR(6);
	__debug_mmu.iamr[6].P = __get_IAMPR(6);
	__debug_mmu.iamr[7].L = __get_IAMLR(7);
	__debug_mmu.iamr[7].P = __get_IAMPR(7);
	__debug_mmu.iamr[8].L = __get_IAMLR(8);
	__debug_mmu.iamr[8].P = __get_IAMPR(8);
	__debug_mmu.iamr[9].L = __get_IAMLR(9);
	__debug_mmu.iamr[9].P = __get_IAMPR(9);
	__debug_mmu.iamr[10].L = __get_IAMLR(10);
	__debug_mmu.iamr[10].P = __get_IAMPR(10);
	__debug_mmu.iamr[11].L = __get_IAMLR(11);
	__debug_mmu.iamr[11].P = __get_IAMPR(11);
	__debug_mmu.iamr[12].L = __get_IAMLR(12);
	__debug_mmu.iamr[12].P = __get_IAMPR(12);
	__debug_mmu.iamr[13].L = __get_IAMLR(13);
	__debug_mmu.iamr[13].P = __get_IAMPR(13);
	__debug_mmu.iamr[14].L = __get_IAMLR(14);
	__debug_mmu.iamr[14].P = __get_IAMPR(14);
	__debug_mmu.iamr[15].L = __get_IAMLR(15);
	__debug_mmu.iamr[15].P = __get_IAMPR(15);

	__debug_mmu.damr[0].L = __get_DAMLR(0);
	__debug_mmu.damr[0].P = __get_DAMPR(0);
	__debug_mmu.damr[1].L = __get_DAMLR(1);
	__debug_mmu.damr[1].P = __get_DAMPR(1);
	__debug_mmu.damr[2].L = __get_DAMLR(2);
	__debug_mmu.damr[2].P = __get_DAMPR(2);
	__debug_mmu.damr[3].L = __get_DAMLR(3);
	__debug_mmu.damr[3].P = __get_DAMPR(3);
	__debug_mmu.damr[4].L = __get_DAMLR(4);
	__debug_mmu.damr[4].P = __get_DAMPR(4);
	__debug_mmu.damr[5].L = __get_DAMLR(5);
	__debug_mmu.damr[5].P = __get_DAMPR(5);
	__debug_mmu.damr[6].L = __get_DAMLR(6);
	__debug_mmu.damr[6].P = __get_DAMPR(6);
	__debug_mmu.damr[7].L = __get_DAMLR(7);
	__debug_mmu.damr[7].P = __get_DAMPR(7);
	__debug_mmu.damr[8].L = __get_DAMLR(8);
	__debug_mmu.damr[8].P = __get_DAMPR(8);
	__debug_mmu.damr[9].L = __get_DAMLR(9);
	__debug_mmu.damr[9].P = __get_DAMPR(9);
	__debug_mmu.damr[10].L = __get_DAMLR(10);
	__debug_mmu.damr[10].P = __get_DAMPR(10);
	__debug_mmu.damr[11].L = __get_DAMLR(11);
	__debug_mmu.damr[11].P = __get_DAMPR(11);
	__debug_mmu.damr[12].L = __get_DAMLR(12);
	__debug_mmu.damr[12].P = __get_DAMPR(12);
	__debug_mmu.damr[13].L = __get_DAMLR(13);
	__debug_mmu.damr[13].P = __get_DAMPR(13);
	__debug_mmu.damr[14].L = __get_DAMLR(14);
	__debug_mmu.damr[14].P = __get_DAMPR(14);
	__debug_mmu.damr[15].L = __get_DAMLR(15);
	__debug_mmu.damr[15].P = __get_DAMPR(15);

#ifdef CONFIG_MMU
	do {
		/* read the DAT entries from the TLB */
		struct __debug_amr *p;
		int loop;

		/* save the TLB probe registers so they can be restored below */
		asm volatile("movsg tplr,%0" : "=r"(__debug_mmu.regs.tplr));
		asm volatile("movsg tppr,%0" : "=r"(__debug_mmu.regs.tppr));
		asm volatile("movsg tpxr,%0" : "=r"(__debug_mmu.regs.tpxr));
		asm volatile("movsg cxnr,%0" : "=r"(__debug_mmu.regs.cxnr));

		p = __debug_mmu.tlb;

		/* way 0 */
		asm volatile("movgs %0,tpxr" :: "r"(0 << TPXR_WAY_SHIFT));
		for (loop = 0; loop < 64; loop++) {
			/* probe the TLB line and pick up its contents from tplr/tppr */
			asm volatile("tlbpr %0,gr0,#1,#0" :: "r"(loop << PAGE_SHIFT));
			asm volatile("movsg tplr,%0" : "=r"(p->L));
			asm volatile("movsg tppr,%0" : "=r"(p->P));
			p++;
		}

		/* way 1 */
		asm volatile("movgs %0,tpxr" :: "r"(1 << TPXR_WAY_SHIFT));
		for (loop = 0; loop < 64; loop++) {
			asm volatile("tlbpr %0,gr0,#1,#0" :: "r"(loop << PAGE_SHIFT));
			asm volatile("movsg tplr,%0" : "=r"(p->L));
			asm volatile("movsg tppr,%0" : "=r"(p->P));
			p++;
		}

		/* restore the probe registers disturbed by the scan */
		asm volatile("movgs %0,tplr" :: "r"(__debug_mmu.regs.tplr));
		asm volatile("movgs %0,tppr" :: "r"(__debug_mmu.regs.tppr));
		asm volatile("movgs %0,tpxr" :: "r"(__debug_mmu.regs.tpxr));
	} while(0);
#endif

} /* end gdbstub_get_mmu_state() */
/*
* handle general query commands of the form 'qXXXXX'
*/
/*
 * handle general query commands of the form 'qXXXXX'
 * - writes the reply into output_buffer; unknown queries get "E01"
 */
static void gdbstub_handle_query(void)
{
	if (strcmp(input_buffer, "qAttached") == 0) {
		/* we are always attached to the (already running) kernel */
		sprintf(output_buffer, "1");
		return;
	}

	if (strcmp(input_buffer, "qC") == 0) {
		/* return current thread ID */
		sprintf(output_buffer, "QC 0");
		return;
	}

	if (strcmp(input_buffer, "qOffsets") == 0) {
		/* return relocation offset of text and data segments */
		sprintf(output_buffer, "Text=0;Data=0;Bss=0");
		return;
	}

	if (strcmp(input_buffer, "qSymbol::") == 0) {
		sprintf(output_buffer, "OK");
		return;
	}

	if (strcmp(input_buffer, "qSupported") == 0) {
		/* query of supported features
		 * - sizeof() yields size_t, so %zu must be used rather than %u
		 *   to avoid a format/argument type mismatch
		 */
		sprintf(output_buffer, "PacketSize=%zu;ReverseContinue-;ReverseStep-",
			sizeof(input_buffer));
		return;
	}

	gdbstub_strcpy(output_buffer,"E01");
}
/*****************************************************************************/
/*
* handle event interception and GDB remote protocol processing
* - on entry:
* PSR.ET==0, PSR.S==1 and the CPU is in debug mode
* __debug_frame points to the saved registers
* __frame points to the kernel mode exception frame, if it was in kernel
* mode when the break happened
*/
/*
 * Field a debug event: identify what happened, report a stop packet to the
 * remote GDB, then loop servicing remote-protocol commands until GDB resumes
 * the target ('c', 's', 'k' or the console-write fast path).
 * - sigval < 0 means speculative entry (serial poll); only proceed if
 *   CONFIG_GDBSTUB_IMMEDIATE is configured
 */
void gdbstub(int sigval)
{
	unsigned long addr, length, loop, dbar, temp, temp2, temp3;
	uint32_t zero;
	char *ptr;
	int flush_cache = 0;

	LEDS(0x5000);

	if (sigval < 0) {
#ifndef CONFIG_GDBSTUB_IMMEDIATE
		/* return immediately if GDB immediate activation option not set */
		return;
#else
		sigval = SIGINT;
#endif
	}

	/* snapshot the full user-visible register context for 'g'/'G' */
	save_user_regs(&__debug_frame0->uc);

#if 0
	gdbstub_printk("--> gdbstub() %08x %p %08x %08x\n",
		       __debug_frame->pc,
		       __debug_frame,
		       __debug_regs->brr,
		       __debug_regs->bpsr);
//	gdbstub_show_regs();
#endif

	LEDS(0x5001);

	/* if we were interrupted by input on the serial gdbstub serial port,
	 * restore the context prior to the interrupt so that we return to that
	 * directly
	 */
	temp = (unsigned long) __entry_kerneltrap_table;
	temp2 = (unsigned long) __entry_usertrap_table;
	temp3 = __debug_frame->pc & ~15;

	if (temp3 == temp + TBR_TT_INTERRUPT_15 ||
	    temp3 == temp2 + TBR_TT_INTERRUPT_15
	    ) {
		/* rewind to the interrupted instruction and recompute PSR */
		asm volatile("movsg pcsr,%0" : "=r"(__debug_frame->pc));
		__debug_frame->psr |= PSR_ET;
		__debug_frame->psr &= ~PSR_S;
		if (__debug_frame->psr & PSR_PS)
			__debug_frame->psr |= PSR_S;
		__debug_status.brr = (__debug_frame->tbr & TBR_TT) << 12;
		__debug_status.brr |= BRR_EB;
		sigval = SIGINT;
	}

	/* handle the decrement timer going off (FR451 only) */
	if (temp3 == temp + TBR_TT_DECREMENT_TIMER ||
	    temp3 == temp2 + TBR_TT_DECREMENT_TIMER
	    ) {
		/* re-arm the timer, then unwind as for the interrupt case above */
		asm volatile("movgs %0,timerd" :: "r"(10000000));
		asm volatile("movsg pcsr,%0" : "=r"(__debug_frame->pc));
		__debug_frame->psr |= PSR_ET;
		__debug_frame->psr &= ~PSR_S;
		if (__debug_frame->psr & PSR_PS)
			__debug_frame->psr |= PSR_S;
		__debug_status.brr = (__debug_frame->tbr & TBR_TT) << 12;
		__debug_status.brr |= BRR_EB;
		sigval = SIGXCPU;
	}

	LEDS(0x5002);

	/* after a BREAK insn, the PC lands on the far side of it */
	if (__debug_status.brr & BRR_SB)
		gdbstub_check_breakpoint();

	LEDS(0x5003);

	/* handle attempts to write console data via GDB "O" commands */
	if (__debug_frame->pc == (unsigned long) gdbstub_console_write + 4) {
		__gdbstub_console_write((struct console *) __debug_frame->gr8,
					(const char *) __debug_frame->gr9,
					(unsigned) __debug_frame->gr10);
		goto done;
	}

	/* a previously abandoned Tx left a char pending: go straight to the
	 * command loop */
	if (gdbstub_rx_unget) {
		sigval = SIGINT;
		goto packet_waiting;
	}

	if (!sigval)
		sigval = gdbstub_compute_signal(__debug_status.brr);

	LEDS(0x5004);

	/* send a message to the debugger's user saying what happened if it may
	 * not be clear cut (we can't map exceptions onto signals properly)
	 */
	if (sigval != SIGINT && sigval != SIGTRAP && sigval != SIGILL) {
		static const char title[] = "Break ";
		static const char crlf[] = "\r\n";
		unsigned long brr = __debug_status.brr;
		char hx;

		/* build an 'O' (console output) packet; its payload must be
		 * hex-encoded, so the ASCII digits of BRR are themselves
		 * re-encoded by hex_byte_pack() */
		ptr = output_buffer;
		*ptr++ = 'O';
		ptr = mem2hex(title, ptr, sizeof(title) - 1,0);

		hx = hex_asc_hi(brr >> 24);
		ptr = hex_byte_pack(ptr, hx);
		hx = hex_asc_lo(brr >> 24);
		ptr = hex_byte_pack(ptr, hx);
		hx = hex_asc_hi(brr >> 16);
		ptr = hex_byte_pack(ptr, hx);
		hx = hex_asc_lo(brr >> 16);
		ptr = hex_byte_pack(ptr, hx);
		hx = hex_asc_hi(brr >> 8);
		ptr = hex_byte_pack(ptr, hx);
		hx = hex_asc_lo(brr >> 8);
		ptr = hex_byte_pack(ptr, hx);
		hx = hex_asc_hi(brr);
		ptr = hex_byte_pack(ptr, hx);
		hx = hex_asc_lo(brr);
		ptr = hex_byte_pack(ptr, hx);

		ptr = mem2hex(crlf, ptr, sizeof(crlf) - 1, 0);
		*ptr = 0;
		gdbstub_send_packet(output_buffer);	/* send it off... */
	}

	LEDS(0x5005);

	/* tell the debugger that an exception has occurred */
	ptr = output_buffer;

	/* Send trap type (converted to signal) */
	*ptr++ = 'T';
	ptr = hex_byte_pack(ptr, sigval);

	/* Send Error PC */
	ptr = hex_byte_pack(ptr, GDB_REG_PC);
	*ptr++ = ':';
	ptr = mem2hex(&__debug_frame->pc, ptr, 4, 0);
	*ptr++ = ';';

	/*
	 * Send frame pointer
	 */
	ptr = hex_byte_pack(ptr, GDB_REG_FP);
	*ptr++ = ':';
	ptr = mem2hex(&__debug_frame->fp, ptr, 4, 0);
	*ptr++ = ';';

	/*
	 * Send stack pointer
	 */
	ptr = hex_byte_pack(ptr, GDB_REG_SP);
	*ptr++ = ':';
	ptr = mem2hex(&__debug_frame->sp, ptr, 4, 0);
	*ptr++ = ';';

	*ptr++ = 0;
	gdbstub_send_packet(output_buffer);	/* send it off... */

	LEDS(0x5006);

 packet_waiting:
	gdbstub_get_mmu_state();

	/* wait for input from remote GDB */
	while (1) {
		output_buffer[0] = 0;

		LEDS(0x5007);
		gdbstub_recv_packet(input_buffer);
		LEDS(0x5600 | input_buffer[0]);

		switch (input_buffer[0]) {
			/* request repeat of last signal number */
		case '?':
			output_buffer[0] = 'S';
			output_buffer[1] = hex_asc_hi(sigval);
			output_buffer[2] = hex_asc_lo(sigval);
			output_buffer[3] = 0;
			break;

		case 'd':
			/* toggle debug flag */
			break;

			/* return the value of the CPU registers
			 * - GR0, GR1, GR2, GR3, GR4, GR5, GR6, GR7,
			 * - GR8, GR9, GR10, GR11, GR12, GR13, GR14, GR15,
			 * - GR16, GR17, GR18, GR19, GR20, GR21, GR22, GR23,
			 * - GR24, GR25, GR26, GR27, GR28, GR29, GR30, GR31,
			 * - GR32, GR33, GR34, GR35, GR36, GR37, GR38, GR39,
			 * - GR40, GR41, GR42, GR43, GR44, GR45, GR46, GR47,
			 * - GR48, GR49, GR50, GR51, GR52, GR53, GR54, GR55,
			 * - GR56, GR57, GR58, GR59, GR60, GR61, GR62, GR63,
			 * - FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
			 * - FP8, FP9, FP10, FP11, FP12, FP13, FP14, FP15,
			 * - FP16, FP17, FP18, FP19, FP20, FP21, FP22, FP23,
			 * - FP24, FP25, FP26, FP27, FP28, FP29, FP30, FP31,
			 * - FP32, FP33, FP34, FP35, FP36, FP37, FP38, FP39,
			 * - FP40, FP41, FP42, FP43, FP44, FP45, FP46, FP47,
			 * - FP48, FP49, FP50, FP51, FP52, FP53, FP54, FP55,
			 * - FP56, FP57, FP58, FP59, FP60, FP61, FP62, FP63,
			 * - PC, PSR, CCR, CCCR,
			 * - _X132, _X133, _X134
			 * - TBR, BRR, DBAR0, DBAR1, DBAR2, DBAR3,
			 * - _X141, _X142, _X143, _X144,
			 * - LR, LCR
			 */
		case 'g':
			zero = 0;
			ptr = output_buffer;

			/* deal with GR0, GR1-GR27, GR28-GR31, GR32-GR63 */
			ptr = mem2hex(&zero, ptr, 4, 0);

			for (loop = 1; loop <= 27; loop++)
				ptr = mem2hex(&__debug_user_context->i.gr[loop], ptr, 4, 0);
			/* GR28 slot carries the exception frame pointer */
			temp = (unsigned long) __frame;
			ptr = mem2hex(&temp, ptr, 4, 0);

			ptr = mem2hex(&__debug_user_context->i.gr[29], ptr, 4, 0);
			ptr = mem2hex(&__debug_user_context->i.gr[30], ptr, 4, 0);
#ifdef CONFIG_MMU
			ptr = mem2hex(&__debug_user_context->i.gr[31], ptr, 4, 0);
#else
			temp = (unsigned long) __debug_frame;
			ptr = mem2hex(&temp, ptr, 4, 0);
#endif

			for (loop = 32; loop <= 63; loop++)
				ptr = mem2hex(&__debug_user_context->i.gr[loop], ptr, 4, 0);

			/* deal with FR0-FR63 */
			for (loop = 0; loop <= 63; loop++)
				ptr = mem2hex(&__debug_user_context->f.fr[loop], ptr, 4, 0);

			/* deal with special registers */
			ptr = mem2hex(&__debug_frame->pc, ptr, 4, 0);
			ptr = mem2hex(&__debug_frame->psr, ptr, 4, 0);
			ptr = mem2hex(&__debug_frame->ccr, ptr, 4, 0);
			ptr = mem2hex(&__debug_frame->cccr, ptr, 4, 0);
			/* _X132.._X134 have no backing register: report zero */
			ptr = mem2hex(&zero, ptr, 4, 0);
			ptr = mem2hex(&zero, ptr, 4, 0);
			ptr = mem2hex(&zero, ptr, 4, 0);
			ptr = mem2hex(&__debug_frame->tbr, ptr, 4, 0);
			ptr = mem2hex(&__debug_status.brr , ptr, 4, 0);

			asm volatile("movsg dbar0,%0" : "=r"(dbar));
			ptr = mem2hex(&dbar, ptr, 4, 0);
			asm volatile("movsg dbar1,%0" : "=r"(dbar));
			ptr = mem2hex(&dbar, ptr, 4, 0);
			asm volatile("movsg dbar2,%0" : "=r"(dbar));
			ptr = mem2hex(&dbar, ptr, 4, 0);
			asm volatile("movsg dbar3,%0" : "=r"(dbar));
			ptr = mem2hex(&dbar, ptr, 4, 0);

			asm volatile("movsg scr0,%0" : "=r"(dbar));
			ptr = mem2hex(&dbar, ptr, 4, 0);
			asm volatile("movsg scr1,%0" : "=r"(dbar));
			ptr = mem2hex(&dbar, ptr, 4, 0);
			asm volatile("movsg scr2,%0" : "=r"(dbar));
			ptr = mem2hex(&dbar, ptr, 4, 0);
			asm volatile("movsg scr3,%0" : "=r"(dbar));
			ptr = mem2hex(&dbar, ptr, 4, 0);

			ptr = mem2hex(&__debug_frame->lr, ptr, 4, 0);
			ptr = mem2hex(&__debug_frame->lcr, ptr, 4, 0);

			ptr = mem2hex(&__debug_frame->iacc0, ptr, 8, 0);

			ptr = mem2hex(&__debug_user_context->f.fsr[0], ptr, 4, 0);

			for (loop = 0; loop <= 7; loop++)
				ptr = mem2hex(&__debug_user_context->f.acc[loop], ptr, 4, 0);
			ptr = mem2hex(&__debug_user_context->f.accg, ptr, 8, 0);
			for (loop = 0; loop <= 1; loop++)
				ptr = mem2hex(&__debug_user_context->f.msr[loop], ptr, 4, 0);

			ptr = mem2hex(&__debug_frame->gner0, ptr, 4, 0);
			ptr = mem2hex(&__debug_frame->gner1, ptr, 4, 0);

			ptr = mem2hex(&__debug_user_context->f.fner[0], ptr, 4, 0);
			ptr = mem2hex(&__debug_user_context->f.fner[1], ptr, 4, 0);

			break;

			/* set the values of the CPU registers */
		case 'G':
			ptr = &input_buffer[1];

			/* deal with GR0, GR1-GR27, GR28-GR31, GR32-GR63 */
			ptr = hex2mem(ptr, &temp, 4);

			for (loop = 1; loop <= 27; loop++)
				ptr = hex2mem(ptr, &__debug_user_context->i.gr[loop], 4);

			ptr = hex2mem(ptr, &temp, 4);
			__frame = (struct pt_regs *) temp;

			ptr = hex2mem(ptr, &__debug_frame->gr29, 4);
			ptr = hex2mem(ptr, &__debug_frame->gr30, 4);
#ifdef CONFIG_MMU
			ptr = hex2mem(ptr, &__debug_frame->gr31, 4);
#else
			ptr = hex2mem(ptr, &temp, 4);
#endif

			for (loop = 32; loop <= 63; loop++)
				ptr = hex2mem(ptr, &__debug_user_context->i.gr[loop], 4);

			/* deal with FR0-FR63 */
			/* NOTE(review): this uses mem2hex while every other field in
			 * the 'G' handler is consumed with hex2mem; as written the FR
			 * registers are never updated (though ptr still advances by
			 * the right amount) - presumably this should be
			 * hex2mem(ptr, &...f.fr[loop], 4); confirm before changing */
			for (loop = 0; loop <= 63; loop++)
				ptr = mem2hex(&__debug_user_context->f.fr[loop], ptr, 4, 0);

			/* deal with special registers */
			ptr = hex2mem(ptr, &__debug_frame->pc, 4);
			ptr = hex2mem(ptr, &__debug_frame->psr, 4);
			ptr = hex2mem(ptr, &__debug_frame->ccr, 4);
			ptr = hex2mem(ptr, &__debug_frame->cccr,4);

			/* skip the dummy _X132.._X144 slots */
			for (loop = 132; loop <= 140; loop++)
				ptr = hex2mem(ptr, &temp, 4);

			ptr = hex2mem(ptr, &temp, 4);
			asm volatile("movgs %0,scr0" :: "r"(temp));
			ptr = hex2mem(ptr, &temp, 4);
			asm volatile("movgs %0,scr1" :: "r"(temp));
			ptr = hex2mem(ptr, &temp, 4);
			asm volatile("movgs %0,scr2" :: "r"(temp));
			ptr = hex2mem(ptr, &temp, 4);
			asm volatile("movgs %0,scr3" :: "r"(temp));

			ptr = hex2mem(ptr, &__debug_frame->lr, 4);
			ptr = hex2mem(ptr, &__debug_frame->lcr, 4);

			ptr = hex2mem(ptr, &__debug_frame->iacc0, 8);

			ptr = hex2mem(ptr, &__debug_user_context->f.fsr[0], 4);

			for (loop = 0; loop <= 7; loop++)
				ptr = hex2mem(ptr, &__debug_user_context->f.acc[loop], 4);
			ptr = hex2mem(ptr, &__debug_user_context->f.accg, 8);
			for (loop = 0; loop <= 1; loop++)
				ptr = hex2mem(ptr, &__debug_user_context->f.msr[loop], 4);

			ptr = hex2mem(ptr, &__debug_frame->gner0, 4);
			ptr = hex2mem(ptr, &__debug_frame->gner1, 4);

			ptr = hex2mem(ptr, &__debug_user_context->f.fner[0], 4);
			ptr = hex2mem(ptr, &__debug_user_context->f.fner[1], 4);

			gdbstub_strcpy(output_buffer,"OK");
			break;

			/* mAA..AA,LLLL Read LLLL bytes at address AA..AA */
		case 'm':
			ptr = &input_buffer[1];

			if (hexToInt(&ptr, &addr) &&
			    *ptr++ == ',' &&
			    hexToInt(&ptr, &length)
			    ) {
				if (mem2hex((char *)addr, output_buffer, length, 1))
					break;
				gdbstub_strcpy (output_buffer, "E03");
			}
			else {
				gdbstub_strcpy(output_buffer,"E01");
			}
			break;

			/* MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */
		case 'M':
			ptr = &input_buffer[1];

			if (hexToInt(&ptr, &addr) &&
			    *ptr++ == ',' &&
			    hexToInt(&ptr, &length) &&
			    *ptr++ == ':'
			    ) {
				if (hex2mem(ptr, (char *)addr, length)) {
					gdbstub_strcpy(output_buffer, "OK");
				}
				else {
					gdbstub_strcpy(output_buffer, "E03");
				}
			}
			else
				gdbstub_strcpy(output_buffer, "E02");

			/* the write may have planted a breakpoint insn */
			flush_cache = 1;
			break;

			/* pNN: Read value of reg N and return it */
		case 'p':
			/* return no value, indicating that we don't support
			 * this command and that gdb should use 'g' instead */
			break;

			/* PNN,=RRRRRRRR: Write value R to reg N return OK */
		case 'P':
			ptr = &input_buffer[1];

			if (!hexToInt(&ptr, &addr) ||
			    *ptr++ != '=' ||
			    !hexToInt(&ptr, &temp)
			    ) {
				gdbstub_strcpy(output_buffer, "E01");
				break;
			}

			/* temp2 flags whether the register number was recognised */
			temp2 = 1;
			switch (addr) {
			case GDB_REG_GR(0):
				break;
			case GDB_REG_GR(1) ... GDB_REG_GR(63):
				__debug_user_context->i.gr[addr - GDB_REG_GR(0)] = temp;
				break;
			case GDB_REG_FR(0) ... GDB_REG_FR(63):
				__debug_user_context->f.fr[addr - GDB_REG_FR(0)] = temp;
				break;
			case GDB_REG_PC:
				__debug_user_context->i.pc = temp;
				break;
			case GDB_REG_PSR:
				__debug_user_context->i.psr = temp;
				break;
			case GDB_REG_CCR:
				__debug_user_context->i.ccr = temp;
				break;
			case GDB_REG_CCCR:
				__debug_user_context->i.cccr = temp;
				break;
			case GDB_REG_BRR:
				__debug_status.brr = temp;
				break;
			case GDB_REG_LR:
				__debug_user_context->i.lr = temp;
				break;
			case GDB_REG_LCR:
				__debug_user_context->i.lcr = temp;
				break;
			case GDB_REG_FSR0:
				__debug_user_context->f.fsr[0] = temp;
				break;
			case GDB_REG_ACC(0) ... GDB_REG_ACC(7):
				__debug_user_context->f.acc[addr - GDB_REG_ACC(0)] = temp;
				break;
			case GDB_REG_ACCG(0):
				*(uint32_t *) &__debug_user_context->f.accg[0] = temp;
				break;
			case GDB_REG_ACCG(4):
				*(uint32_t *) &__debug_user_context->f.accg[4] = temp;
				break;
			case GDB_REG_MSR(0) ... GDB_REG_MSR(1):
				__debug_user_context->f.msr[addr - GDB_REG_MSR(0)] = temp;
				break;
			case GDB_REG_GNER(0) ... GDB_REG_GNER(1):
				__debug_user_context->i.gner[addr - GDB_REG_GNER(0)] = temp;
				break;
			case GDB_REG_FNER(0) ... GDB_REG_FNER(1):
				__debug_user_context->f.fner[addr - GDB_REG_FNER(0)] = temp;
				break;
			default:
				temp2 = 0;
				break;
			}

			if (temp2) {
				gdbstub_strcpy(output_buffer, "OK");
			}
			else {
				gdbstub_strcpy(output_buffer, "E02");
			}
			break;

			/* cAA..AA Continue at address AA..AA(optional) */
		case 'c':
			/* try to read optional parameter, pc unchanged if no parm */
			ptr = &input_buffer[1];
			if (hexToInt(&ptr, &addr))
				__debug_frame->pc = addr;
			goto done;

			/* kill the program */
		case 'k' :
			goto done;	/* just continue */

			/* detach */
		case 'D':
			gdbstub_strcpy(output_buffer, "OK");
			break;

			/* reset the whole machine (FIXME: system dependent) */
		case 'r':
			break;

			/* step to next instruction */
		case 's':
			/* NOTE(review): DCR_SE is set in both __debug_regs->dcr and
			 * the cached __debug_status.dcr; one of the two is presumably
			 * redundant - confirm against the debug-register restore path */
			__debug_regs->dcr |= DCR_SE;
			__debug_status.dcr |= DCR_SE;
			goto done;

			/* extended command */
		case 'v':
			if (strcmp(input_buffer, "vCont?") == 0) {
				output_buffer[0] = 0;
				break;
			}
			goto unsupported_cmd;

			/* set baud rate (bBB) */
		case 'b':
			ptr = &input_buffer[1];
			if (!hexToInt(&ptr, &temp)) {
				gdbstub_strcpy(output_buffer,"B01");
				break;
			}

			if (temp) {
				/* ack before changing speed */
				gdbstub_send_packet("OK");
				gdbstub_set_baud(temp);
			}
			break;

			/* set breakpoint */
		case 'Z':
			ptr = &input_buffer[1];

			if (!hexToInt(&ptr,&temp) || *ptr++ != ',' ||
			    !hexToInt(&ptr,&addr) || *ptr++ != ',' ||
			    !hexToInt(&ptr,&length)
			    ) {
				gdbstub_strcpy(output_buffer,"E01");
				break;
			}

			/* only types 0-4 (sw/hw break, write/read/access watch) */
			if (temp >= 5) {
				gdbstub_strcpy(output_buffer,"E03");
				break;
			}

			if (gdbstub_set_breakpoint(temp, addr, length) < 0) {
				gdbstub_strcpy(output_buffer,"E03");
				break;
			}

			if (temp == 0)
				flush_cache = 1; /* soft bkpt by modified memory */

			gdbstub_strcpy(output_buffer,"OK");
			break;

			/* clear breakpoint */
		case 'z':
			ptr = &input_buffer[1];

			if (!hexToInt(&ptr,&temp) || *ptr++ != ',' ||
			    !hexToInt(&ptr,&addr) || *ptr++ != ',' ||
			    !hexToInt(&ptr,&length)
			    ) {
				gdbstub_strcpy(output_buffer,"E01");
				break;
			}

			if (temp >= 5) {
				gdbstub_strcpy(output_buffer,"E03");
				break;
			}

			if (gdbstub_clear_breakpoint(temp, addr, length) < 0) {
				gdbstub_strcpy(output_buffer,"E03");
				break;
			}

			if (temp == 0)
				flush_cache = 1; /* soft bkpt by modified memory */

			gdbstub_strcpy(output_buffer,"OK");
			break;

			/* Thread-setting packet */
		case 'H':
			gdbstub_strcpy(output_buffer, "OK");
			break;

		case 'q':
			gdbstub_handle_query();
			break;

		default:
		unsupported_cmd:
			gdbstub_proto("### GDB Unsupported Cmd '%s'\n",input_buffer);
			gdbstub_strcpy(output_buffer,"E01");
			break;
		}

		/* reply to the request */
		LEDS(0x5009);
		gdbstub_send_packet(output_buffer);
	}

 done:
	/* resume the interrupted context */
	restore_user_regs(&__debug_frame0->uc);

	//gdbstub_dump_debugregs();
	//gdbstub_printk("<-- gdbstub() %08x\n", __debug_frame->pc);

	/* need to flush the instruction cache before resuming, as we may have
	 * deposited a breakpoint, and the icache probably has no way of
	 * knowing that a data ref to some location may have changed something
	 * that is in the instruction cache. NB: We flush both caches, just to
	 * be sure...
	 */
	/* note: flushing the icache will clobber EAR0 on the FR451 */
	if (flush_cache)
		gdbstub_purge_cache();

	LEDS(0x5666);

} /* end gdbstub() */
/*****************************************************************************/
/*
* initialise the GDB stub
*/
/*
 * initialise the GDB stub: bring up the debug serial port, announce
 * ourselves and (with CONFIG_GDBSTUB_IMMEDIATE) drain any packet a
 * pre-started GDB may already have sent
 */
void __init gdbstub_init(void)
{
#ifdef CONFIG_GDBSTUB_IMMEDIATE
	unsigned char ch;
	int ret;
#endif

	gdbstub_printk("%s", gdbstub_banner);

	gdbstub_io_init();

	/* try to talk to GDB (or anyone insane enough to want to type GDB protocol by hand) */
	gdbstub_proto("### GDB Tx ACK\n");
	gdbstub_tx_char('+'); /* 'hello world' */

#ifdef CONFIG_GDBSTUB_IMMEDIATE
	gdbstub_printk("GDB Stub waiting for packet\n");

	/*
	 * In case GDB is started before us, ack any packets
	 * (presumably "$?#xx") sitting there.
	 */
	do { gdbstub_rx_char(&ch, 0); } while (ch != '$');
	do { gdbstub_rx_char(&ch, 0); } while (ch != '#');
	do { ret = gdbstub_rx_char(&ch, 0); } while (ret != 0); /* eat first csum byte */
	do { ret = gdbstub_rx_char(&ch, 0); } while (ret != 0); /* eat second csum byte */

	gdbstub_proto("### GDB Tx NAK\n");
	gdbstub_tx_char('-'); /* nak it */

#else
	gdbstub_printk("GDB Stub set\n");
#endif

#if 0
	/* send banner */
	ptr = output_buffer;
	*ptr++ = 'O';
	ptr = mem2hex(gdbstub_banner, ptr, sizeof(gdbstub_banner) - 1, 0);
	gdbstub_send_packet(output_buffer);
#endif
#if defined(CONFIG_GDB_CONSOLE) && defined(CONFIG_GDBSTUB_IMMEDIATE)
	register_console(&gdbstub_console);
#endif

} /* end gdbstub_init() */
/*****************************************************************************/
/*
* register the console at a more appropriate time
*/
#if defined (CONFIG_GDB_CONSOLE) && !defined(CONFIG_GDBSTUB_IMMEDIATE)
/* register the GDB console once the console layer is up (only when the stub
 * was not activated immediately at boot) */
static int __init gdbstub_postinit(void)
{
	printk("registering console\n");
	register_console(&gdbstub_console);
	return 0;
} /* end gdbstub_postinit() */

__initcall(gdbstub_postinit);
#endif
/*****************************************************************************/
/*
* send an exit message to GDB
*/
/*
 * send an exit message to GDB
 * - emits a raw "W<status>" packet (exit-status stop reply) followed by a
 *   deliberate NAK to force the output through before the boot loader can
 *   interfere with the serial line
 */
void gdbstub_exit(int status)
{
	unsigned char csum;
	unsigned char c;
	int ix;

	sprintf(output_buffer,"W%02x",status&0xff);

	/* hand-roll the $<payload>#<checksum> framing */
	gdbstub_tx_char('$');
	csum = 0;

	for (ix = 0; (c = output_buffer[ix]) != 0; ix++) {
		gdbstub_tx_char(c);
		csum += c;
	}

	gdbstub_tx_char('#');
	gdbstub_tx_char(hex_asc_hi(csum));
	gdbstub_tx_char(hex_asc_lo(csum));

	/* make sure the output is flushed, or else RedBoot might clobber it */
	gdbstub_tx_char('-');
	gdbstub_tx_flush();

} /* end gdbstub_exit() */
/*****************************************************************************/
/*
* GDB wants to call malloc() and free() to allocate memory for calling kernel
* functions directly from its command line
*/
/* atomic-context allocator wrapper so GDB can evaluate "call malloc(...)" */
static void *malloc(size_t size) __maybe_unused;
static void *malloc(size_t size)
{
	return kmalloc(size, GFP_ATOMIC);
}
/* matching release wrapper for GDB command-line allocations */
static void free(void *p) __maybe_unused;
static void free(void *p)
{
	kfree(p);
}
/* read hardware status register 0 (callable from the GDB command line) */
static uint32_t ___get_HSR0(void) __maybe_unused;
static uint32_t ___get_HSR0(void)
{
	return __get_HSR(0);
}
/* write hardware status register 0 and return the value it reads back as
 * (callable from the GDB command line) */
static uint32_t ___set_HSR0(uint32_t x) __maybe_unused;
static uint32_t ___set_HSR0(uint32_t x)
{
	__set_HSR(0, x);
	return __get_HSR(0);
}
| gpl-2.0 |
DoriKal/linux | arch/frv/kernel/gdb-stub.c | 9090 | 56303 | /* gdb-stub.c: FRV GDB stub
*
* Copyright (C) 2003,4 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
* - Derived from Linux/MIPS version, Copyright (C) 1995 Andreas Busse
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* To enable debugger support, two things need to happen. One, a
* call to set_debug_traps() is necessary in order to allow any breakpoints
* or error conditions to be properly intercepted and reported to gdb.
* Two, a breakpoint needs to be generated to begin communication. This
* is most easily accomplished by a call to breakpoint(). Breakpoint()
* simulates a breakpoint by executing a BREAK instruction.
*
*
* The following gdb commands are supported:
*
* command function Return value
*
* g return the value of the CPU registers hex data or ENN
* G set the value of the CPU registers OK or ENN
*
* mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN
* MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN
*
* c Resume at current address SNN ( signal NN)
* cAA..AA Continue at address AA..AA SNN
*
* s Step one instruction SNN
* sAA..AA Step one instruction from AA..AA SNN
*
* k kill
*
* ? What was the last sigval ? SNN (signal NN)
*
* bBB..BB Set baud rate to BB..BB OK or BNN, then sets
* baud rate
*
* All commands and responses are sent with a packet which includes a
* checksum. A packet consists of
*
* $<packet info>#<checksum>.
*
* where
* <packet info> :: <characters representing the command or response>
* <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>>
*
* When a packet is received, it is first acknowledged with either '+' or '-'.
* '+' indicates a successful transfer. '-' indicates a failed transfer.
*
* Example:
*
* Host: Reply:
* $m0,10#2a +$00010203040506070809101112131415#42
*
*
* ==============
* MORE EXAMPLES:
* ==============
*
* For reference -- the following are the steps that one
* company took (RidgeRun Inc) to get remote gdb debugging
* going. In this scenario the host machine was a PC and the
* target platform was a Galileo EVB64120A MIPS evaluation
* board.
*
* Step 1:
* First download gdb-5.0.tar.gz from the internet.
* and then build/install the package.
*
* Example:
* $ tar zxf gdb-5.0.tar.gz
* $ cd gdb-5.0
* $ ./configure --target=frv-elf-gdb
* $ make
* $ frv-elf-gdb
*
* Step 2:
* Configure linux for remote debugging and build it.
*
* Example:
* $ cd ~/linux
* $ make menuconfig <go to "Kernel Hacking" and turn on remote debugging>
* $ make vmlinux
*
* Step 3:
* Download the kernel to the remote target and start
* the kernel running. It will promptly halt and wait
* for the host gdb session to connect. It does this
* since the "Kernel Hacking" option has defined
* CONFIG_REMOTE_DEBUG which in turn enables your calls
* to:
* set_debug_traps();
* breakpoint();
*
* Step 4:
* Start the gdb session on the host.
*
* Example:
* $ frv-elf-gdb vmlinux
* (gdb) set remotebaud 115200
* (gdb) target remote /dev/ttyS1
* ...at this point you are connected to
* the remote target and can use gdb
* in the normal fasion. Setting
* breakpoints, single stepping,
* printing variables, etc.
*
*/
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/nmi.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/gdb-stub.h>
#define LEDS(x) do { /* *(u32*)0xe1200004 = ~(x); mb(); */ } while(0)
#undef GDBSTUB_DEBUG_PROTOCOL
extern void debug_to_serial(const char *p, int n);
extern void gdbstub_console_write(struct console *co, const char *p, unsigned n);
extern volatile uint32_t __break_error_detect[3]; /* ESFR1, ESR15, EAR15 */
/* cached address-mapping register pair: lower (L) and protection (P) words */
struct __debug_amr {
	unsigned long L, P;
} __attribute__((aligned(8)));

/* snapshot of the MMU state, filled in by gdbstub_get_mmu_state() so that
 * GDB can examine it as ordinary memory */
struct __debug_mmu {
	struct {
		unsigned long hsr0, pcsr, esr0, ear0, epcr0;
#ifdef CONFIG_MMU
		unsigned long tplr, tppr, tpxr, cxnr;
#endif
	} regs;

	struct __debug_amr iamr[16];	/* instruction AMRs */
	struct __debug_amr damr[16];	/* data AMRs */
#ifdef CONFIG_MMU
	struct __debug_amr tlb[64*2];	/* TLB entries: two ways of 64 lines */
#endif
};

static struct __debug_mmu __debug_mmu;
/*
* BUFMAX defines the maximum number of characters in inbound/outbound buffers
* at least NUMREGBYTES*2 are needed for register packets
*/
#define BUFMAX 2048

#define BREAK_INSN 0x801000c0 /* use "break" as bkpt */

static const char gdbstub_banner[] = "Linux/FR-V GDB Stub (c) RedHat 2003\n";

/* ring buffer of characters received from the debug serial port */
volatile u8 gdbstub_rx_buffer[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
volatile u32 gdbstub_rx_inp = 0;
volatile u32 gdbstub_rx_outp = 0;
volatile u8 gdbstub_rx_overflow = 0;
u8 gdbstub_rx_unget = 0;	/* char pushed back by an abandoned Tx */

/* set with GDB whilst running to permit step through exceptions */
extern volatile u32 __attribute__((section(".bss"))) gdbstub_trace_through_exceptions;

/* protocol packet assembly buffers */
static char input_buffer[BUFMAX];
static char output_buffer[BUFMAX];

/* register names, in the order the words appear in the saved frame
 * (used by gdbstub_show_regs()) */
static const char *regnames[] = {
	"PSR ", "ISR ", "CCR ", "CCCR",
	"LR  ", "LCR ", "PC  ", "_stt",
	"sys ", "GR8*", "GNE0", "GNE1",
	"IACH", "IACL",
	"TBR ", "SP  ", "FP  ", "GR3 ",
	"GR4 ", "GR5 ", "GR6 ", "GR7 ",
	"GR8 ", "GR9 ", "GR10", "GR11",
	"GR12", "GR13", "GR14", "GR15",
	"GR16", "GR17", "GR18", "GR19",
	"GR20", "GR21", "GR22", "GR23",
	"GR24", "GR25", "GR26", "GR27",
	"EFRM", "CURR", "GR30", "BFRM"
};

/* record of one installed breakpoint, so it can be removed again */
struct gdbstub_bkpt {
	unsigned long	addr;		/* address of breakpoint */
	unsigned	len;		/* size of breakpoint */
	uint32_t	originsns[7];	/* original instructions */
};

static struct gdbstub_bkpt gdbstub_bkpts[256];
/*
* local prototypes
*/
static void gdbstub_recv_packet(char *buffer);
static int gdbstub_send_packet(char *buffer);
static int gdbstub_compute_signal(unsigned long tbr);
static int hex(unsigned char ch);
static int hexToInt(char **ptr, unsigned long *intValue);
static unsigned char *mem2hex(const void *mem, char *buf, int count, int may_fault);
static char *hex2mem(const char *buf, void *_mem, int count);
/*
* Convert ch from a hex digit to an int
*/
/*
 * Convert a hex digit character ('0'-'9', 'a'-'f', 'A'-'F') to its numeric
 * value; returns -1 for anything that is not a hex digit.
 */
static int hex(unsigned char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}
/*
 * printf-style output to the debug serial port, bypassing the console layer
 * - formats into a static holding buffer, then pushes the bytes out raw
 */
void gdbstub_printk(const char *fmt, ...)
{
	static char msgbuf[1024];
	va_list va;
	int n;

	/* Emit the output into the temporary buffer */
	va_start(va, fmt);
	n = vsnprintf(msgbuf, sizeof(msgbuf), fmt, va);
	va_end(va);

	debug_to_serial(msgbuf, n);
}
/* minimal strcpy replacement (the stub avoids the normal C library);
 * copies src including its NUL terminator and returns dst */
static inline char *gdbstub_strcpy(char *dst, const char *src)
{
	char *p = dst;

	while ((*p = *src) != 0) {
		p++;
		src++;
	}

	return dst;
}
/* flush the data cache and invalidate the instruction cache, so that freshly
 * planted/removed breakpoint instructions become visible to the execution
 * unit */
static void gdbstub_purge_cache(void)
{
	asm volatile(" dcef @(gr0,gr0),#1 \n"
		     " icei @(gr0,gr0),#1 \n"
		     " membar \n"
		     " bar \n"
		     );
}
/*****************************************************************************/
/*
* scan for the sequence $<data>#<checksum>
*/
/*
 * scan for the sequence $<data>#<checksum>
 * - blocks until a well-formed, checksum-correct packet has been received
 * - ACKs ('+') good packets and NAKs ('-') corrupt ones, per the GDB remote
 *   serial protocol; over-long or errored packets are silently retried
 */
static void gdbstub_recv_packet(char *buffer)
{
	unsigned char checksum;
	unsigned char xmitcsum;
	unsigned char ch;
	int count, i, ret, error;

	for (;;) {
		/* wait around for the start character, ignore all other characters */
		do {
			gdbstub_rx_char(&ch, 0);
		} while (ch != '$');

		checksum = 0;
		xmitcsum = -1;
		count = 0;
		error = 0;

		/* now, read until a # or end of buffer is found */
		while (count < BUFMAX) {
			ret = gdbstub_rx_char(&ch, 0);
			if (ret < 0)
				error = ret;

			if (ch == '#')
				break;
			checksum += ch;
			buffer[count] = ch;
			count++;
		}

		if (error == -EIO) {
			gdbstub_proto("### GDB Rx Error - Skipping packet ###\n");
			gdbstub_proto("### GDB Tx NAK\n");
			gdbstub_tx_char('-');
			continue;
		}

		/* drop over-long or errored packets and wait for a resend */
		if (count >= BUFMAX || error)
			continue;

		buffer[count] = 0;

		/* read the checksum */
		ret = gdbstub_rx_char(&ch, 0);
		if (ret < 0)
			error = ret;
		xmitcsum = hex(ch) << 4;

		ret = gdbstub_rx_char(&ch, 0);
		if (ret < 0)
			error = ret;
		xmitcsum |= hex(ch);

		if (error) {
			if (error == -EIO)
				gdbstub_proto("### GDB Rx Error - Skipping packet\n");
			gdbstub_proto("### GDB Tx NAK\n");
			gdbstub_tx_char('-');
			continue;
		}

		/* check the checksum */
		if (checksum != xmitcsum) {
			gdbstub_proto("### GDB Tx NAK\n");
			gdbstub_tx_char('-');	/* failed checksum */
			continue;
		}

		gdbstub_proto("### GDB Rx '$%s#%02x' ###\n", buffer, checksum);
		gdbstub_proto("### GDB Tx ACK\n");
		gdbstub_tx_char('+');	/* successful transfer */

		/* if a sequence char is present, reply the sequence ID */
		if (buffer[2] == ':') {
			gdbstub_tx_char(buffer[0]);
			gdbstub_tx_char(buffer[1]);

			/* remove sequence chars from buffer */
			count = 0;
			while (buffer[count]) count++;
			for (i=3; i <= count; i++)
				buffer[i - 3] = buffer[i];
		}

		break;
	}

} /* end gdbstub_recv_packet() */
/*****************************************************************************/
/*
* send the packet in buffer.
* - return 0 if successfully ACK'd
* - return 1 if abandoned due to new incoming packet
*/
/*
 * send the packet in buffer as "$<packet info>#<checksum>"
 * - retransmits until the remote end replies
 * - return 0 if successfully ACK'd ('+')
 * - return 1 if abandoned because a new incoming packet ('$') arrived;
 *   the '$' is pushed back via gdbstub_rx_unget for the receiver to see
 */
static int gdbstub_send_packet(char *buffer)
{
	unsigned char checksum;
	int count;
	unsigned char ch;

	/* $<packet info>#<checksum> */
	gdbstub_proto("### GDB Tx '%s' ###\n", buffer);

	do {
		gdbstub_tx_char('$');

		/* checksum is the modulo-256 sum of the payload bytes */
		checksum = 0;
		count = 0;

		while ((ch = buffer[count]) != 0) {
			gdbstub_tx_char(ch);
			checksum += ch;
			count += 1;
		}

		gdbstub_tx_char('#');
		gdbstub_tx_char(hex_asc_hi(checksum));
		gdbstub_tx_char(hex_asc_lo(checksum));

		/* the optional comma-expression operands below only log
		 * protocol traffic (each evaluates to 0); the last operand
		 * alone decides the loop: resend until '+' (ACK) or '$'
		 * (new packet) is received */
	} while (gdbstub_rx_char(&ch,0),
#ifdef GDBSTUB_DEBUG_PROTOCOL
		 ch=='-' && (gdbstub_proto("### GDB Rx NAK\n"),0),
		 ch!='-' && ch!='+' && (gdbstub_proto("### GDB Rx ??? %02x\n",ch),0),
#endif
		 ch!='+' && ch!='$');

	if (ch=='+') {
		gdbstub_proto("### GDB Rx ACK\n");
		return 0;
	}

	/* a fresh '$' arrived mid-exchange; give this packet up and let the
	 * receiver pick up the incoming one */
	gdbstub_proto("### GDB Tx Abandoned\n");
	gdbstub_rx_unget = ch;
	return 1;
} /* end gdbstub_send_packet() */
/*
* While we find nice hex chars, build an int.
* Return number of chars processed.
*/
static int hexToInt(char **ptr, unsigned long *_value)
{
int count = 0, ch;
*_value = 0;
while (**ptr) {
ch = hex(**ptr);
if (ch < 0)
break;
*_value = (*_value << 4) | ((uint8_t) ch & 0xf);
count++;
(*ptr)++;
}
return count;
}
/*****************************************************************************/
/*
* probe an address to see whether it maps to anything
*/
/*
 * probe an address to see whether it maps to anything
 * - returns 1 if accessible, 0 if not
 * - with the MMU enabled, the "lrad" insn looks the address up in the
 *   address-translation hardware and we test the valid bit of the result;
 *   without an MMU every address is considered accessible
 */
static inline int gdbstub_addr_probe(const void *vaddr)
{
#ifdef CONFIG_MMU
	unsigned long paddr;

	asm("lrad %1,%0,#1,#0,#0" : "=r"(paddr) : "r"(vaddr));
	if (!(paddr & xAMPRx_V))
		return 0;
#endif

	return 1;
} /* end gdbstub_addr_probe() */
#ifdef CONFIG_MMU

/* DAMPR2/DAMLR2 contents saved by gdbstub_addr_map() and restored by
 * gdbstub_addr_unmap() */
static unsigned long __saved_dampr, __saved_damlr;

/*
 * walk the page tables by hand to find the pte for a virtual address
 * - returns the pte value, or 0 if there is no usable mapping
 * - temporarily points DAMPR5 at the pte page, restoring it afterwards
 */
static inline unsigned long gdbstub_virt_to_pte(unsigned long vaddr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long val, dampr5;

	/* DAMLR3 gives the base of the page directory */
	pgd = (pgd_t *) __get_DAMLR(3) + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);

	if (pmd_bad(*pmd) || !pmd_present(*pmd))
		return 0;

	/* make sure dampr5 maps to the correct pmd */
	dampr5 = __get_DAMPR(5);
	val = pmd_val(*pmd);
	__set_DAMPR(5, val | xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | xAMPRx_V);

	/* now it's safe to access the pte page through DAMLR5 */
	pte = (pte_t *)__get_DAMLR(5) + __pte_index(vaddr);

	if (pte_present(*pte))
		val = pte_val(*pte);
	else
		val = 0;

	/* restore original dampr5 */
	__set_DAMPR(5, dampr5);

	return val;
}

#endif
/*
 * make sure vaddr is accessible before the fault-safe accessors touch it
 * - saves DAMPR2/DAMLR2 first (gdbstub_addr_unmap() restores them)
 * - if the address isn't already mapped, try to install a mapping through
 *   DAMPR2/DAMLR2 using the pte found by walking the page tables
 * - returns 1 on success, 0 if no mapping could be established
 */
static inline int gdbstub_addr_map(const void *vaddr)
{
#ifdef CONFIG_MMU
	unsigned long pte;

	__saved_dampr = __get_DAMPR(2);
	__saved_damlr = __get_DAMLR(2);
#endif
	if (gdbstub_addr_probe(vaddr))
		return 1;
#ifdef CONFIG_MMU
	pte = gdbstub_virt_to_pte((unsigned long) vaddr);
	if (pte) {
		__set_DAMPR(2, pte);
		__set_DAMLR(2, (unsigned long) vaddr & PAGE_MASK);
		return 1;
	}
#endif
	return 0;
}
/*
 * undo gdbstub_addr_map() by restoring the saved DAMPR2/DAMLR2 contents
 */
static inline void gdbstub_addr_unmap(void)
{
#ifdef CONFIG_MMU
	__set_DAMPR(2, __saved_dampr);
	__set_DAMLR(2, __saved_damlr);
#endif
}
/*
* access potentially dodgy memory through a potentially dodgy pointer
*/
/*
 * access potentially dodgy memory through a potentially dodgy pointer
 * - read a 32-bit word from addr into *_res
 * - BRR is cleared before the load and re-read afterwards, so a non-zero
 *   BRR indicates the access raised an exception
 * - returns 1 on success, 0 on fault or if the address cannot be mapped
 */
static inline int gdbstub_read_dword(const void *addr, uint32_t *_res)
{
	unsigned long brr;
	uint32_t res;

	if (!gdbstub_addr_map(addr))
		return 0;

	asm volatile(" movgs gr0,brr \n"
		     " ld%I2 %M2,%0 \n"
		     " movsg brr,%1 \n"
		     : "=r"(res), "=r"(brr)
		     : "m"(*(uint32_t *) addr));

	*_res = res;
	gdbstub_addr_unmap();
	return likely(!brr);
}
/*
 * write a 32-bit word to potentially dodgy memory
 * - BRR is cleared before the store and checked afterwards to detect faults
 * - returns 1 on success, 0 on fault or if the address cannot be mapped
 */
static inline int gdbstub_write_dword(void *addr, uint32_t val)
{
	unsigned long brr;

	if (!gdbstub_addr_map(addr))
		return 0;

	asm volatile(" movgs gr0,brr \n"
		     " st%I2 %1,%M2 \n"
		     " movsg brr,%0 \n"
		     : "=r"(brr)
		     : "r"(val), "m"(*(uint32_t *) addr));

	gdbstub_addr_unmap();
	return likely(!brr);
}
/*
 * read a 16-bit halfword from potentially dodgy memory into *_res
 * - BRR is cleared before the load and checked afterwards to detect faults
 * - returns 1 on success, 0 on fault or if the address cannot be mapped
 */
static inline int gdbstub_read_word(const void *addr, uint16_t *_res)
{
	unsigned long brr;
	uint16_t res;

	if (!gdbstub_addr_map(addr))
		return 0;

	asm volatile(" movgs gr0,brr \n"
		     " lduh%I2 %M2,%0 \n"
		     " movsg brr,%1 \n"
		     : "=r"(res), "=r"(brr)
		     : "m"(*(uint16_t *) addr));

	*_res = res;
	gdbstub_addr_unmap();
	return likely(!brr);
}
/*
 * write a 16-bit halfword to potentially dodgy memory
 * - BRR is cleared before the store and checked afterwards to detect faults
 * - returns 1 on success, 0 on fault or if the address cannot be mapped
 */
static inline int gdbstub_write_word(void *addr, uint16_t val)
{
	unsigned long brr;

	if (!gdbstub_addr_map(addr))
		return 0;

	asm volatile(" movgs gr0,brr \n"
		     " sth%I2 %1,%M2 \n"
		     " movsg brr,%0 \n"
		     : "=r"(brr)
		     : "r"(val), "m"(*(uint16_t *) addr));

	gdbstub_addr_unmap();
	return likely(!brr);
}
/*
 * read a byte from potentially dodgy memory into *_res
 * - BRR is cleared before the load and checked afterwards to detect faults
 * - returns 1 on success, 0 on fault or if the address cannot be mapped
 */
static inline int gdbstub_read_byte(const void *addr, uint8_t *_res)
{
	unsigned long brr;
	uint8_t res;

	if (!gdbstub_addr_map(addr))
		return 0;

	asm volatile(" movgs gr0,brr \n"
		     " ldub%I2 %M2,%0 \n"
		     " movsg brr,%1 \n"
		     : "=r"(res), "=r"(brr)
		     : "m"(*(uint8_t *) addr));

	*_res = res;
	gdbstub_addr_unmap();
	return likely(!brr);
}
/*
 * write a byte to potentially dodgy memory
 * - BRR is cleared before the store and checked afterwards to detect faults
 * - returns 1 on success, 0 on fault or if the address cannot be mapped
 */
static inline int gdbstub_write_byte(void *addr, uint8_t val)
{
	unsigned long brr;

	if (!gdbstub_addr_map(addr))
		return 0;

	asm volatile(" movgs gr0,brr \n"
		     " stb%I2 %1,%M2 \n"
		     " movsg brr,%0 \n"
		     : "=r"(brr)
		     : "r"(val), "m"(*(uint8_t *) addr));

	gdbstub_addr_unmap();
	return likely(!brr);
}
/*
 * hexify console output and send it to the remote GDB as 'O' packets
 * - outbuf is sized so a packet cannot overflow it: 'O' + up to 19 hex
 *   chars of payload + "0d" for a trailing newline + NUL <= 26 bytes
 */
static void __gdbstub_console_write(struct console *co, const char *p, unsigned n)
{
	char outbuf[26];
	int qty;

	outbuf[0] = 'O';

	while (n > 0) {
		qty = 1;

		while (n > 0 && qty < 20) {
			/* hexify exactly one source byte
			 * (this used to pass a count of 2, which made
			 * mem2hex() read one byte beyond the end of the
			 * n-byte buffer on the last iteration; the extra
			 * hex chars were then overwritten, so the output
			 * is unchanged by reading only one byte) */
			mem2hex(p, outbuf + qty, 1, 0);
			qty += 2;

			/* append a hexified CR ("0d") after each LF */
			if (*p == 0x0a) {
				outbuf[qty++] = '0';
				outbuf[qty++] = 'd';
			}

			p++;
			n--;
		}

		outbuf[qty] = 0;
		gdbstub_send_packet(outbuf);
	}
}
#if 0
void debug_to_serial(const char *p, int n)
{
gdbstub_console_write(NULL,p,n);
}
#endif
#ifdef CONFIG_GDB_CONSOLE
/* console device that forwards kernel console output to the remote GDB */
static struct console gdbstub_console = {
	.name	= "gdb",
	.write	= gdbstub_console_write,	/* in break.S */
	.flags	= CON_PRINTBUFFER,
	.index	= -1,
};
#endif
/*****************************************************************************/
/*
* Convert the memory pointed to by mem into hex, placing result in buf.
* - if successful, return a pointer to the last char put in buf (NUL)
* - in case of mem fault, return NULL
* may_fault is non-zero if we are reading from arbitrary memory, but is currently
* not used.
*/
static unsigned char *mem2hex(const void *_mem, char *buf, int count, int may_fault)
{
const uint8_t *mem = _mem;
uint8_t ch[4] __attribute__((aligned(4)));
if ((uint32_t)mem&1 && count>=1) {
if (!gdbstub_read_byte(mem,ch))
return NULL;
buf = hex_byte_pack(buf, ch[0]);
mem++;
count--;
}
if ((uint32_t)mem&3 && count>=2) {
if (!gdbstub_read_word(mem,(uint16_t *)ch))
return NULL;
buf = hex_byte_pack(buf, ch[0]);
buf = hex_byte_pack(buf, ch[1]);
mem += 2;
count -= 2;
}
while (count>=4) {
if (!gdbstub_read_dword(mem,(uint32_t *)ch))
return NULL;
buf = hex_byte_pack(buf, ch[0]);
buf = hex_byte_pack(buf, ch[1]);
buf = hex_byte_pack(buf, ch[2]);
buf = hex_byte_pack(buf, ch[3]);
mem += 4;
count -= 4;
}
if (count>=2) {
if (!gdbstub_read_word(mem,(uint16_t *)ch))
return NULL;
buf = hex_byte_pack(buf, ch[0]);
buf = hex_byte_pack(buf, ch[1]);
mem += 2;
count -= 2;
}
if (count>=1) {
if (!gdbstub_read_byte(mem,ch))
return NULL;
buf = hex_byte_pack(buf, ch[0]);
}
*buf = 0;
return buf;
} /* end mem2hex() */
/*****************************************************************************/
/*
* convert the hex array pointed to by buf into binary to be placed in mem
* return a pointer to the character AFTER the last byte of buffer consumed
*/
static char *hex2mem(const char *buf, void *_mem, int count)
{
uint8_t *mem = _mem;
union {
uint32_t l;
uint16_t w;
uint8_t b[4];
} ch;
if ((u32)mem&1 && count>=1) {
ch.b[0] = hex(*buf++) << 4;
ch.b[0] |= hex(*buf++);
if (!gdbstub_write_byte(mem,ch.b[0]))
return NULL;
mem++;
count--;
}
if ((u32)mem&3 && count>=2) {
ch.b[0] = hex(*buf++) << 4;
ch.b[0] |= hex(*buf++);
ch.b[1] = hex(*buf++) << 4;
ch.b[1] |= hex(*buf++);
if (!gdbstub_write_word(mem,ch.w))
return NULL;
mem += 2;
count -= 2;
}
while (count>=4) {
ch.b[0] = hex(*buf++) << 4;
ch.b[0] |= hex(*buf++);
ch.b[1] = hex(*buf++) << 4;
ch.b[1] |= hex(*buf++);
ch.b[2] = hex(*buf++) << 4;
ch.b[2] |= hex(*buf++);
ch.b[3] = hex(*buf++) << 4;
ch.b[3] |= hex(*buf++);
if (!gdbstub_write_dword(mem,ch.l))
return NULL;
mem += 4;
count -= 4;
}
if (count>=2) {
ch.b[0] = hex(*buf++) << 4;
ch.b[0] |= hex(*buf++);
ch.b[1] = hex(*buf++) << 4;
ch.b[1] |= hex(*buf++);
if (!gdbstub_write_word(mem,ch.w))
return NULL;
mem += 2;
count -= 2;
}
if (count>=1) {
ch.b[0] = hex(*buf++) << 4;
ch.b[0] |= hex(*buf++);
if (!gdbstub_write_byte(mem,ch.b[0]))
return NULL;
}
return (char *) buf;
} /* end hex2mem() */
/*****************************************************************************/
/*
* This table contains the mapping between FRV TBR.TT exception codes,
* and signals, which are primarily what GDB understands. It also
* indicates which hardware traps we need to commandeer when
* initializing the stub.
*/
/* first matching entry wins; the all-zero-mask entry terminates the table
 * and supplies the default signal */
static const struct brr_to_sig_map {
	unsigned long brr_mask;		/* BRR bitmask */
	unsigned long tbr_tt;		/* TBR.TT code (in BRR.EBTT) */
	unsigned int signo;		/* Signal that we map this into */
} brr_to_sig_map[] = {
	{ BRR_EB,	TBR_TT_INSTR_ACC_ERROR,	SIGSEGV		},
	{ BRR_EB,	TBR_TT_ILLEGAL_INSTR,	SIGILL		},
	{ BRR_EB,	TBR_TT_PRIV_INSTR,	SIGILL		},
	{ BRR_EB,	TBR_TT_MP_EXCEPTION,	SIGFPE		},
	{ BRR_EB,	TBR_TT_DATA_ACC_ERROR,	SIGSEGV		},
	{ BRR_EB,	TBR_TT_DATA_STR_ERROR,	SIGSEGV		},
	{ BRR_EB,	TBR_TT_DIVISION_EXCEP,	SIGFPE		},
	{ BRR_EB,	TBR_TT_COMPOUND_EXCEP,	SIGSEGV		},
	{ BRR_EB,	TBR_TT_INTERRUPT_13,	SIGALRM		},	/* watchdog */
	{ BRR_EB,	TBR_TT_INTERRUPT_14,	SIGINT		},	/* GDB serial */
	{ BRR_EB,	TBR_TT_INTERRUPT_15,	SIGQUIT		},	/* NMI */
	{ BRR_CB,	0,			SIGUSR1		},
	{ BRR_TB,	0,			SIGUSR2		},
	{ BRR_DBNEx,	0,			SIGTRAP		},
	{ BRR_DBx,	0,			SIGTRAP		},	/* h/w watchpoint */
	{ BRR_IBx,	0,			SIGTRAP		},	/* h/w breakpoint */
	{ BRR_CBB,	0,			SIGTRAP		},
	{ BRR_SB,	0,			SIGTRAP		},
	{ BRR_ST,	0,			SIGTRAP		},	/* single step */
	{ 0,		0,			SIGHUP		}	/* default */
};
/*****************************************************************************/
/*
* convert the FRV BRR register contents into a UNIX signal number
*/
/*
 * convert the FRV BRR register contents into a UNIX signal number by
 * scanning the brr_to_sig_map table for the first matching entry
 * - an entry with tbr_tt == 0 matches any exception code
 * - the table's zero-mask terminator supplies the default signal
 */
static inline int gdbstub_compute_signal(unsigned long brr)
{
	const struct brr_to_sig_map *entry = brr_to_sig_map;
	unsigned long ebtt = (brr & BRR_EBTT) >> 12;

	while (entry->brr_mask) {
		if ((entry->brr_mask & brr) &&
		    (!entry->tbr_tt || entry->tbr_tt == ebtt))
			break;
		entry++;
	}

	return entry->signo;
} /* end gdbstub_compute_signal() */
/*****************************************************************************/
/*
* set a software breakpoint or a hardware breakpoint or watchpoint
*/
/*
 * set a software breakpoint or a hardware breakpoint or watchpoint
 * - type 0: software breakpoint (BREAK insns patched over len/4 words)
 * - type 1: hardware instruction breakpoint (first free of IBAR0-3)
 * - type 2/3/4: hardware data write/read/access watchpoint (DBAR0-1)
 * - returns 0 on success or a negative errno
 */
static int gdbstub_set_breakpoint(unsigned long type, unsigned long addr, unsigned long len)
{
	unsigned long tmp;
	int bkpt, loop, xloop;

	union {
		struct {
			unsigned long mask0, mask1;
		};
		uint8_t bytes[8];
	} dbmr;

	//gdbstub_printk("setbkpt(%ld,%08lx,%ld)\n", type, addr, len);

	switch (type) {
		/* set software breakpoint */
	case 0:
		if (addr & 3 || len > 7*4)
			return -EINVAL;

		/* find a free slot (addr == 0 means unused) */
		for (bkpt = 255; bkpt >= 0; bkpt--)
			if (!gdbstub_bkpts[bkpt].addr)
				break;
		if (bkpt < 0)
			return -ENOSPC;

		/* save the original instructions before patching */
		for (loop = 0; loop < len/4; loop++)
			if (!gdbstub_read_dword(&((uint32_t *) addr)[loop],
						&gdbstub_bkpts[bkpt].originsns[loop]))
				return -EFAULT;

		for (loop = 0; loop < len/4; loop++)
			if (!gdbstub_write_dword(&((uint32_t *) addr)[loop],
						 BREAK_INSN)
			    ) {
				/* need to undo the changes if possible */
				for (xloop = 0; xloop < loop; xloop++)
					gdbstub_write_dword(&((uint32_t *) addr)[xloop],
							    gdbstub_bkpts[bkpt].originsns[xloop]);
				return -EFAULT;
			}

		gdbstub_bkpts[bkpt].addr = addr;
		gdbstub_bkpts[bkpt].len = len;

#if 0
		gdbstub_printk("Set BKPT[%02x]: %08lx #%d {%04x, %04x} -> { %04x, %04x }\n",
			       bkpt,
			       gdbstub_bkpts[bkpt].addr,
			       gdbstub_bkpts[bkpt].len,
			       gdbstub_bkpts[bkpt].originsns[0],
			       gdbstub_bkpts[bkpt].originsns[1],
			       ((uint32_t *) addr)[0],
			       ((uint32_t *) addr)[1]
			       );
#endif
		return 0;

		/* set hardware breakpoint
		 * - update both the shadow copy in __debug_regs and the live
		 *   register, and claim the first free IBAR */
	case 1:
		if (addr & 3 || len != 4)
			return -EINVAL;

		if (!(__debug_regs->dcr & DCR_IBE0)) {
			//gdbstub_printk("set h/w break 0: %08lx\n", addr);
			__debug_regs->dcr |= DCR_IBE0;
			__debug_regs->ibar[0] = addr;
			asm volatile("movgs %0,ibar0" : : "r"(addr));
			return 0;
		}

		if (!(__debug_regs->dcr & DCR_IBE1)) {
			//gdbstub_printk("set h/w break 1: %08lx\n", addr);
			__debug_regs->dcr |= DCR_IBE1;
			__debug_regs->ibar[1] = addr;
			asm volatile("movgs %0,ibar1" : : "r"(addr));
			return 0;
		}

		if (!(__debug_regs->dcr & DCR_IBE2)) {
			//gdbstub_printk("set h/w break 2: %08lx\n", addr);
			__debug_regs->dcr |= DCR_IBE2;
			__debug_regs->ibar[2] = addr;
			asm volatile("movgs %0,ibar2" : : "r"(addr));
			return 0;
		}

		if (!(__debug_regs->dcr & DCR_IBE3)) {
			//gdbstub_printk("set h/w break 3: %08lx\n", addr);
			__debug_regs->dcr |= DCR_IBE3;
			__debug_regs->ibar[3] = addr;
			asm volatile("movgs %0,ibar3" : : "r"(addr));
			return 0;
		}

		return -ENOSPC;

		/* set data read/write/access watchpoint */
	case 2:
	case 3:
	case 4:
		/* the watched range may not cross an 8-byte boundary */
		if ((addr & ~7) != ((addr + len - 1) & ~7))
			return -EINVAL;

		/* build the byte-lane mask: a 0x00 byte selects that lane
		 * for comparison, 0xff excludes it */
		tmp = addr & 7;

		memset(dbmr.bytes, 0xff, sizeof(dbmr.bytes));
		for (loop = 0; loop < len; loop++)
			dbmr.bytes[tmp + loop] = 0;

		addr &= ~7;

		/* claim the first free DBAR register set */
		if (!(__debug_regs->dcr & (DCR_DRBE0|DCR_DWBE0))) {
			//gdbstub_printk("set h/w watchpoint 0 type %ld: %08lx\n", type, addr);
			tmp = type==2 ? DCR_DWBE0 : type==3 ? DCR_DRBE0 : DCR_DRBE0|DCR_DWBE0;

			__debug_regs->dcr |= tmp;
			__debug_regs->dbar[0] = addr;
			__debug_regs->dbmr[0][0] = dbmr.mask0;
			__debug_regs->dbmr[0][1] = dbmr.mask1;
			__debug_regs->dbdr[0][0] = 0;
			__debug_regs->dbdr[0][1] = 0;

			asm volatile(" movgs %0,dbar0 \n"
				     " movgs %1,dbmr00 \n"
				     " movgs %2,dbmr01 \n"
				     " movgs gr0,dbdr00 \n"
				     " movgs gr0,dbdr01 \n"
				     : : "r"(addr), "r"(dbmr.mask0), "r"(dbmr.mask1));
			return 0;
		}

		if (!(__debug_regs->dcr & (DCR_DRBE1|DCR_DWBE1))) {
			//gdbstub_printk("set h/w watchpoint 1 type %ld: %08lx\n", type, addr);
			tmp = type==2 ? DCR_DWBE1 : type==3 ? DCR_DRBE1 : DCR_DRBE1|DCR_DWBE1;

			__debug_regs->dcr |= tmp;
			__debug_regs->dbar[1] = addr;
			__debug_regs->dbmr[1][0] = dbmr.mask0;
			__debug_regs->dbmr[1][1] = dbmr.mask1;
			__debug_regs->dbdr[1][0] = 0;
			__debug_regs->dbdr[1][1] = 0;

			asm volatile(" movgs %0,dbar1 \n"
				     " movgs %1,dbmr10 \n"
				     " movgs %2,dbmr11 \n"
				     " movgs gr0,dbdr10 \n"
				     " movgs gr0,dbdr11 \n"
				     : : "r"(addr), "r"(dbmr.mask0), "r"(dbmr.mask1));
			return 0;
		}

		return -ENOSPC;

	default:
		return -EINVAL;
	}
} /* end gdbstub_set_breakpoint() */
/*****************************************************************************/
/*
* clear a breakpoint or watchpoint
*/
/*
 * clear a breakpoint or watchpoint previously set by gdbstub_set_breakpoint()
 * - type 0: software breakpoint (original instructions restored)
 * - type 1: hardware instruction breakpoint
 * - type 2/3/4: hardware data write/read/access watchpoint
 * - returns 0 on success or a negative errno
 */
int gdbstub_clear_breakpoint(unsigned long type, unsigned long addr, unsigned long len)
{
	unsigned long tmp;
	int bkpt, loop;

	union {
		struct {
			unsigned long mask0, mask1;
		};
		uint8_t bytes[8];
	} dbmr;

	//gdbstub_printk("clearbkpt(%ld,%08lx,%ld)\n", type, addr, len);

	switch (type) {
		/* clear software breakpoint */
	case 0:
		/* find the slot recording this address/length */
		for (bkpt = 255; bkpt >= 0; bkpt--)
			if (gdbstub_bkpts[bkpt].addr == addr && gdbstub_bkpts[bkpt].len == len)
				break;
		if (bkpt < 0)
			return -ENOENT;

		/* release the slot and restore the original instructions */
		gdbstub_bkpts[bkpt].addr = 0;

		for (loop = 0; loop < len/4; loop++)
			if (!gdbstub_write_dword(&((uint32_t *) addr)[loop],
						 gdbstub_bkpts[bkpt].originsns[loop]))
				return -EFAULT;
		return 0;

		/* clear hardware breakpoint
		 * - check the live register (via __get_ibar()) rather than
		 *   the shadow copy before disabling */
	case 1:
		if (addr & 3 || len != 4)
			return -EINVAL;

#define __get_ibar(X) ({ unsigned long x; asm volatile("movsg ibar"#X",%0" : "=r"(x)); x; })

		if (__debug_regs->dcr & DCR_IBE0 && __get_ibar(0) == addr) {
			//gdbstub_printk("clear h/w break 0: %08lx\n", addr);
			__debug_regs->dcr &= ~DCR_IBE0;
			__debug_regs->ibar[0] = 0;
			asm volatile("movgs gr0,ibar0");
			return 0;
		}

		if (__debug_regs->dcr & DCR_IBE1 && __get_ibar(1) == addr) {
			//gdbstub_printk("clear h/w break 1: %08lx\n", addr);
			__debug_regs->dcr &= ~DCR_IBE1;
			__debug_regs->ibar[1] = 0;
			asm volatile("movgs gr0,ibar1");
			return 0;
		}

		if (__debug_regs->dcr & DCR_IBE2 && __get_ibar(2) == addr) {
			//gdbstub_printk("clear h/w break 2: %08lx\n", addr);
			__debug_regs->dcr &= ~DCR_IBE2;
			__debug_regs->ibar[2] = 0;
			asm volatile("movgs gr0,ibar2");
			return 0;
		}

		if (__debug_regs->dcr & DCR_IBE3 && __get_ibar(3) == addr) {
			//gdbstub_printk("clear h/w break 3: %08lx\n", addr);
			__debug_regs->dcr &= ~DCR_IBE3;
			__debug_regs->ibar[3] = 0;
			asm volatile("movgs gr0,ibar3");
			return 0;
		}

		return -EINVAL;

		/* clear data read/write/access watchpoint
		 * - rebuild the byte-lane mask the same way the set path did
		 *   so it can be compared against the live registers */
	case 2:
	case 3:
	case 4:
		if ((addr & ~7) != ((addr + len - 1) & ~7))
			return -EINVAL;

		tmp = addr & 7;

		memset(dbmr.bytes, 0xff, sizeof(dbmr.bytes));
		for (loop = 0; loop < len; loop++)
			dbmr.bytes[tmp + loop] = 0;

		addr &= ~7;

#define __get_dbar(X) ({ unsigned long x; asm volatile("movsg dbar"#X",%0" : "=r"(x)); x; })
#define __get_dbmr0(X) ({ unsigned long x; asm volatile("movsg dbmr"#X"0,%0" : "=r"(x)); x; })
#define __get_dbmr1(X) ({ unsigned long x; asm volatile("movsg dbmr"#X"1,%0" : "=r"(x)); x; })

		/* consider DBAR 0 */
		tmp = type==2 ? DCR_DWBE0 : type==3 ? DCR_DRBE0 : DCR_DRBE0|DCR_DWBE0;

		if ((__debug_regs->dcr & (DCR_DRBE0|DCR_DWBE0)) != tmp ||
		    __get_dbar(0) != addr ||
		    __get_dbmr0(0) != dbmr.mask0 ||
		    __get_dbmr1(0) != dbmr.mask1)
			goto skip_dbar0;

		//gdbstub_printk("clear h/w watchpoint 0 type %ld: %08lx\n", type, addr);
		__debug_regs->dcr &= ~(DCR_DRBE0|DCR_DWBE0);
		__debug_regs->dbar[0] = 0;
		__debug_regs->dbmr[0][0] = 0;
		__debug_regs->dbmr[0][1] = 0;
		__debug_regs->dbdr[0][0] = 0;
		__debug_regs->dbdr[0][1] = 0;

		asm volatile(" movgs gr0,dbar0 \n"
			     " movgs gr0,dbmr00 \n"
			     " movgs gr0,dbmr01 \n"
			     " movgs gr0,dbdr00 \n"
			     " movgs gr0,dbdr01 \n");
		return 0;

	skip_dbar0:
		/* consider DBAR 1 */
		tmp = type==2 ? DCR_DWBE1 : type==3 ? DCR_DRBE1 : DCR_DRBE1|DCR_DWBE1;

		if ((__debug_regs->dcr & (DCR_DRBE1|DCR_DWBE1)) != tmp ||
		    __get_dbar(1) != addr ||
		    __get_dbmr0(1) != dbmr.mask0 ||
		    __get_dbmr1(1) != dbmr.mask1)
			goto skip_dbar1;

		//gdbstub_printk("clear h/w watchpoint 1 type %ld: %08lx\n", type, addr);
		__debug_regs->dcr &= ~(DCR_DRBE1|DCR_DWBE1);
		__debug_regs->dbar[1] = 0;
		__debug_regs->dbmr[1][0] = 0;
		__debug_regs->dbmr[1][1] = 0;
		__debug_regs->dbdr[1][0] = 0;
		__debug_regs->dbdr[1][1] = 0;

		asm volatile(" movgs gr0,dbar1 \n"
			     " movgs gr0,dbmr10 \n"
			     " movgs gr0,dbmr11 \n"
			     " movgs gr0,dbdr10 \n"
			     " movgs gr0,dbdr11 \n");
		return 0;

	skip_dbar1:
		return -ENOSPC;

	default:
		return -EINVAL;
	}
} /* end gdbstub_clear_breakpoint() */
/*****************************************************************************/
/*
* check for an internal software breakpoint, and wind the PC back if necessary
*/
/*
 * if the PC sits just after one of our software breakpoints (i.e. the BREAK
 * insn has already executed), wind it back onto the breakpoint address
 */
static void gdbstub_check_breakpoint(void)
{
	unsigned long insn_addr = __debug_frame->pc - 4;
	int slot;

	for (slot = 255; slot >= 0; slot--) {
		if (gdbstub_bkpts[slot].addr == insn_addr) {
			__debug_frame->pc = insn_addr;
			break;
		}
	}
} /* end gdbstub_check_breakpoint() */
/*****************************************************************************/
/*
 * dump the saved register state of the current exception frame
*/
/*
 * dump the saved exception frame's registers to the console for debugging
 * the stub itself
 */
static void __maybe_unused gdbstub_show_regs(void)
{
	unsigned long *reg;
	int loop;

	gdbstub_printk("\n");

	gdbstub_printk("Frame: @%p [%s]\n",
		       __debug_frame,
		       __debug_frame->psr & PSR_S ? "kernel" : "user");

	/* treat the frame as an array of NR_PT_REGS words, five per line */
	reg = (unsigned long *) __debug_frame;
	for (loop = 0; loop < NR_PT_REGS; loop++) {
		printk("%s %08lx", regnames[loop + 0], reg[loop + 0]);

		if (loop == NR_PT_REGS - 1 || loop % 5 == 4)
			printk("\n");
		else
			printk(" | ");
	}

	gdbstub_printk("Process %s (pid: %d)\n", current->comm, current->pid);
} /* end gdbstub_show_regs() */
/*****************************************************************************/
/*
* dump debugging regs
*/
/*
 * dump the hardware debugging registers to the console
 * - uses the __get_ibar()/__get_dbar()/__get_dbmr*() register-fetch macros
 *   defined inside gdbstub_clear_breakpoint()
 */
static void __maybe_unused gdbstub_dump_debugregs(void)
{
	gdbstub_printk("DCR %08lx ", __debug_status.dcr);
	gdbstub_printk("BRR %08lx\n", __debug_status.brr);

	gdbstub_printk("IBAR0 %08lx ", __get_ibar(0));
	gdbstub_printk("IBAR1 %08lx ", __get_ibar(1));
	gdbstub_printk("IBAR2 %08lx ", __get_ibar(2));
	gdbstub_printk("IBAR3 %08lx\n", __get_ibar(3));

	gdbstub_printk("DBAR0 %08lx ", __get_dbar(0));
	gdbstub_printk("DBMR00 %08lx ", __get_dbmr0(0));
	gdbstub_printk("DBMR01 %08lx\n", __get_dbmr1(0));

	gdbstub_printk("DBAR1 %08lx ", __get_dbar(1));
	gdbstub_printk("DBMR10 %08lx ", __get_dbmr0(1));
	gdbstub_printk("DBMR11 %08lx\n", __get_dbmr1(1));

	gdbstub_printk("\n");
} /* end gdbstub_dump_debugregs() */
/*****************************************************************************/
/*
* dump the MMU state into a structure so that it can be accessed with GDB
*/
/*
 * dump the MMU state into a structure so that it can be accessed with GDB
 * - the __get_IAMLR()/__get_DAMPR() etc. macros stringify their argument
 *   into the asm, so every register access must be written out with a
 *   literal index; they cannot be looped over
 */
void gdbstub_get_mmu_state(void)
{
	asm volatile("movsg hsr0,%0" : "=r"(__debug_mmu.regs.hsr0));
	asm volatile("movsg pcsr,%0" : "=r"(__debug_mmu.regs.pcsr));
	asm volatile("movsg esr0,%0" : "=r"(__debug_mmu.regs.esr0));
	asm volatile("movsg ear0,%0" : "=r"(__debug_mmu.regs.ear0));
	asm volatile("movsg epcr0,%0" : "=r"(__debug_mmu.regs.epcr0));

	/* read the protection / SAT registers */
	__debug_mmu.iamr[0].L = __get_IAMLR(0);
	__debug_mmu.iamr[0].P = __get_IAMPR(0);
	__debug_mmu.iamr[1].L = __get_IAMLR(1);
	__debug_mmu.iamr[1].P = __get_IAMPR(1);
	__debug_mmu.iamr[2].L = __get_IAMLR(2);
	__debug_mmu.iamr[2].P = __get_IAMPR(2);
	__debug_mmu.iamr[3].L = __get_IAMLR(3);
	__debug_mmu.iamr[3].P = __get_IAMPR(3);
	__debug_mmu.iamr[4].L = __get_IAMLR(4);
	__debug_mmu.iamr[4].P = __get_IAMPR(4);
	__debug_mmu.iamr[5].L = __get_IAMLR(5);
	__debug_mmu.iamr[5].P = __get_IAMPR(5);
	__debug_mmu.iamr[6].L = __get_IAMLR(6);
	__debug_mmu.iamr[6].P = __get_IAMPR(6);
	__debug_mmu.iamr[7].L = __get_IAMLR(7);
	__debug_mmu.iamr[7].P = __get_IAMPR(7);
	__debug_mmu.iamr[8].L = __get_IAMLR(8);
	__debug_mmu.iamr[8].P = __get_IAMPR(8);
	__debug_mmu.iamr[9].L = __get_IAMLR(9);
	__debug_mmu.iamr[9].P = __get_IAMPR(9);
	__debug_mmu.iamr[10].L = __get_IAMLR(10);
	__debug_mmu.iamr[10].P = __get_IAMPR(10);
	__debug_mmu.iamr[11].L = __get_IAMLR(11);
	__debug_mmu.iamr[11].P = __get_IAMPR(11);
	__debug_mmu.iamr[12].L = __get_IAMLR(12);
	__debug_mmu.iamr[12].P = __get_IAMPR(12);
	__debug_mmu.iamr[13].L = __get_IAMLR(13);
	__debug_mmu.iamr[13].P = __get_IAMPR(13);
	__debug_mmu.iamr[14].L = __get_IAMLR(14);
	__debug_mmu.iamr[14].P = __get_IAMPR(14);
	__debug_mmu.iamr[15].L = __get_IAMLR(15);
	__debug_mmu.iamr[15].P = __get_IAMPR(15);

	__debug_mmu.damr[0].L = __get_DAMLR(0);
	__debug_mmu.damr[0].P = __get_DAMPR(0);
	__debug_mmu.damr[1].L = __get_DAMLR(1);
	__debug_mmu.damr[1].P = __get_DAMPR(1);
	__debug_mmu.damr[2].L = __get_DAMLR(2);
	__debug_mmu.damr[2].P = __get_DAMPR(2);
	__debug_mmu.damr[3].L = __get_DAMLR(3);
	__debug_mmu.damr[3].P = __get_DAMPR(3);
	__debug_mmu.damr[4].L = __get_DAMLR(4);
	__debug_mmu.damr[4].P = __get_DAMPR(4);
	__debug_mmu.damr[5].L = __get_DAMLR(5);
	__debug_mmu.damr[5].P = __get_DAMPR(5);
	__debug_mmu.damr[6].L = __get_DAMLR(6);
	__debug_mmu.damr[6].P = __get_DAMPR(6);
	__debug_mmu.damr[7].L = __get_DAMLR(7);
	__debug_mmu.damr[7].P = __get_DAMPR(7);
	__debug_mmu.damr[8].L = __get_DAMLR(8);
	__debug_mmu.damr[8].P = __get_DAMPR(8);
	__debug_mmu.damr[9].L = __get_DAMLR(9);
	__debug_mmu.damr[9].P = __get_DAMPR(9);
	__debug_mmu.damr[10].L = __get_DAMLR(10);
	__debug_mmu.damr[10].P = __get_DAMPR(10);
	__debug_mmu.damr[11].L = __get_DAMLR(11);
	__debug_mmu.damr[11].P = __get_DAMPR(11);
	__debug_mmu.damr[12].L = __get_DAMLR(12);
	__debug_mmu.damr[12].P = __get_DAMPR(12);
	__debug_mmu.damr[13].L = __get_DAMLR(13);
	__debug_mmu.damr[13].P = __get_DAMPR(13);
	__debug_mmu.damr[14].L = __get_DAMLR(14);
	__debug_mmu.damr[14].P = __get_DAMPR(14);
	__debug_mmu.damr[15].L = __get_DAMLR(15);
	__debug_mmu.damr[15].P = __get_DAMPR(15);

#ifdef CONFIG_MMU
	do {
		/* read the DAT entries from the TLB
		 * - the TLB probe insn clobbers TPLR/TPPR/TPXR, so their
		 *   original values are saved first and restored at the end */
		struct __debug_amr *p;
		int loop;

		asm volatile("movsg tplr,%0" : "=r"(__debug_mmu.regs.tplr));
		asm volatile("movsg tppr,%0" : "=r"(__debug_mmu.regs.tppr));
		asm volatile("movsg tpxr,%0" : "=r"(__debug_mmu.regs.tpxr));
		asm volatile("movsg cxnr,%0" : "=r"(__debug_mmu.regs.cxnr));

		p = __debug_mmu.tlb;

		/* way 0 */
		asm volatile("movgs %0,tpxr" :: "r"(0 << TPXR_WAY_SHIFT));
		for (loop = 0; loop < 64; loop++) {
			asm volatile("tlbpr %0,gr0,#1,#0" :: "r"(loop << PAGE_SHIFT));
			asm volatile("movsg tplr,%0" : "=r"(p->L));
			asm volatile("movsg tppr,%0" : "=r"(p->P));
			p++;
		}

		/* way 1 */
		asm volatile("movgs %0,tpxr" :: "r"(1 << TPXR_WAY_SHIFT));
		for (loop = 0; loop < 64; loop++) {
			asm volatile("tlbpr %0,gr0,#1,#0" :: "r"(loop << PAGE_SHIFT));
			asm volatile("movsg tplr,%0" : "=r"(p->L));
			asm volatile("movsg tppr,%0" : "=r"(p->P));
			p++;
		}

		asm volatile("movgs %0,tplr" :: "r"(__debug_mmu.regs.tplr));
		asm volatile("movgs %0,tppr" :: "r"(__debug_mmu.regs.tppr));
		asm volatile("movgs %0,tpxr" :: "r"(__debug_mmu.regs.tpxr));
	} while(0);
#endif

} /* end gdbstub_get_mmu_state() */
/*
* handle general query commands of the form 'qXXXXX'
*/
/*
 * handle general query commands of the form 'qXXXXX'
 * - the reply is rendered into output_buffer ("E01" for unknown queries)
 */
static void gdbstub_handle_query(void)
{
	if (strcmp(input_buffer, "qAttached") == 0) {
		/* we "attached" to an already-running system */
		sprintf(output_buffer, "1");
		return;
	}

	if (strcmp(input_buffer, "qC") == 0) {
		/* return current thread ID */
		sprintf(output_buffer, "QC 0");
		return;
	}

	if (strcmp(input_buffer, "qOffsets") == 0) {
		/* return relocation offset of text and data segments */
		sprintf(output_buffer, "Text=0;Data=0;Bss=0");
		return;
	}

	if (strcmp(input_buffer, "qSymbol::") == 0) {
		/* we don't need any symbol lookups from GDB */
		sprintf(output_buffer, "OK");
		return;
	}

	if (strcmp(input_buffer, "qSupported") == 0) {
		/* query of supported features
		 * - sizeof yields a size_t, which does not match the %u
		 *   conversion; cast explicitly to keep the format and
		 *   argument types in agreement */
		sprintf(output_buffer, "PacketSize=%u;ReverseContinue-;ReverseStep-",
			(unsigned) sizeof(input_buffer));
		return;
	}

	gdbstub_strcpy(output_buffer,"E01");
}
/*****************************************************************************/
/*
* handle event interception and GDB remote protocol processing
* - on entry:
* PSR.ET==0, PSR.S==1 and the CPU is in debug mode
* __debug_frame points to the saved registers
* __frame points to the kernel mode exception frame, if it was in kernel
* mode when the break happened
*/
void gdbstub(int sigval)
{
unsigned long addr, length, loop, dbar, temp, temp2, temp3;
uint32_t zero;
char *ptr;
int flush_cache = 0;
LEDS(0x5000);
if (sigval < 0) {
#ifndef CONFIG_GDBSTUB_IMMEDIATE
/* return immediately if GDB immediate activation option not set */
return;
#else
sigval = SIGINT;
#endif
}
save_user_regs(&__debug_frame0->uc);
#if 0
gdbstub_printk("--> gdbstub() %08x %p %08x %08x\n",
__debug_frame->pc,
__debug_frame,
__debug_regs->brr,
__debug_regs->bpsr);
// gdbstub_show_regs();
#endif
LEDS(0x5001);
/* if we were interrupted by input on the serial gdbstub serial port,
* restore the context prior to the interrupt so that we return to that
* directly
*/
temp = (unsigned long) __entry_kerneltrap_table;
temp2 = (unsigned long) __entry_usertrap_table;
temp3 = __debug_frame->pc & ~15;
if (temp3 == temp + TBR_TT_INTERRUPT_15 ||
temp3 == temp2 + TBR_TT_INTERRUPT_15
) {
asm volatile("movsg pcsr,%0" : "=r"(__debug_frame->pc));
__debug_frame->psr |= PSR_ET;
__debug_frame->psr &= ~PSR_S;
if (__debug_frame->psr & PSR_PS)
__debug_frame->psr |= PSR_S;
__debug_status.brr = (__debug_frame->tbr & TBR_TT) << 12;
__debug_status.brr |= BRR_EB;
sigval = SIGINT;
}
/* handle the decrement timer going off (FR451 only) */
if (temp3 == temp + TBR_TT_DECREMENT_TIMER ||
temp3 == temp2 + TBR_TT_DECREMENT_TIMER
) {
asm volatile("movgs %0,timerd" :: "r"(10000000));
asm volatile("movsg pcsr,%0" : "=r"(__debug_frame->pc));
__debug_frame->psr |= PSR_ET;
__debug_frame->psr &= ~PSR_S;
if (__debug_frame->psr & PSR_PS)
__debug_frame->psr |= PSR_S;
__debug_status.brr = (__debug_frame->tbr & TBR_TT) << 12;
__debug_status.brr |= BRR_EB;
sigval = SIGXCPU;
}
LEDS(0x5002);
/* after a BREAK insn, the PC lands on the far side of it */
if (__debug_status.brr & BRR_SB)
gdbstub_check_breakpoint();
LEDS(0x5003);
/* handle attempts to write console data via GDB "O" commands */
if (__debug_frame->pc == (unsigned long) gdbstub_console_write + 4) {
__gdbstub_console_write((struct console *) __debug_frame->gr8,
(const char *) __debug_frame->gr9,
(unsigned) __debug_frame->gr10);
goto done;
}
if (gdbstub_rx_unget) {
sigval = SIGINT;
goto packet_waiting;
}
if (!sigval)
sigval = gdbstub_compute_signal(__debug_status.brr);
LEDS(0x5004);
/* send a message to the debugger's user saying what happened if it may
* not be clear cut (we can't map exceptions onto signals properly)
*/
if (sigval != SIGINT && sigval != SIGTRAP && sigval != SIGILL) {
static const char title[] = "Break ";
static const char crlf[] = "\r\n";
unsigned long brr = __debug_status.brr;
char hx;
ptr = output_buffer;
*ptr++ = 'O';
ptr = mem2hex(title, ptr, sizeof(title) - 1,0);
hx = hex_asc_hi(brr >> 24);
ptr = hex_byte_pack(ptr, hx);
hx = hex_asc_lo(brr >> 24);
ptr = hex_byte_pack(ptr, hx);
hx = hex_asc_hi(brr >> 16);
ptr = hex_byte_pack(ptr, hx);
hx = hex_asc_lo(brr >> 16);
ptr = hex_byte_pack(ptr, hx);
hx = hex_asc_hi(brr >> 8);
ptr = hex_byte_pack(ptr, hx);
hx = hex_asc_lo(brr >> 8);
ptr = hex_byte_pack(ptr, hx);
hx = hex_asc_hi(brr);
ptr = hex_byte_pack(ptr, hx);
hx = hex_asc_lo(brr);
ptr = hex_byte_pack(ptr, hx);
ptr = mem2hex(crlf, ptr, sizeof(crlf) - 1, 0);
*ptr = 0;
gdbstub_send_packet(output_buffer); /* send it off... */
}
LEDS(0x5005);
/* tell the debugger that an exception has occurred */
ptr = output_buffer;
/* Send trap type (converted to signal) */
*ptr++ = 'T';
ptr = hex_byte_pack(ptr, sigval);
/* Send Error PC */
ptr = hex_byte_pack(ptr, GDB_REG_PC);
*ptr++ = ':';
ptr = mem2hex(&__debug_frame->pc, ptr, 4, 0);
*ptr++ = ';';
/*
* Send frame pointer
*/
ptr = hex_byte_pack(ptr, GDB_REG_FP);
*ptr++ = ':';
ptr = mem2hex(&__debug_frame->fp, ptr, 4, 0);
*ptr++ = ';';
/*
* Send stack pointer
*/
ptr = hex_byte_pack(ptr, GDB_REG_SP);
*ptr++ = ':';
ptr = mem2hex(&__debug_frame->sp, ptr, 4, 0);
*ptr++ = ';';
*ptr++ = 0;
gdbstub_send_packet(output_buffer); /* send it off... */
LEDS(0x5006);
packet_waiting:
gdbstub_get_mmu_state();
/* wait for input from remote GDB */
while (1) {
output_buffer[0] = 0;
LEDS(0x5007);
gdbstub_recv_packet(input_buffer);
LEDS(0x5600 | input_buffer[0]);
switch (input_buffer[0]) {
/* request repeat of last signal number */
case '?':
output_buffer[0] = 'S';
output_buffer[1] = hex_asc_hi(sigval);
output_buffer[2] = hex_asc_lo(sigval);
output_buffer[3] = 0;
break;
case 'd':
/* toggle debug flag */
break;
/* return the value of the CPU registers
* - GR0, GR1, GR2, GR3, GR4, GR5, GR6, GR7,
* - GR8, GR9, GR10, GR11, GR12, GR13, GR14, GR15,
* - GR16, GR17, GR18, GR19, GR20, GR21, GR22, GR23,
* - GR24, GR25, GR26, GR27, GR28, GR29, GR30, GR31,
* - GR32, GR33, GR34, GR35, GR36, GR37, GR38, GR39,
* - GR40, GR41, GR42, GR43, GR44, GR45, GR46, GR47,
* - GR48, GR49, GR50, GR51, GR52, GR53, GR54, GR55,
* - GR56, GR57, GR58, GR59, GR60, GR61, GR62, GR63,
* - FP0, FP1, FP2, FP3, FP4, FP5, FP6, FP7,
* - FP8, FP9, FP10, FP11, FP12, FP13, FP14, FP15,
* - FP16, FP17, FP18, FP19, FP20, FP21, FP22, FP23,
* - FP24, FP25, FP26, FP27, FP28, FP29, FP30, FP31,
* - FP32, FP33, FP34, FP35, FP36, FP37, FP38, FP39,
* - FP40, FP41, FP42, FP43, FP44, FP45, FP46, FP47,
* - FP48, FP49, FP50, FP51, FP52, FP53, FP54, FP55,
* - FP56, FP57, FP58, FP59, FP60, FP61, FP62, FP63,
* - PC, PSR, CCR, CCCR,
* - _X132, _X133, _X134
* - TBR, BRR, DBAR0, DBAR1, DBAR2, DBAR3,
* - _X141, _X142, _X143, _X144,
* - LR, LCR
*/
case 'g':
zero = 0;
ptr = output_buffer;
/* deal with GR0, GR1-GR27, GR28-GR31, GR32-GR63 */
ptr = mem2hex(&zero, ptr, 4, 0);
for (loop = 1; loop <= 27; loop++)
ptr = mem2hex(&__debug_user_context->i.gr[loop], ptr, 4, 0);
temp = (unsigned long) __frame;
ptr = mem2hex(&temp, ptr, 4, 0);
ptr = mem2hex(&__debug_user_context->i.gr[29], ptr, 4, 0);
ptr = mem2hex(&__debug_user_context->i.gr[30], ptr, 4, 0);
#ifdef CONFIG_MMU
ptr = mem2hex(&__debug_user_context->i.gr[31], ptr, 4, 0);
#else
temp = (unsigned long) __debug_frame;
ptr = mem2hex(&temp, ptr, 4, 0);
#endif
for (loop = 32; loop <= 63; loop++)
ptr = mem2hex(&__debug_user_context->i.gr[loop], ptr, 4, 0);
/* deal with FR0-FR63 */
for (loop = 0; loop <= 63; loop++)
ptr = mem2hex(&__debug_user_context->f.fr[loop], ptr, 4, 0);
/* deal with special registers */
ptr = mem2hex(&__debug_frame->pc, ptr, 4, 0);
ptr = mem2hex(&__debug_frame->psr, ptr, 4, 0);
ptr = mem2hex(&__debug_frame->ccr, ptr, 4, 0);
ptr = mem2hex(&__debug_frame->cccr, ptr, 4, 0);
ptr = mem2hex(&zero, ptr, 4, 0);
ptr = mem2hex(&zero, ptr, 4, 0);
ptr = mem2hex(&zero, ptr, 4, 0);
ptr = mem2hex(&__debug_frame->tbr, ptr, 4, 0);
ptr = mem2hex(&__debug_status.brr , ptr, 4, 0);
asm volatile("movsg dbar0,%0" : "=r"(dbar));
ptr = mem2hex(&dbar, ptr, 4, 0);
asm volatile("movsg dbar1,%0" : "=r"(dbar));
ptr = mem2hex(&dbar, ptr, 4, 0);
asm volatile("movsg dbar2,%0" : "=r"(dbar));
ptr = mem2hex(&dbar, ptr, 4, 0);
asm volatile("movsg dbar3,%0" : "=r"(dbar));
ptr = mem2hex(&dbar, ptr, 4, 0);
asm volatile("movsg scr0,%0" : "=r"(dbar));
ptr = mem2hex(&dbar, ptr, 4, 0);
asm volatile("movsg scr1,%0" : "=r"(dbar));
ptr = mem2hex(&dbar, ptr, 4, 0);
asm volatile("movsg scr2,%0" : "=r"(dbar));
ptr = mem2hex(&dbar, ptr, 4, 0);
asm volatile("movsg scr3,%0" : "=r"(dbar));
ptr = mem2hex(&dbar, ptr, 4, 0);
ptr = mem2hex(&__debug_frame->lr, ptr, 4, 0);
ptr = mem2hex(&__debug_frame->lcr, ptr, 4, 0);
ptr = mem2hex(&__debug_frame->iacc0, ptr, 8, 0);
ptr = mem2hex(&__debug_user_context->f.fsr[0], ptr, 4, 0);
for (loop = 0; loop <= 7; loop++)
ptr = mem2hex(&__debug_user_context->f.acc[loop], ptr, 4, 0);
ptr = mem2hex(&__debug_user_context->f.accg, ptr, 8, 0);
for (loop = 0; loop <= 1; loop++)
ptr = mem2hex(&__debug_user_context->f.msr[loop], ptr, 4, 0);
ptr = mem2hex(&__debug_frame->gner0, ptr, 4, 0);
ptr = mem2hex(&__debug_frame->gner1, ptr, 4, 0);
ptr = mem2hex(&__debug_user_context->f.fner[0], ptr, 4, 0);
ptr = mem2hex(&__debug_user_context->f.fner[1], ptr, 4, 0);
break;
/* set the values of the CPU registers */
case 'G':
ptr = &input_buffer[1];
/* deal with GR0, GR1-GR27, GR28-GR31, GR32-GR63 */
ptr = hex2mem(ptr, &temp, 4);
for (loop = 1; loop <= 27; loop++)
ptr = hex2mem(ptr, &__debug_user_context->i.gr[loop], 4);
ptr = hex2mem(ptr, &temp, 4);
__frame = (struct pt_regs *) temp;
ptr = hex2mem(ptr, &__debug_frame->gr29, 4);
ptr = hex2mem(ptr, &__debug_frame->gr30, 4);
#ifdef CONFIG_MMU
ptr = hex2mem(ptr, &__debug_frame->gr31, 4);
#else
ptr = hex2mem(ptr, &temp, 4);
#endif
for (loop = 32; loop <= 63; loop++)
ptr = hex2mem(ptr, &__debug_user_context->i.gr[loop], 4);
/* deal with FR0-FR63 */
for (loop = 0; loop <= 63; loop++)
ptr = mem2hex(&__debug_user_context->f.fr[loop], ptr, 4, 0);
/* deal with special registers */
ptr = hex2mem(ptr, &__debug_frame->pc, 4);
ptr = hex2mem(ptr, &__debug_frame->psr, 4);
ptr = hex2mem(ptr, &__debug_frame->ccr, 4);
ptr = hex2mem(ptr, &__debug_frame->cccr,4);
for (loop = 132; loop <= 140; loop++)
ptr = hex2mem(ptr, &temp, 4);
ptr = hex2mem(ptr, &temp, 4);
asm volatile("movgs %0,scr0" :: "r"(temp));
ptr = hex2mem(ptr, &temp, 4);
asm volatile("movgs %0,scr1" :: "r"(temp));
ptr = hex2mem(ptr, &temp, 4);
asm volatile("movgs %0,scr2" :: "r"(temp));
ptr = hex2mem(ptr, &temp, 4);
asm volatile("movgs %0,scr3" :: "r"(temp));
ptr = hex2mem(ptr, &__debug_frame->lr, 4);
ptr = hex2mem(ptr, &__debug_frame->lcr, 4);
ptr = hex2mem(ptr, &__debug_frame->iacc0, 8);
ptr = hex2mem(ptr, &__debug_user_context->f.fsr[0], 4);
for (loop = 0; loop <= 7; loop++)
ptr = hex2mem(ptr, &__debug_user_context->f.acc[loop], 4);
ptr = hex2mem(ptr, &__debug_user_context->f.accg, 8);
for (loop = 0; loop <= 1; loop++)
ptr = hex2mem(ptr, &__debug_user_context->f.msr[loop], 4);
ptr = hex2mem(ptr, &__debug_frame->gner0, 4);
ptr = hex2mem(ptr, &__debug_frame->gner1, 4);
ptr = hex2mem(ptr, &__debug_user_context->f.fner[0], 4);
ptr = hex2mem(ptr, &__debug_user_context->f.fner[1], 4);
gdbstub_strcpy(output_buffer,"OK");
break;
/* mAA..AA,LLLL Read LLLL bytes at address AA..AA */
case 'm':
ptr = &input_buffer[1];
if (hexToInt(&ptr, &addr) &&
*ptr++ == ',' &&
hexToInt(&ptr, &length)
) {
if (mem2hex((char *)addr, output_buffer, length, 1))
break;
gdbstub_strcpy (output_buffer, "E03");
}
else {
gdbstub_strcpy(output_buffer,"E01");
}
break;
/* MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */
case 'M':
ptr = &input_buffer[1];
if (hexToInt(&ptr, &addr) &&
*ptr++ == ',' &&
hexToInt(&ptr, &length) &&
*ptr++ == ':'
) {
if (hex2mem(ptr, (char *)addr, length)) {
gdbstub_strcpy(output_buffer, "OK");
}
else {
gdbstub_strcpy(output_buffer, "E03");
}
}
else
gdbstub_strcpy(output_buffer, "E02");
flush_cache = 1;
break;
/* pNN: Read value of reg N and return it */
case 'p':
/* return no value, indicating that we don't support
* this command and that gdb should use 'g' instead */
break;
/* PNN,=RRRRRRRR: Write value R to reg N return OK */
case 'P':
ptr = &input_buffer[1];
if (!hexToInt(&ptr, &addr) ||
*ptr++ != '=' ||
!hexToInt(&ptr, &temp)
) {
gdbstub_strcpy(output_buffer, "E01");
break;
}
temp2 = 1;
switch (addr) {
case GDB_REG_GR(0):
break;
case GDB_REG_GR(1) ... GDB_REG_GR(63):
__debug_user_context->i.gr[addr - GDB_REG_GR(0)] = temp;
break;
case GDB_REG_FR(0) ... GDB_REG_FR(63):
__debug_user_context->f.fr[addr - GDB_REG_FR(0)] = temp;
break;
case GDB_REG_PC:
__debug_user_context->i.pc = temp;
break;
case GDB_REG_PSR:
__debug_user_context->i.psr = temp;
break;
case GDB_REG_CCR:
__debug_user_context->i.ccr = temp;
break;
case GDB_REG_CCCR:
__debug_user_context->i.cccr = temp;
break;
case GDB_REG_BRR:
__debug_status.brr = temp;
break;
case GDB_REG_LR:
__debug_user_context->i.lr = temp;
break;
case GDB_REG_LCR:
__debug_user_context->i.lcr = temp;
break;
case GDB_REG_FSR0:
__debug_user_context->f.fsr[0] = temp;
break;
case GDB_REG_ACC(0) ... GDB_REG_ACC(7):
__debug_user_context->f.acc[addr - GDB_REG_ACC(0)] = temp;
break;
case GDB_REG_ACCG(0):
*(uint32_t *) &__debug_user_context->f.accg[0] = temp;
break;
case GDB_REG_ACCG(4):
*(uint32_t *) &__debug_user_context->f.accg[4] = temp;
break;
case GDB_REG_MSR(0) ... GDB_REG_MSR(1):
__debug_user_context->f.msr[addr - GDB_REG_MSR(0)] = temp;
break;
case GDB_REG_GNER(0) ... GDB_REG_GNER(1):
__debug_user_context->i.gner[addr - GDB_REG_GNER(0)] = temp;
break;
case GDB_REG_FNER(0) ... GDB_REG_FNER(1):
__debug_user_context->f.fner[addr - GDB_REG_FNER(0)] = temp;
break;
default:
temp2 = 0;
break;
}
if (temp2) {
gdbstub_strcpy(output_buffer, "OK");
}
else {
gdbstub_strcpy(output_buffer, "E02");
}
break;
/* cAA..AA Continue at address AA..AA(optional) */
case 'c':
/* try to read optional parameter, pc unchanged if no parm */
ptr = &input_buffer[1];
if (hexToInt(&ptr, &addr))
__debug_frame->pc = addr;
goto done;
/* kill the program */
case 'k' :
goto done; /* just continue */
/* detach */
case 'D':
gdbstub_strcpy(output_buffer, "OK");
break;
/* reset the whole machine (FIXME: system dependent) */
case 'r':
break;
/* step to next instruction */
case 's':
__debug_regs->dcr |= DCR_SE;
__debug_status.dcr |= DCR_SE;
goto done;
/* extended command */
case 'v':
if (strcmp(input_buffer, "vCont?") == 0) {
output_buffer[0] = 0;
break;
}
goto unsupported_cmd;
/* set baud rate (bBB) */
case 'b':
ptr = &input_buffer[1];
if (!hexToInt(&ptr, &temp)) {
gdbstub_strcpy(output_buffer,"B01");
break;
}
if (temp) {
/* ack before changing speed */
gdbstub_send_packet("OK");
gdbstub_set_baud(temp);
}
break;
/* set breakpoint */
case 'Z':
ptr = &input_buffer[1];
if (!hexToInt(&ptr,&temp) || *ptr++ != ',' ||
!hexToInt(&ptr,&addr) || *ptr++ != ',' ||
!hexToInt(&ptr,&length)
) {
gdbstub_strcpy(output_buffer,"E01");
break;
}
if (temp >= 5) {
gdbstub_strcpy(output_buffer,"E03");
break;
}
if (gdbstub_set_breakpoint(temp, addr, length) < 0) {
gdbstub_strcpy(output_buffer,"E03");
break;
}
if (temp == 0)
flush_cache = 1; /* soft bkpt by modified memory */
gdbstub_strcpy(output_buffer,"OK");
break;
/* clear breakpoint */
case 'z':
ptr = &input_buffer[1];
if (!hexToInt(&ptr,&temp) || *ptr++ != ',' ||
!hexToInt(&ptr,&addr) || *ptr++ != ',' ||
!hexToInt(&ptr,&length)
) {
gdbstub_strcpy(output_buffer,"E01");
break;
}
if (temp >= 5) {
gdbstub_strcpy(output_buffer,"E03");
break;
}
if (gdbstub_clear_breakpoint(temp, addr, length) < 0) {
gdbstub_strcpy(output_buffer,"E03");
break;
}
if (temp == 0)
flush_cache = 1; /* soft bkpt by modified memory */
gdbstub_strcpy(output_buffer,"OK");
break;
/* Thread-setting packet */
case 'H':
gdbstub_strcpy(output_buffer, "OK");
break;
case 'q':
gdbstub_handle_query();
break;
default:
unsupported_cmd:
gdbstub_proto("### GDB Unsupported Cmd '%s'\n",input_buffer);
gdbstub_strcpy(output_buffer,"E01");
break;
}
/* reply to the request */
LEDS(0x5009);
gdbstub_send_packet(output_buffer);
}
done:
restore_user_regs(&__debug_frame0->uc);
//gdbstub_dump_debugregs();
//gdbstub_printk("<-- gdbstub() %08x\n", __debug_frame->pc);
/* need to flush the instruction cache before resuming, as we may have
* deposited a breakpoint, and the icache probably has no way of
* knowing that a data ref to some location may have changed something
* that is in the instruction cache. NB: We flush both caches, just to
* be sure...
*/
/* note: flushing the icache will clobber EAR0 on the FR451 */
if (flush_cache)
gdbstub_purge_cache();
LEDS(0x5666);
} /* end gdbstub() */
/*****************************************************************************/
/*
 * initialise the GDB stub: announce ourselves on the debug channel and, when
 * CONFIG_GDBSTUB_IMMEDIATE is set, synchronise with a GDB that may already be
 * waiting at the far end of the serial link
 */
void __init gdbstub_init(void)
{
#ifdef CONFIG_GDBSTUB_IMMEDIATE
	unsigned char ch;
	int ret;
#endif

	gdbstub_printk("%s", gdbstub_banner);

	gdbstub_io_init();

	/* try to talk to GDB (or anyone insane enough to want to type GDB protocol by hand) */
	gdbstub_proto("### GDB Tx ACK\n");
	gdbstub_tx_char('+'); /* 'hello world' */

#ifdef CONFIG_GDBSTUB_IMMEDIATE
	gdbstub_printk("GDB Stub waiting for packet\n");

	/*
	 * In case GDB is started before us, ack any packets
	 * (presumably "$?#xx") sitting there.
	 */
	do { gdbstub_rx_char(&ch, 0); } while (ch != '$');	/* sync to packet start */
	do { gdbstub_rx_char(&ch, 0); } while (ch != '#');	/* skip payload to csum marker */
	do { ret = gdbstub_rx_char(&ch, 0); } while (ret != 0); /* eat first csum byte */
	do { ret = gdbstub_rx_char(&ch, 0); } while (ret != 0); /* eat second csum byte */

	gdbstub_proto("### GDB Tx NAK\n");
	gdbstub_tx_char('-'); /* nak it */

#else
	gdbstub_printk("GDB Stub set\n");
#endif

#if 0
	/* send banner */
	ptr = output_buffer;
	*ptr++ = 'O';
	ptr = mem2hex(gdbstub_banner, ptr, sizeof(gdbstub_banner) - 1, 0);
	gdbstub_send_packet(output_buffer);
#endif

#if defined(CONFIG_GDB_CONSOLE) && defined(CONFIG_GDBSTUB_IMMEDIATE)
	register_console(&gdbstub_console);
#endif

} /* end gdbstub_init() */
/*****************************************************************************/
/*
* register the console at a more appropriate time
*/
#if defined (CONFIG_GDB_CONSOLE) && !defined(CONFIG_GDBSTUB_IMMEDIATE)
/* register the GDB stub console with the console layer; deferred to initcall
 * time (rather than gdbstub_init) so the console subsystem is ready */
static int __init gdbstub_postinit(void)
{
	printk("registering console\n");
	register_console(&gdbstub_console);
	return 0;
} /* end gdbstub_postinit() */

__initcall(gdbstub_postinit);
#endif
/*****************************************************************************/
/*
 * send an exit message to GDB: a "W<status>" packet framed as
 * '$' <payload> '#' <two hex checksum digits>, then a trailing '-'
 */
void gdbstub_exit(int status)
{
	unsigned char csum = 0;
	unsigned char ch;
	int loop;

	sprintf(output_buffer, "W%02x", status & 0xff);

	gdbstub_tx_char('$');

	/* send the payload, accumulating the modulo-256 checksum as we go */
	for (loop = 0; (ch = output_buffer[loop]) != 0; loop++) {
		gdbstub_tx_char(ch);
		csum += ch;
	}

	gdbstub_tx_char('#');
	gdbstub_tx_char(hex_asc_hi(csum));
	gdbstub_tx_char(hex_asc_lo(csum));

	/* make sure the output is flushed, or else RedBoot might clobber it */
	gdbstub_tx_char('-');
	gdbstub_tx_flush();

} /* end gdbstub_exit() */
/*****************************************************************************/
/*
 * GDB wants to call malloc() and free() to allocate memory for calling kernel
 * functions directly from its command line
 */
/* allocator resolvable by the debugger; __maybe_unused because no kernel code
 * calls it.  GFP_ATOMIC: may run while the target is stopped in any context */
static void *malloc(size_t size) __maybe_unused;
static void *malloc(size_t size)
{
	return kmalloc(size, GFP_ATOMIC);
}

/* release memory handed out by the malloc() wrapper above */
static void free(void *p) __maybe_unused;
static void free(void *p)
{
	kfree(p);
}

/* read hardware status register 0 on behalf of the debugger */
static uint32_t ___get_HSR0(void) __maybe_unused;
static uint32_t ___get_HSR0(void)
{
	return __get_HSR(0);
}

/* write hardware status register 0 and return the value it reads back as */
static uint32_t ___set_HSR0(uint32_t x) __maybe_unused;
static uint32_t ___set_HSR0(uint32_t x)
{
	__set_HSR(0, x);
	return __get_HSR(0);
}
| gpl-2.0 |
kannu1994/samsung_kernel_aries | fs/cachefiles/key.c | 9858 | 3754 | /* Key to pathname encoder
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/slab.h>
#include "internal.h"
static const char cachefiles_charmap[64] =
"0123456789" /* 0 - 9 */
"abcdefghijklmnopqrstuvwxyz" /* 10 - 35 */
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" /* 36 - 61 */
"_-" /* 62 - 63 */
;
static const char cachefiles_filecharmap[256] = {
/* we skip space and tab and control chars */
[33 ... 46] = 1, /* '!' -> '.' */
/* we skip '/' as it's significant to pathwalk */
[48 ... 127] = 1, /* '0' -> '~' */
};
/*
 * turn the raw key into something cooked
 * - the raw key should include the length in the two bytes at the front
 * - the key may be up to 514 bytes in length (including the length word)
 * - "base64" encode the strange keys, mapping 3 bytes of raw to four of
 *   cooked
 * - need to cut the cooked key into 252 char lengths (189 raw bytes)
 *
 * Returns a kmalloc'd NUL-terminated string (caller frees), or NULL on OOM.
 */
char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type)
{
	unsigned char csum, ch;
	unsigned int acc;
	char *key;
	int loop, len, max, seg, mark, print;

	_enter(",%d", keylen);

	BUG_ON(keylen < 2 || keylen > 514);

	/* checksum the whole raw key (length word included) and decide
	 * whether every payload byte is filename-safe */
	csum = raw[0] + raw[1];
	print = 1;
	for (loop = 2; loop < keylen; loop++) {
		ch = raw[loop];
		csum += ch;
		print &= cachefiles_filecharmap[ch];
	}

	if (print) {
		/* if the path is usable ASCII, then we render it directly */
		max = keylen - 2;
		max += 2;	/* two base64'd length chars on the front */
		max += 5;	/* @checksum/M */
		max += 3 * 2;	/* maximum number of segment dividers (".../M")
				 * is ((514 + 251) / 252) = 3
				 */
		max += 1;	/* NUL on end */
	} else {
		/* calculate the maximum length of the cooked key */
		keylen = (keylen + 2) / 3;	/* now counts 3-byte groups */

		max = keylen * 4;
		max += 5;	/* @checksum/M */
		max += 3 * 2;	/* maximum number of segment dividers (".../M")
				 * is ((514 + 188) / 189) = 3
				 */
		max += 1;	/* NUL on end */
	}

	max += 1;	/* 2nd NUL on end */

	_debug("max: %d", max);

	key = kmalloc(max, GFP_KERNEL);
	if (!key)
		return NULL;

	len = 0;

	/* build the cooked key: "@<csum-hex>\0+" header */
	sprintf(key, "@%02x%c+", (unsigned) csum, 0);
	len = 5;
	mark = len - 1;	/* index of '+'; later overwritten with type char */

	if (print) {
		/* printable key: emit the 16-bit length word base64 encoded,
		 * then copy payload bytes through, inserting a NUL+'+'
		 * divider at each segment boundary */
		acc = *(uint16_t *) raw;
		raw += 2;

		key[len + 1] = cachefiles_charmap[acc & 63];
		acc >>= 6;
		key[len] = cachefiles_charmap[acc & 63];
		len += 2;

		seg = 250;
		/* NOTE(review): this loop copies keylen bytes, but only
		 * keylen - 2 payload bytes remain after skipping the length
		 * word above — looks like a 2-byte overread of the raw key;
		 * confirm against upstream fs/cachefiles/key.c */
		for (loop = keylen; loop > 0; loop--) {
			if (seg <= 0) {
				key[len++] = '\0';
				mark = len;
				key[len++] = '+';
				seg = 252;
			}

			key[len++] = *raw++;
			ASSERT(len < max);
		}

		/* uppercase single-letter suffix records the cookie type */
		switch (type) {
		case FSCACHE_COOKIE_TYPE_INDEX:		type = 'I';	break;
		case FSCACHE_COOKIE_TYPE_DATAFILE:	type = 'D';	break;
		default:				type = 'S';	break;
		}
	} else {
		/* binary key: base64 encode each 3 raw bytes into 4 cooked
		 * chars, with the same NUL+'+' segment dividers */
		seg = 252;
		for (loop = keylen; loop > 0; loop--) {
			if (seg <= 0) {
				key[len++] = '\0';
				mark = len;
				key[len++] = '+';
				seg = 252;
			}

			acc = *raw++;
			acc |= *raw++ << 8;
			acc |= *raw++ << 16;

			_debug("acc: %06x", acc);

			key[len++] = cachefiles_charmap[acc & 63];
			acc >>= 6;
			key[len++] = cachefiles_charmap[acc & 63];
			acc >>= 6;
			key[len++] = cachefiles_charmap[acc & 63];
			acc >>= 6;
			key[len++] = cachefiles_charmap[acc & 63];

			ASSERT(len < max);
		}

		/* distinct type letters mark the key as base64-encoded */
		switch (type) {
		case FSCACHE_COOKIE_TYPE_INDEX:		type = 'J';	break;
		case FSCACHE_COOKIE_TYPE_DATAFILE:	type = 'E';	break;
		default:				type = 'T';	break;
		}
	}

	/* overwrite the final divider '+' with the type marker */
	key[mark] = type;
	key[len++] = 0;
	key[len] = 0;	/* 2nd NUL accounted for in max above */

	_leave(" = %p %d", key, len);
	return key;
}
| gpl-2.0 |
vathpela/linux-esrt | net/tipc/bearer.c | 131 | 25336 | /*
* net/tipc/bearer.c: TIPC bearer code
*
* Copyright (c) 1996-2006, 2013-2014, Ericsson AB
* Copyright (c) 2004-2006, 2010-2013, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <net/sock.h>
#include "core.h"
#include "bearer.h"
#include "link.h"
#include "discover.h"
#include "bcast.h"
#define MAX_ADDR_STR 60
static struct tipc_media * const media_info_array[] = {
ð_media_info,
#ifdef CONFIG_TIPC_MEDIA_IB
&ib_media_info,
#endif
NULL
};
static const struct nla_policy
tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
[TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
[TIPC_NLA_BEARER_NAME] = {
.type = NLA_STRING,
.len = TIPC_MAX_BEARER_NAME
},
[TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
[TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
};
static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
[TIPC_NLA_MEDIA_UNSPEC] = { .type = NLA_UNSPEC },
[TIPC_NLA_MEDIA_NAME] = { .type = NLA_STRING },
[TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED }
};
static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
bool shutting_down);
/**
 * tipc_media_find - locates specified media object by name
 *
 * Returns the matching entry, or the table's NULL terminator if no
 * media of that name is registered.
 */
struct tipc_media *tipc_media_find(const char *name)
{
	u32 idx = 0;

	while (media_info_array[idx] &&
	       strcmp(media_info_array[idx]->name, name))
		idx++;

	return media_info_array[idx];
}
/**
 * media_find_id - locates specified media object by type identifier
 *
 * Returns the matching entry, or the table's NULL terminator if no
 * media with that type id is registered.
 */
static struct tipc_media *media_find_id(u8 type)
{
	u32 idx = 0;

	while (media_info_array[idx] && media_info_array[idx]->type_id != type)
		idx++;

	return media_info_array[idx];
}
/**
 * tipc_media_addr_printf - record media address in print buffer
 * @buf: destination character buffer
 * @len: size of @buf in bytes
 * @a: media address to render
 *
 * Renders "media(address)" when the media type is known and its addr2str
 * method succeeds; otherwise "UNKNOWN(id)" followed by the raw address
 * bytes in hex.
 */
void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
{
	char addr_str[MAX_ADDR_STR];
	struct tipc_media *m_ptr;
	int ret;

	m_ptr = media_find_id(a->media_id);
	if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str)))
		ret = scnprintf(buf, len, "%s(%s)", m_ptr->name, addr_str);
	else {
		u32 i;

		ret = scnprintf(buf, len, "UNKNOWN(%u)", a->media_id);
		for (i = 0; i < sizeof(a->value); i++)
			/* fix: was "buf - ret, len + ret", which writes
			 * before the buffer and overstates the remaining
			 * space; advance by ret and shrink the bound */
			ret += scnprintf(buf + ret, len - ret,
					 "-%02x", a->value[i]);
	}
}
/**
 * bearer_name_validate - validate & (optionally) deconstruct bearer name
 * @name: ptr to bearer name string
 * @name_parts: ptr to area for bearer name components (or NULL if not needed)
 *
 * A valid bearer name is "<media>:<interface>" with both parts non-empty
 * and within their respective length limits.
 *
 * Returns 1 if bearer name is valid, otherwise 0.
 */
static int bearer_name_validate(const char *name,
				struct tipc_bearer_names *name_parts)
{
	char copy[TIPC_MAX_BEARER_NAME];
	char *media_part;
	char *if_part;
	u32 media_len;
	u32 if_len;

	/* copy bearer name & ensure length is OK; sentinel catches a
	 * non-POSIX strncpy() that doesn't NUL-pad on truncation */
	copy[TIPC_MAX_BEARER_NAME - 1] = 0;
	strncpy(copy, name, TIPC_MAX_BEARER_NAME);
	if (copy[TIPC_MAX_BEARER_NAME - 1] != 0)
		return 0;

	/* both component parts of the bearer name must be present */
	media_part = copy;
	if_part = strchr(media_part, ':');
	if (!if_part)
		return 0;
	*(if_part++) = 0;

	/* lengths include the terminating NUL */
	media_len = if_part - media_part;
	if_len = strlen(if_part) + 1;

	if (media_len <= 1 || media_len > TIPC_MAX_MEDIA_NAME ||
	    if_len <= 1 || if_len > TIPC_MAX_IF_NAME)
		return 0;

	/* hand the components back if the caller asked for them */
	if (name_parts) {
		strcpy(name_parts->media_name, media_part);
		strcpy(name_parts->if_name, if_part);
	}
	return 1;
}
/**
 * tipc_bearer_find - locates bearer object with matching bearer name
 *
 * Returns the bearer, or NULL if no enabled bearer has that name.
 * Caller must hold the RTNL lock (rtnl_dereference).
 */
struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 idx;

	for (idx = 0; idx < MAX_BEARERS; idx++) {
		struct tipc_bearer *b = rtnl_dereference(tn->bearer_list[idx]);

		if (b && !strcmp(b->name, name))
			return b;
	}
	return NULL;
}
/* record a newly-discovered destination node on the given bearer and
 * notify the broadcast and discovery machinery */
void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bearer *b;

	rcu_read_lock();
	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
	if (b) {
		tipc_bcbearer_sort(net, &b->nodes, dest, true);
		tipc_disc_add_dest(b->link_req);
	}
	rcu_read_unlock();
}
/* drop a destination node from the given bearer and notify the broadcast
 * and discovery machinery */
void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bearer *b;

	rcu_read_lock();
	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
	if (b) {
		tipc_bcbearer_sort(net, &b->nodes, dest, false);
		tipc_disc_remove_dest(b->link_req);
	}
	rcu_read_unlock();
}
/**
 * tipc_enable_bearer - enable bearer with the given name
 * @net: network namespace
 * @name: bearer name, "<media>:<interface>"
 * @disc_domain: neighbour discovery domain (cluster/node address)
 * @priority: requested link priority, or TIPC_MEDIA_LINK_PRI for the
 *	media default
 *
 * Validates the request, finds a free bearer slot (adjusting priority
 * downwards if more than two bearers already share it), attaches the
 * media, and starts neighbour discovery.  Caller holds the RTNL lock.
 *
 * Returns 0 on success or a negative errno.
 */
static int tipc_enable_bearer(struct net *net, const char *name,
			      u32 disc_domain, u32 priority)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bearer *b_ptr;
	struct tipc_media *m_ptr;
	struct tipc_bearer_names b_names;
	char addr_string[16];
	u32 bearer_id;
	u32 with_this_prio;
	u32 i;
	int res = -EINVAL;

	if (!tn->own_addr) {
		pr_warn("Bearer <%s> rejected, not supported in standalone mode\n",
			name);
		return -ENOPROTOOPT;
	}
	if (!bearer_name_validate(name, &b_names)) {
		pr_warn("Bearer <%s> rejected, illegal name\n", name);
		return -EINVAL;
	}
	if (tipc_addr_domain_valid(disc_domain) &&
	    (disc_domain != tn->own_addr)) {
		if (tipc_in_scope(disc_domain, tn->own_addr)) {
			disc_domain = tn->own_addr & TIPC_CLUSTER_MASK;
			res = 0;   /* accept any node in own cluster */
		} else if (in_own_cluster_exact(net, disc_domain))
			res = 0;   /* accept specified node in own cluster */
	}
	if (res) {
		pr_warn("Bearer <%s> rejected, illegal discovery domain\n",
			name);
		return -EINVAL;
	}
	if ((priority > TIPC_MAX_LINK_PRI) &&
	    (priority != TIPC_MEDIA_LINK_PRI)) {
		pr_warn("Bearer <%s> rejected, illegal priority\n", name);
		return -EINVAL;
	}

	m_ptr = tipc_media_find(b_names.media_name);
	if (!m_ptr) {
		pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
			name, b_names.media_name);
		return -EINVAL;
	}

	if (priority == TIPC_MEDIA_LINK_PRI)
		priority = m_ptr->priority;

restart:
	/* find a free slot; at most two bearers may share one priority,
	 * otherwise step the priority down and scan again */
	bearer_id = MAX_BEARERS;
	with_this_prio = 1;
	for (i = MAX_BEARERS; i-- != 0; ) {
		b_ptr = rtnl_dereference(tn->bearer_list[i]);
		if (!b_ptr) {
			bearer_id = i;
			continue;
		}
		if (!strcmp(name, b_ptr->name)) {
			pr_warn("Bearer <%s> rejected, already enabled\n",
				name);
			return -EINVAL;
		}
		if ((b_ptr->priority == priority) &&
		    (++with_this_prio > 2)) {
			if (priority-- == 0) {
				pr_warn("Bearer <%s> rejected, duplicate priority\n",
					name);
				return -EINVAL;
			}
			pr_warn("Bearer <%s> priority adjustment required %u->%u\n",
				name, priority + 1, priority);
			goto restart;
		}
	}
	if (bearer_id >= MAX_BEARERS) {
		pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
			name, MAX_BEARERS);
		return -EINVAL;
	}

	b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC);
	if (!b_ptr)
		return -ENOMEM;

	strcpy(b_ptr->name, name);
	b_ptr->media = m_ptr;
	res = m_ptr->enable_media(net, b_ptr);
	if (res) {
		kfree(b_ptr);	/* fix: b_ptr was leaked on this path */
		pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
			name, -res);
		return -EINVAL;
	}

	b_ptr->identity = bearer_id;
	b_ptr->tolerance = m_ptr->tolerance;
	b_ptr->window = m_ptr->window;
	b_ptr->domain = disc_domain;
	b_ptr->net_plane = bearer_id + 'A';
	b_ptr->priority = priority;

	res = tipc_disc_create(net, b_ptr, &b_ptr->bcast_addr);
	if (res) {
		/* bearer_disable() undoes enable_media() and frees b_ptr */
		bearer_disable(net, b_ptr, false);
		pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
			name);
		return -EINVAL;
	}

	rcu_assign_pointer(tn->bearer_list[bearer_id], b_ptr);

	pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
		name,
		tipc_addr_string_fill(addr_string, disc_domain), priority);
	return res;
}
/**
 * tipc_reset_bearer - Reset all links established over this bearer
 */
static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
{
	pr_info("Resetting bearer <%s>\n", b_ptr->name);
	/* drop every link on the bearer, then restart neighbour discovery */
	tipc_link_reset_list(net, b_ptr->identity);
	tipc_disc_reset(net, b_ptr);
	return 0;	/* always succeeds */
}
/**
 * bearer_disable - disable a bearer and schedule it for destruction
 * @net: network namespace
 * @b_ptr: bearer to disable
 * @shutting_down: passed through to the link-deletion code
 *
 * Note: This routine assumes caller holds RTNL lock.
 */
static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
			   bool shutting_down)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	u32 i;

	pr_info("Disabling bearer <%s>\n", b_ptr->name);
	b_ptr->media->disable_media(b_ptr);

	tipc_link_delete_list(net, b_ptr->identity, shutting_down);
	if (b_ptr->link_req)
		tipc_disc_delete(b_ptr->link_req);

	/* unhook the bearer from the per-namespace list before freeing it */
	for (i = 0; i < MAX_BEARERS; i++) {
		if (b_ptr == rtnl_dereference(tn->bearer_list[i])) {
			RCU_INIT_POINTER(tn->bearer_list[i], NULL);
			break;
		}
	}
	/* defer the free until all current RCU readers are done with b_ptr */
	kfree_rcu(b_ptr, rcu);
}
/* tipc_enable_l2_media - attach a TIPC bearer to the L2 network device
 * named after the ':' in the bearer name (name already validated)
 */
int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b)
{
	struct net_device *dev;
	char *driver_name = strchr((const char *)b->name, ':') + 1;

	/* Find device with specified name; takes a reference on it */
	dev = dev_get_by_name(net, driver_name);
	if (!dev)
		return -ENODEV;

	/* Associate TIPC bearer with L2 bearer */
	rcu_assign_pointer(b->media_ptr, dev);
	memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
	memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
	b->bcast_addr.media_id = b->media->type_id;
	b->bcast_addr.broadcast = 1;
	b->mtu = dev->mtu;
	b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr);
	/* publish last: makes the bearer visible to tipc_l2_rcv_msg() */
	rcu_assign_pointer(dev->tipc_ptr, b);
	return 0;
}
/* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
 *
 * Mark L2 bearer as inactive so that incoming buffers are thrown away,
 * then get worker thread to complete bearer cleanup.  (Can't do cleanup
 * here because cleanup code needs to sleep and caller holds spinlocks.)
 */
void tipc_disable_l2_media(struct tipc_bearer *b)
{
	struct net_device *dev;

	dev = (struct net_device *)rtnl_dereference(b->media_ptr);
	/* break both directions of the bearer <-> device association */
	RCU_INIT_POINTER(b->media_ptr, NULL);
	RCU_INIT_POINTER(dev->tipc_ptr, NULL);
	/* wait out in-flight RCU readers before dropping the device ref */
	synchronize_net();
	dev_put(dev);
}
/**
 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
 * @net: network namespace
 * @buf: the packet to be sent
 * @b: the bearer through which the packet is to be sent
 * @dest: peer destination address
 *
 * The original buffer is never modified (it may be needed for later
 * retransmission); a clone is given an L2 header and queued instead.
 * Always returns 0, even when the packet had to be dropped.
 */
int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
		     struct tipc_bearer *b, struct tipc_media_addr *dest)
{
	struct sk_buff *clone;
	struct net_device *dev;
	int delta;

	dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
	if (!dev)
		return 0;	/* bearer no longer attached to a device */

	clone = skb_clone(buf, GFP_ATOMIC);
	if (!clone)
		return 0;

	/* grow headroom if the device's hard header won't fit */
	delta = dev->hard_header_len - skb_headroom(buf);
	if ((delta > 0) &&
	    pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
		kfree_skb(clone);
		return 0;
	}

	skb_reset_network_header(clone);
	clone->dev = dev;
	clone->protocol = htons(ETH_P_TIPC);
	dev_hard_header(clone, dev, ETH_P_TIPC, dest->value,
			dev->dev_addr, clone->len);
	dev_queue_xmit(clone);
	return 0;
}
/* tipc_bearer_send - sends buffer to destination over bearer
 *
 * IMPORTANT:
 * The media send routine must not alter the buffer being passed in
 * as it may be needed for later retransmission!
 */
void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
		      struct tipc_media_addr *dest)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bearer *b;

	rcu_read_lock();
	b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
	if (likely(b))
		b->media->send_msg(net, buf, b, dest);
	rcu_read_unlock();
}
/**
 * tipc_l2_rcv_msg - handle incoming TIPC message from an interface
 * @buf: the received packet
 * @dev: the net device that the packet was received on
 * @pt: the packet_type structure which was used to register this handler
 * @orig_dev: the original receive net device in case the device is a bond
 *
 * Accept only packets explicitly sent to this node, or broadcast packets;
 * ignores packets sent using interface multicast, and traffic sent to other
 * nodes (which can happen if interface is running in promiscuous mode).
 */
static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct tipc_bearer *b_ptr;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
	if (likely(b_ptr)) {
		/* <= PACKET_BROADCAST accepts PACKET_HOST and
		 * PACKET_BROADCAST only */
		if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
			buf->next = NULL;
			tipc_rcv(dev_net(dev), buf, b_ptr);
			rcu_read_unlock();
			return NET_RX_SUCCESS;
		}
	}
	rcu_read_unlock();

	kfree_skb(buf);
	return NET_RX_DROP;
}
/**
 * tipc_l2_device_event - handle device events from network device
 * @nb: the context of the notification
 * @evt: the type of event
 * @ptr: the net device that the event was on
 *
 * This function is called by the Ethernet driver in case of link
 * change event.
 */
static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
				void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct tipc_bearer *b_ptr;

	b_ptr = rtnl_dereference(dev->tipc_ptr);
	if (!b_ptr)
		return NOTIFY_DONE;	/* device not used by TIPC */

	b_ptr->mtu = dev->mtu;

	switch (evt) {
	case NETDEV_CHANGE:
		if (netif_carrier_ok(dev))
			break;	/* carrier still up - nothing to do */
		/* fall through - carrier lost, treat like NETDEV_DOWN */
	case NETDEV_DOWN:
	case NETDEV_CHANGEMTU:
		tipc_reset_bearer(net, b_ptr);
		break;
	case NETDEV_CHANGEADDR:
		/* refresh the bearer's media address, then reset its links */
		b_ptr->media->raw2addr(b_ptr, &b_ptr->addr,
				       (char *)dev->dev_addr);
		tipc_reset_bearer(net, b_ptr);
		break;
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGENAME:
		/* device going away or renamed: drop the bearer entirely */
		bearer_disable(dev_net(dev), b_ptr, false);
		break;
	}
	return NOTIFY_OK;
}
/* handler registered for all incoming TIPC-over-L2 frames */
static struct packet_type tipc_packet_type __read_mostly = {
	.type = htons(ETH_P_TIPC),
	.func = tipc_l2_rcv_msg,
};

/* netdevice event notifier feeding tipc_l2_device_event() */
static struct notifier_block notifier = {
	.notifier_call = tipc_l2_device_event,
	.priority = 0,
};
int tipc_bearer_setup(void)
{
int err;
err = register_netdevice_notifier(¬ifier);
if (err)
return err;
dev_add_pack(&tipc_packet_type);
return 0;
}
void tipc_bearer_cleanup(void)
{
unregister_netdevice_notifier(¬ifier);
dev_remove_pack(&tipc_packet_type);
}
/* disable every enabled bearer in the namespace; called during stack
 * teardown with the RTNL lock held (rtnl_dereference) */
void tipc_bearer_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bearer *b_ptr;
	u32 i;

	for (i = 0; i < MAX_BEARERS; i++) {
		b_ptr = rtnl_dereference(tn->bearer_list[i]);
		if (b_ptr) {
			bearer_disable(net, b_ptr, true);
			/* bearer_disable() already NULLed the slot via
			 * RCU_INIT_POINTER; this plain store is redundant */
			tn->bearer_list[i] = NULL;
		}
	}
}
/* Caller should hold rtnl_lock to protect the bearer.
 * Appends one TIPC_NL_BEARER_GET message describing @bearer (name plus
 * nested priority/tolerance/window properties) to msg->skb.
 * Returns 0 on success or -EMSGSIZE if the skb ran out of room. */
static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
				struct tipc_bearer *bearer)
{
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_BEARER_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_BEARER);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_BEARER_NAME, bearer->name))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_BEARER_PROP);
	if (!prop)
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, bearer->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, bearer->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bearer->window))
		goto prop_msg_full;

	nla_nest_end(msg->skb, prop);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

/* unwind partially-built nests in reverse order of construction */
prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
/* netlink dump callback: emit one message per enabled bearer; the index
 * reached is saved in cb->args[0] for a possible resumed dump */
int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	int i = cb->args[0];	/* index saved by a previous pass */
	struct tipc_bearer *bearer;
	struct tipc_nl_msg msg;
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (i == MAX_BEARERS)
		return 0;	/* previous pass covered everything */

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	/* NOTE(review): the loop restarts at 0 rather than the saved index,
	 * so a resumed dump re-emits earlier bearers — confirm upstream */
	for (i = 0; i < MAX_BEARERS; i++) {
		bearer = rtnl_dereference(tn->bearer_list[i]);
		if (!bearer)
			continue;

		err = __tipc_nl_add_bearer(&msg, bearer);
		if (err)
			break;
	}
	rtnl_unlock();

	cb->args[0] = i;

	return skb->len;
}
/* netlink: reply to a TIPC_NL_BEARER_GET request with the attributes of
 * the single bearer named in TIPC_NLA_BEARER_NAME */
int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *name;
	struct sk_buff *rep;
	struct tipc_bearer *bearer;
	struct tipc_nl_msg msg;
	struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
	struct net *net = genl_info_net(info);

	if (!info->attrs[TIPC_NLA_BEARER])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
			       info->attrs[TIPC_NLA_BEARER],
			       tipc_nl_bearer_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_BEARER_NAME])
		return -EINVAL;
	name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);

	/* build the reply in a fresh skb */
	rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	msg.skb = rep;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	rtnl_lock();
	bearer = tipc_bearer_find(net, name);
	if (!bearer) {
		err = -EINVAL;
		goto err_out;
	}

	err = __tipc_nl_add_bearer(&msg, bearer);
	if (err)
		goto err_out;
	rtnl_unlock();

	return genlmsg_reply(rep, info);
err_out:
	/* on failure the reply skb must be freed here */
	rtnl_unlock();
	nlmsg_free(rep);

	return err;
}
/* netlink: disable (and schedule destruction of) the bearer named in
 * TIPC_NLA_BEARER_NAME */
int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *name;
	struct tipc_bearer *bearer;
	struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_BEARER])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
			       info->attrs[TIPC_NLA_BEARER],
			       tipc_nl_bearer_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_BEARER_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);

	rtnl_lock();
	bearer = tipc_bearer_find(net, name);
	if (!bearer) {
		rtnl_unlock();
		return -EINVAL;
	}

	bearer_disable(net, bearer, false);
	rtnl_unlock();

	return 0;
}
/* TIPC_NL_BEARER_ENABLE: bring up the bearer named in the request.
 * Discovery domain and link priority are optional; they default to the
 * local cluster and TIPC_MEDIA_LINK_PRI respectively.
 * Returns 0 on success or a negative errno.
 */
int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct nlattr *battrs[TIPC_NLA_BEARER_MAX + 1];
	u32 domain = tn->own_addr & TIPC_CLUSTER_MASK;
	u32 prio = TIPC_MEDIA_LINK_PRI;
	char *name;
	int err;

	if (!info->attrs[TIPC_NLA_BEARER])
		return -EINVAL;

	err = nla_parse_nested(battrs, TIPC_NLA_BEARER_MAX,
			       info->attrs[TIPC_NLA_BEARER],
			       tipc_nl_bearer_policy);
	if (err)
		return err;

	if (!battrs[TIPC_NLA_BEARER_NAME])
		return -EINVAL;
	name = nla_data(battrs[TIPC_NLA_BEARER_NAME]);

	if (battrs[TIPC_NLA_BEARER_DOMAIN])
		domain = nla_get_u32(battrs[TIPC_NLA_BEARER_DOMAIN]);

	if (battrs[TIPC_NLA_BEARER_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(battrs[TIPC_NLA_BEARER_PROP],
					      props);
		if (err)
			return err;

		if (props[TIPC_NLA_PROP_PRIO])
			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
	}

	/* Bearer creation is serialized by RTNL */
	rtnl_lock();
	err = tipc_enable_bearer(net, name, domain, prio);
	rtnl_unlock();

	return err;
}
/* TIPC_NL_BEARER_SET: update the link properties (tolerance, priority,
 * window) of one named bearer.  Only the properties present in the request
 * are changed.  Returns 0 on success or a negative errno.
 */
int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *name;
	struct tipc_bearer *b;
	struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
	struct net *net = genl_info_net(info);

	if (!info->attrs[TIPC_NLA_BEARER])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
			       info->attrs[TIPC_NLA_BEARER],
			       tipc_nl_bearer_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_BEARER_NAME])
		return -EINVAL;
	name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);

	/* The bearer fields below are written under RTNL */
	rtnl_lock();
	b = tipc_bearer_find(net, name);
	if (!b) {
		rtnl_unlock();
		return -EINVAL;
	}

	if (attrs[TIPC_NLA_BEARER_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP],
					      props);
		if (err) {
			rtnl_unlock();
			return err;
		}

		/* Each property is optional and updated independently */
		if (props[TIPC_NLA_PROP_TOL])
			b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if (props[TIPC_NLA_PROP_PRIO])
			b->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (props[TIPC_NLA_PROP_WIN])
			b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
	}
	rtnl_unlock();

	return 0;
}
/* Append one TIPC_NL_MEDIA_GET record describing @media to the netlink
 * message in @msg.  Returns 0 on success or -EMSGSIZE when the skb is
 * full; on failure everything added here is rolled back so a dump can be
 * resumed in a later pass.
 */
static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
			       struct tipc_media *media)
{
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_MEDIA_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_MEDIA);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_MEDIA_NAME, media->name))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_MEDIA_PROP);
	/* NOTE(review): on this failure prop is NULL and prop_msg_full calls
	 * nla_nest_cancel(skb, NULL); nlmsg_trim() tolerates a NULL mark --
	 * confirm on the target kernel version. */
	if (!prop)
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, media->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, media->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, media->window))
		goto prop_msg_full;

	nla_nest_end(msg->skb, prop);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

/* Unwind in reverse order of construction */
prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
/* TIPC_NL_MEDIA_GET dump: emit one record per registered media type.
 * The index of the next media to dump is carried across passes in
 * cb->args[0]; once it reaches MAX_MEDIA the dump is finished.
 */
int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tipc_nl_msg msg;
	int i = cb->args[0];

	/* All media types already dumped in a previous pass? */
	if (i == MAX_MEDIA)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rtnl_lock();
	while (media_info_array[i] != NULL) {
		/* Stop (and resume here later) when the skb fills up */
		if (__tipc_nl_add_media(&msg, media_info_array[i]))
			break;
		i++;
	}
	rtnl_unlock();

	cb->args[0] = i;
	return skb->len;
}
/* TIPC_NL_MEDIA_GET: build and send a reply describing one named media
 * type.  Returns 0 on success or a negative errno.
 *
 * Fix: the attribute table is now sized by TIPC_NLA_MEDIA_MAX, matching
 * the maxtype passed to nla_parse_nested() below.  It was previously sized
 * by TIPC_NLA_BEARER_MAX, which overflows this stack array whenever
 * TIPC_NLA_MEDIA_MAX > TIPC_NLA_BEARER_MAX.
 */
int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *name;
	struct tipc_nl_msg msg;
	struct tipc_media *media;
	struct sk_buff *rep;
	struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];

	/* Both the nested TIPC_NLA_MEDIA container and a name are mandatory */
	if (!info->attrs[TIPC_NLA_MEDIA])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX,
			       info->attrs[TIPC_NLA_MEDIA],
			       tipc_nl_media_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_MEDIA_NAME])
		return -EINVAL;
	name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);

	rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	msg.skb = rep;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	rtnl_lock();
	media = tipc_media_find(name);
	if (!media) {
		err = -EINVAL;
		goto err_out;
	}

	err = __tipc_nl_add_media(&msg, media);
	if (err)
		goto err_out;
	rtnl_unlock();

	return genlmsg_reply(rep, info);
err_out:
	rtnl_unlock();
	nlmsg_free(rep);	/* the reply skb is only consumed on success */

	return err;
}
/* TIPC_NL_MEDIA_SET: update the default link properties of one named
 * media type.  Returns 0 on success or a negative errno.
 *
 * Fixes:
 *  - the return value of nla_parse_nested() was ignored, so a malformed
 *    request could be processed with an uninitialized attribute table;
 *  - the attribute table was sized by TIPC_NLA_BEARER_MAX although it is
 *    parsed with TIPC_NLA_MEDIA_MAX (potential stack buffer overflow).
 */
int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *name;
	struct tipc_media *m;
	struct nlattr *attrs[TIPC_NLA_MEDIA_MAX + 1];

	if (!info->attrs[TIPC_NLA_MEDIA])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX,
			       info->attrs[TIPC_NLA_MEDIA],
			       tipc_nl_media_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_MEDIA_NAME])
		return -EINVAL;
	name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);

	/* Media properties are written under RTNL */
	rtnl_lock();
	m = tipc_media_find(name);
	if (!m) {
		rtnl_unlock();
		return -EINVAL;
	}

	if (attrs[TIPC_NLA_MEDIA_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP],
					      props);
		if (err) {
			rtnl_unlock();
			return err;
		}

		/* Each property is optional and updated independently */
		if (props[TIPC_NLA_PROP_TOL])
			m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if (props[TIPC_NLA_PROP_PRIO])
			m->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (props[TIPC_NLA_PROP_WIN])
			m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
	}
	rtnl_unlock();

	return 0;
}
/*
* Support for Sharp SL-Cxx00 Series of PDAs
* Models: SL-C3000 (Spitz), SL-C1000 (Akita) and SL-C3100 (Borzoi)
*
* Copyright (c) 2005 Richard Purdie
*
* Based on Sharp's 2.4 kernel patches/lubbock.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio_keys.h>
#include <linux/gpio.h>
#include <linux/leds.h>
#include <linux/i2c.h>
#include <linux/i2c/pxa-i2c.h>
#include <linux/i2c/pca953x.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/corgi_lcd.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/mtd/sharpsl.h>
#include <linux/mtd/physmap.h>
#include <linux/input/matrix_keypad.h>
#include <linux/regulator/machine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/sharpsl_param.h>
#include <asm/hardware/scoop.h>
#include <mach/pxa27x.h>
#include <mach/pxa27x-udc.h>
#include <mach/reset.h>
#include <mach/irda.h>
#include <mach/mmc.h>
#include <mach/ohci.h>
#include <mach/pxafb.h>
#include <mach/spitz.h>
#include <mach/sharpsl_pm.h>
#include <mach/smemc.h>
#include "generic.h"
#include "devices.h"
/******************************************************************************
 * Pin configuration
 ******************************************************************************/
/* PXA27x MFP (multi-function pin) table, applied once during boot by
 * pxa2xx_mfp_config(); __initdata lets the kernel discard it afterwards. */
static unsigned long spitz_pin_config[] __initdata = {
	/* Chip Selects */
	GPIO78_nCS_2,	/* SCOOP #2 */
	GPIO79_nCS_3,	/* NAND */
	GPIO80_nCS_4,	/* SCOOP #1 */

	/* LCD - 16bpp Active TFT */
	GPIOxx_LCD_TFT_16BPP,

	/* PC Card */
	GPIO48_nPOE,
	GPIO49_nPWE,
	GPIO50_nPIOR,
	GPIO51_nPIOW,
	GPIO85_nPCE_1,
	GPIO54_nPCE_2,
	GPIO55_nPREG,
	GPIO56_nPWAIT,
	GPIO57_nIOIS16,
	GPIO104_PSKTSEL,

	/* I2S */
	GPIO28_I2S_BITCLK_OUT,
	GPIO29_I2S_SDATA_IN,
	GPIO30_I2S_SDATA_OUT,
	GPIO31_I2S_SYNC,

	/* MMC */
	GPIO32_MMC_CLK,
	GPIO112_MMC_CMD,
	GPIO92_MMC_DAT_0,
	GPIO109_MMC_DAT_1,
	GPIO110_MMC_DAT_2,
	GPIO111_MMC_DAT_3,

	/* GPIOs */
	GPIO9_GPIO,	/* SPITZ_GPIO_nSD_DETECT */
	GPIO16_GPIO,	/* SPITZ_GPIO_SYNC */
	GPIO81_GPIO,	/* SPITZ_GPIO_nSD_WP */
	GPIO41_GPIO,	/* SPITZ_GPIO_USB_CONNECT */
	GPIO37_GPIO,	/* SPITZ_GPIO_USB_HOST */
	GPIO35_GPIO,	/* SPITZ_GPIO_USB_DEVICE */
	GPIO22_GPIO,	/* SPITZ_GPIO_HSYNC */
	GPIO94_GPIO,	/* SPITZ_GPIO_CF_CD */
	GPIO105_GPIO,	/* SPITZ_GPIO_CF_IRQ */
	GPIO106_GPIO,	/* SPITZ_GPIO_CF2_IRQ */

	/* GPIO matrix keypad */
	GPIO88_GPIO,	/* column 0 */
	GPIO23_GPIO,	/* column 1 */
	GPIO24_GPIO,	/* column 2 */
	GPIO25_GPIO,	/* column 3 */
	GPIO26_GPIO,	/* column 4 */
	GPIO27_GPIO,	/* column 5 */
	GPIO52_GPIO,	/* column 6 */
	GPIO103_GPIO,	/* column 7 */
	GPIO107_GPIO,	/* column 8 */
	GPIO108_GPIO,	/* column 9 */
	GPIO114_GPIO,	/* column 10 */
	GPIO12_GPIO,	/* row 0 */
	GPIO17_GPIO,	/* row 1 */
	GPIO91_GPIO,	/* row 2 */
	GPIO34_GPIO,	/* row 3 */
	GPIO36_GPIO,	/* row 4 */
	GPIO38_GPIO,	/* row 5 */
	GPIO39_GPIO,	/* row 6 */

	/* I2C */
	GPIO117_I2C_SCL,
	GPIO118_I2C_SDA,

	/* Wakeup sources: power key and the reset line */
	GPIO0_GPIO | WAKEUP_ON_EDGE_RISE,	/* SPITZ_GPIO_KEY_INT */
	GPIO1_GPIO | WAKEUP_ON_EDGE_FALL,	/* SPITZ_GPIO_RESET */
};
/******************************************************************************
 * Scoop GPIO expander
 ******************************************************************************/
#if defined(CONFIG_SHARP_SCOOP) || defined(CONFIG_SHARP_SCOOP_MODULE)

/* SCOOP Device #1 */
static struct resource spitz_scoop_1_resources[] = {
	[0] = {
		.start	= 0x10800000,
		.end	= 0x10800fff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct scoop_config spitz_scoop_1_setup = {
	.io_dir		= SPITZ_SCP_IO_DIR,
	.io_out		= SPITZ_SCP_IO_OUT,
	.suspend_clr	= SPITZ_SCP_SUS_CLR,
	.suspend_set	= SPITZ_SCP_SUS_SET,
	.gpio_base	= SPITZ_SCP_GPIO_BASE,
};

/* Not static: also referenced by the PCMCIA code below */
struct platform_device spitz_scoop_1_device = {
	.name		= "sharp-scoop",
	.id		= 0,
	.dev		= {
		.platform_data	= &spitz_scoop_1_setup,
	},
	.num_resources	= ARRAY_SIZE(spitz_scoop_1_resources),
	.resource	= spitz_scoop_1_resources,
};

/* SCOOP Device #2 */
static struct resource spitz_scoop_2_resources[] = {
	[0] = {
		.start	= 0x08800040,
		.end	= 0x08800fff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct scoop_config spitz_scoop_2_setup = {
	.io_dir		= SPITZ_SCP2_IO_DIR,
	.io_out		= SPITZ_SCP2_IO_OUT,
	.suspend_clr	= SPITZ_SCP2_SUS_CLR,
	.suspend_set	= SPITZ_SCP2_SUS_SET,
	.gpio_base	= SPITZ_SCP2_GPIO_BASE,
};

struct platform_device spitz_scoop_2_device = {
	.name		= "sharp-scoop",
	.id		= 1,
	.dev		= {
		.platform_data	= &spitz_scoop_2_setup,
	},
	.num_resources	= ARRAY_SIZE(spitz_scoop_2_resources),
	.resource	= spitz_scoop_2_resources,
};

static void __init spitz_scoop_init(void)
{
	platform_device_register(&spitz_scoop_1_device);

	/* Akita doesn't have the second SCOOP chip */
	if (!machine_is_akita())
		platform_device_register(&spitz_scoop_2_device);
}

/* Power control is shared between one of the CF slots and SD */
static void spitz_card_pwr_ctrl(uint8_t enable, uint8_t new_cpr)
{
	unsigned short cpr;
	unsigned long flags;

	/* Powering up: raise CF power before touching the CPR register */
	if (new_cpr & 0x7) {
		gpio_set_value(SPITZ_GPIO_CF_POWER, 1);
		mdelay(5);
	}

	local_irq_save(flags);

	cpr = read_scoop_reg(&spitz_scoop_1_device.dev, SCOOP_CPR);

	/* Set the requested bits when enabling, clear them when disabling */
	if (enable & new_cpr)
		cpr |= new_cpr;
	else
		cpr &= ~enable;

	write_scoop_reg(&spitz_scoop_1_device.dev, SCOOP_CPR, cpr);

	local_irq_restore(flags);

	/* Powering down: drop CF power once no power bits remain set */
	if (!(cpr & 0x7)) {
		mdelay(1);
		gpio_set_value(SPITZ_GPIO_CF_POWER, 0);
	}
}
#else
static inline void spitz_scoop_init(void) {}
static inline void spitz_card_pwr_ctrl(uint8_t enable, uint8_t new_cpr) {}
#endif
/******************************************************************************
 * PCMCIA
 ******************************************************************************/
#if defined(CONFIG_PCMCIA_PXA2XX) || defined(CONFIG_PCMCIA_PXA2XX_MODULE)
/* Slot 0 shares its power rail with the SD slot, so its power changes go
 * through spitz_card_pwr_ctrl() instead of a plain register write. */
static void spitz_pcmcia_pwr(struct device *scoop, uint16_t cpr, int nr)
{
	/* Only need to override behaviour for slot 0 */
	if (nr == 0)
		spitz_card_pwr_ctrl(
			cpr & (SCOOP_CPR_CF_3V | SCOOP_CPR_CF_XV), cpr);
	else
		write_scoop_reg(scoop, SCOOP_CPR, cpr);
}

static struct scoop_pcmcia_dev spitz_pcmcia_scoop[] = {
	{
		.dev		= &spitz_scoop_1_device.dev,
		.irq		= SPITZ_IRQ_GPIO_CF_IRQ,
		.cd_irq		= SPITZ_IRQ_GPIO_CF_CD,
		.cd_irq_str	= "PCMCIA0 CD",
	}, {
		.dev		= &spitz_scoop_2_device.dev,
		.irq		= SPITZ_IRQ_GPIO_CF2_IRQ,
		.cd_irq		= -1,	/* slot 1 has no card-detect IRQ */
	},
};

static struct scoop_pcmcia_config spitz_pcmcia_config = {
	.devs		= &spitz_pcmcia_scoop[0],
	.num_devs	= 2,
	.power_ctrl	= spitz_pcmcia_pwr,
};

static void __init spitz_pcmcia_init(void)
{
	/* Akita has only one PCMCIA slot used */
	if (machine_is_akita())
		spitz_pcmcia_config.num_devs = 1;

	platform_scoop_config = &spitz_pcmcia_config;
}
#else
static inline void spitz_pcmcia_init(void) {}
#endif
/******************************************************************************
 * GPIO keyboard
 ******************************************************************************/
#if defined(CONFIG_KEYBOARD_MATRIX) || defined(CONFIG_KEYBOARD_MATRIX_MODULE)

/* Spitz-specific keys, mapped onto generic function key codes */
#define SPITZ_KEY_CALENDAR	KEY_F1
#define SPITZ_KEY_ADDRESS	KEY_F2
#define SPITZ_KEY_FN		KEY_F3
#define SPITZ_KEY_CANCEL	KEY_F4
#define SPITZ_KEY_EXOK		KEY_F5
#define SPITZ_KEY_EXCANCEL	KEY_F6
#define SPITZ_KEY_EXJOGDOWN	KEY_F7
#define SPITZ_KEY_EXJOGUP	KEY_F8
#define SPITZ_KEY_JAP1		KEY_LEFTALT
#define SPITZ_KEY_JAP2		KEY_RIGHTCTRL
#define SPITZ_KEY_SYNC		KEY_F9
#define SPITZ_KEY_MAIL		KEY_F10
#define SPITZ_KEY_OK		KEY_F11
#define SPITZ_KEY_MENU		KEY_F12

/* KEY(row, col, keycode) matrix table; unlisted positions are unused */
static const uint32_t spitz_keymap[] = {
	KEY(0, 0, KEY_LEFTCTRL),
	KEY(0, 1, KEY_1),
	KEY(0, 2, KEY_3),
	KEY(0, 3, KEY_5),
	KEY(0, 4, KEY_6),
	KEY(0, 5, KEY_7),
	KEY(0, 6, KEY_9),
	KEY(0, 7, KEY_0),
	KEY(0, 8, KEY_BACKSPACE),
	KEY(0, 9, SPITZ_KEY_EXOK),	/* EXOK */
	KEY(0, 10, SPITZ_KEY_EXCANCEL),	/* EXCANCEL */
	KEY(1, 1, KEY_2),
	KEY(1, 2, KEY_4),
	KEY(1, 3, KEY_R),
	KEY(1, 4, KEY_Y),
	KEY(1, 5, KEY_8),
	KEY(1, 6, KEY_I),
	KEY(1, 7, KEY_O),
	KEY(1, 8, KEY_P),
	KEY(1, 9, SPITZ_KEY_EXJOGDOWN),	/* EXJOGDOWN */
	KEY(1, 10, SPITZ_KEY_EXJOGUP),	/* EXJOGUP */
	KEY(2, 0, KEY_TAB),
	KEY(2, 1, KEY_Q),
	KEY(2, 2, KEY_E),
	KEY(2, 3, KEY_T),
	KEY(2, 4, KEY_G),
	KEY(2, 5, KEY_U),
	KEY(2, 6, KEY_J),
	KEY(2, 7, KEY_K),
	KEY(3, 0, SPITZ_KEY_ADDRESS),	/* ADDRESS */
	KEY(3, 1, KEY_W),
	KEY(3, 2, KEY_S),
	KEY(3, 3, KEY_F),
	KEY(3, 4, KEY_V),
	KEY(3, 5, KEY_H),
	KEY(3, 6, KEY_M),
	KEY(3, 7, KEY_L),
	KEY(3, 9, KEY_RIGHTSHIFT),
	KEY(4, 0, SPITZ_KEY_CALENDAR),	/* CALENDAR */
	KEY(4, 1, KEY_A),
	KEY(4, 2, KEY_D),
	KEY(4, 3, KEY_C),
	KEY(4, 4, KEY_B),
	KEY(4, 5, KEY_N),
	KEY(4, 6, KEY_DOT),
	KEY(4, 8, KEY_ENTER),
	KEY(4, 9, KEY_LEFTSHIFT),
	KEY(5, 0, SPITZ_KEY_MAIL),	/* MAIL */
	KEY(5, 1, KEY_Z),
	KEY(5, 2, KEY_X),
	KEY(5, 3, KEY_MINUS),
	KEY(5, 4, KEY_SPACE),
	KEY(5, 5, KEY_COMMA),
	KEY(5, 7, KEY_UP),
	KEY(5, 10, SPITZ_KEY_FN),	/* FN */
	KEY(6, 0, KEY_SYSRQ),
	KEY(6, 1, SPITZ_KEY_JAP1),	/* JAP1 */
	KEY(6, 2, SPITZ_KEY_JAP2),	/* JAP2 */
	KEY(6, 3, SPITZ_KEY_CANCEL),	/* CANCEL */
	KEY(6, 4, SPITZ_KEY_OK),	/* OK */
	KEY(6, 5, SPITZ_KEY_MENU),	/* MENU */
	KEY(6, 6, KEY_LEFT),
	KEY(6, 7, KEY_DOWN),
	KEY(6, 8, KEY_RIGHT),
};

static const struct matrix_keymap_data spitz_keymap_data = {
	.keymap		= spitz_keymap,
	.keymap_size	= ARRAY_SIZE(spitz_keymap),
};

/* Physical GPIO numbers backing the matrix rows and columns; these match
 * the GPIOnn_GPIO entries in spitz_pin_config above. */
static const uint32_t spitz_row_gpios[] =
		{ 12, 17, 91, 34, 36, 38, 39 };
static const uint32_t spitz_col_gpios[] =
		{ 88, 23, 24, 25, 26, 27, 52, 103, 107, 108, 114 };

static struct matrix_keypad_platform_data spitz_mkp_pdata = {
	.keymap_data		= &spitz_keymap_data,
	.row_gpios		= spitz_row_gpios,
	.col_gpios		= spitz_col_gpios,
	.num_row_gpios		= ARRAY_SIZE(spitz_row_gpios),
	.num_col_gpios		= ARRAY_SIZE(spitz_col_gpios),
	.col_scan_delay_us	= 10,
	.debounce_ms		= 10,
	.wakeup			= 1,	/* keypad can wake the system */
};

static struct platform_device spitz_mkp_device = {
	.name		= "matrix-keypad",
	.id		= -1,
	.dev		= {
		.platform_data	= &spitz_mkp_pdata,
	},
};

static void __init spitz_mkp_init(void)
{
	platform_device_register(&spitz_mkp_device);
}
#else
static inline void spitz_mkp_init(void) {}
#endif
/******************************************************************************
 * GPIO keys
 ******************************************************************************/
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
/* Power button plus two lid-state switches */
static struct gpio_keys_button spitz_gpio_keys[] = {
	{
		.type	= EV_PWR,
		.code	= KEY_SUSPEND,
		.gpio	= SPITZ_GPIO_ON_KEY,
		.desc	= "On Off",
		.wakeup	= 1,	/* power key wakes the system */
	},
	/* Two buttons detecting the lid state */
	{
		.type	= EV_SW,
		.code	= 0,
		.gpio	= SPITZ_GPIO_SWA,
		.desc	= "Display Down",
	},
	{
		.type	= EV_SW,
		.code	= 1,
		.gpio	= SPITZ_GPIO_SWB,
		.desc	= "Lid Closed",
	},
};

static struct gpio_keys_platform_data spitz_gpio_keys_platform_data = {
	.buttons	= spitz_gpio_keys,
	.nbuttons	= ARRAY_SIZE(spitz_gpio_keys),
};

static struct platform_device spitz_gpio_keys_device = {
	.name	= "gpio-keys",
	.id	= -1,
	.dev	= {
		.platform_data	= &spitz_gpio_keys_platform_data,
	},
};

static void __init spitz_keys_init(void)
{
	platform_device_register(&spitz_gpio_keys_device);
}
#else
static inline void spitz_keys_init(void) {}
#endif
/******************************************************************************
 * LEDs
 ******************************************************************************/
#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
/* Amber LED follows the charger trigger, green LED follows disk activity */
static struct gpio_led spitz_gpio_leds[] = {
	{
		.name			= "spitz:amber:charge",
		.default_trigger	= "sharpsl-charge",
		.gpio			= SPITZ_GPIO_LED_ORANGE,
	},
	{
		.name			= "spitz:green:hddactivity",
		.default_trigger	= "ide-disk",
		.gpio			= SPITZ_GPIO_LED_GREEN,
	},
};

static struct gpio_led_platform_data spitz_gpio_leds_info = {
	.leds		= spitz_gpio_leds,
	.num_leds	= ARRAY_SIZE(spitz_gpio_leds),
};

static struct platform_device spitz_led_device = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= {
		.platform_data	= &spitz_gpio_leds_info,
	},
};

static void __init spitz_leds_init(void)
{
	platform_device_register(&spitz_led_device);
}
#else
static inline void spitz_leds_init(void) {}
#endif
/******************************************************************************
 * SSP Devices
 ******************************************************************************/
#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
/* Busy-wait until the next rising edge on the LCD HSYNC line; used by the
 * ads7846 driver to synchronise touch sampling with the display. */
static void spitz_ads7846_wait_for_hsync(void)
{
	while (gpio_get_value(SPITZ_GPIO_HSYNC))
		cpu_relax();
	while (!gpio_get_value(SPITZ_GPIO_HSYNC))
		cpu_relax();
}

static struct ads7846_platform_data spitz_ads7846_info = {
	.model			= 7846,
	.vref_delay_usecs	= 100,
	.x_plate_ohms		= 419,
	.y_plate_ohms		= 486,
	.pressure_max		= 1024,
	.gpio_pendown		= SPITZ_GPIO_TP_INT,
	.wait_for_sync		= spitz_ads7846_wait_for_hsync,
};

static struct pxa2xx_spi_chip spitz_ads7846_chip = {
	.gpio_cs	= SPITZ_GPIO_ADS7846_CS,
};

/* Poke the battery driver, if it is loaded; the symbol may live in a
 * module, so it is resolved dynamically via symbol_get(). */
static void spitz_bl_kick_battery(void)
{
	void (*kick_batt)(void);

	kick_batt = symbol_get(sharpsl_battery_kick);
	if (kick_batt) {
		kick_batt();
		symbol_put(sharpsl_battery_kick);
	}
}

static struct corgi_lcd_platform_data spitz_lcdcon_info = {
	.init_mode		= CORGI_LCD_MODE_VGA,
	.max_intensity		= 0x2f,
	.default_intensity	= 0x1f,
	.limit_mask		= 0x0b,
	.gpio_backlight_cont	= SPITZ_GPIO_BACKLIGHT_CONT,
	.gpio_backlight_on	= SPITZ_GPIO_BACKLIGHT_ON,
	.kick_battery		= spitz_bl_kick_battery,
};

static struct pxa2xx_spi_chip spitz_lcdcon_chip = {
	.gpio_cs	= SPITZ_GPIO_LCDCON_CS,
};

static struct pxa2xx_spi_chip spitz_max1111_chip = {
	.gpio_cs	= SPITZ_GPIO_MAX1111_CS,
};

/* Three devices on SSP bus 2: touchscreen, LCD controller and ADC */
static struct spi_board_info spitz_spi_devices[] = {
	{
		.modalias		= "ads7846",
		.max_speed_hz		= 1200000,
		.bus_num		= 2,
		.chip_select		= 0,
		.platform_data		= &spitz_ads7846_info,
		.controller_data	= &spitz_ads7846_chip,
		.irq			= PXA_GPIO_TO_IRQ(SPITZ_GPIO_TP_INT),
	}, {
		.modalias		= "corgi-lcd",
		.max_speed_hz		= 50000,
		.bus_num		= 2,
		.chip_select		= 1,
		.platform_data		= &spitz_lcdcon_info,
		.controller_data	= &spitz_lcdcon_chip,
	}, {
		.modalias		= "max1111",
		.max_speed_hz		= 450000,
		.bus_num		= 2,
		.chip_select		= 2,
		.controller_data	= &spitz_max1111_chip,
	},
};

static struct pxa2xx_spi_master spitz_spi_info = {
	.num_chipselect	= 3,
};

static void __init spitz_spi_init(void)
{
	struct corgi_lcd_platform_data *lcd_data = &spitz_lcdcon_info;

	/* Akita routes the backlight lines through its I/O expander */
	if (machine_is_akita()) {
		lcd_data->gpio_backlight_cont = AKITA_GPIO_BACKLIGHT_CONT;
		lcd_data->gpio_backlight_on = AKITA_GPIO_BACKLIGHT_ON;
	}

	pxa2xx_set_spi_info(2, &spitz_spi_info);
	spi_register_board_info(ARRAY_AND_SIZE(spitz_spi_devices));
}
#else
static inline void spitz_spi_init(void) {}
#endif
/******************************************************************************
 * SD/MMC card controller
 ******************************************************************************/
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
/*
 * NOTE: The card detect interrupt isn't debounced so we delay it by 250ms to
 * give the card a chance to fully insert/eject.
 */
static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
{
	struct pxamci_platform_data* p_d = dev->platform_data;

	/* Enable SD power only for voltages within our OCR mask */
	if ((1 << vdd) & p_d->ocr_mask)
		spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, SCOOP_CPR_SD_3V);
	else
		spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, 0x0);
}

static struct pxamci_platform_data spitz_mci_platform_data = {
	.detect_delay_ms	= 250,
	.ocr_mask		= MMC_VDD_32_33|MMC_VDD_33_34,
	.setpower		= spitz_mci_setpower,
	.gpio_card_detect	= SPITZ_GPIO_nSD_DETECT,
	.gpio_card_ro		= SPITZ_GPIO_nSD_WP,
	.gpio_power		= -1,	/* power handled via setpower() */
};

static void __init spitz_mmc_init(void)
{
	pxa_set_mci_info(&spitz_mci_platform_data);
}
#else
static inline void spitz_mmc_init(void) {}
#endif
/******************************************************************************
 * USB Host
 ******************************************************************************/
#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
/* Claim the USB host-power GPIO and route the transceiver to port 2 */
static int spitz_ohci_init(struct device *dev)
{
	int err;

	err = gpio_request(SPITZ_GPIO_USB_HOST, "USB_HOST");
	if (err)
		return err;

	/* Only Port 2 is connected, setup USB Port 2 Output Control Register */
	UP2OCR = UP2OCR_HXS | UP2OCR_HXOE | UP2OCR_DPPDE | UP2OCR_DMPDE;

	return gpio_direction_output(SPITZ_GPIO_USB_HOST, 1);
}

static void spitz_ohci_exit(struct device *dev)
{
	gpio_free(SPITZ_GPIO_USB_HOST);
}

static struct pxaohci_platform_data spitz_ohci_platform_data = {
	.port_mode	= PMM_NPS_MODE,
	.init		= spitz_ohci_init,
	.exit		= spitz_ohci_exit,
	.flags		= ENABLE_PORT_ALL | NO_OC_PROTECTION,
	.power_budget	= 150,
};

static void __init spitz_uhc_init(void)
{
	pxa_set_ohci_info(&spitz_ohci_platform_data);
}
#else
static inline void spitz_uhc_init(void) {}
#endif
/******************************************************************************
 * IrDA
 ******************************************************************************/
#if defined(CONFIG_PXA_FICP) || defined(CONFIG_PXA_FICP_MODULE)
static struct pxaficp_platform_data spitz_ficp_platform_data = {
	.transceiver_cap	= IR_SIRMODE | IR_OFF,
};

static void __init spitz_irda_init(void)
{
	/* The IR power-down GPIO differs between Akita and the other models */
	if (machine_is_akita())
		spitz_ficp_platform_data.gpio_pwdown = AKITA_GPIO_IR_ON;
	else
		spitz_ficp_platform_data.gpio_pwdown = SPITZ_GPIO_IR_ON;

	pxa_set_ficp_info(&spitz_ficp_platform_data);
}
#else
static inline void spitz_irda_init(void) {}
#endif
/******************************************************************************
 * Framebuffer
 ******************************************************************************/
#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
/* Two fixed panel timings: 480x640 (VGA) and 240x320 (QVGA) */
static struct pxafb_mode_info spitz_pxafb_modes[] = {
	{
		.pixclock	= 19231,
		.xres		= 480,
		.yres		= 640,
		.bpp		= 16,
		.hsync_len	= 40,
		.left_margin	= 46,
		.right_margin	= 125,
		.vsync_len	= 3,
		.upper_margin	= 1,
		.lower_margin	= 0,
		.sync		= 0,
	}, {
		.pixclock	= 134617,
		.xres		= 240,
		.yres		= 320,
		.bpp		= 16,
		.hsync_len	= 20,
		.left_margin	= 20,
		.right_margin	= 46,
		.vsync_len	= 2,
		.upper_margin	= 1,
		.lower_margin	= 0,
		.sync		= 0,
	},
};

static struct pxafb_mach_info spitz_pxafb_info = {
	.modes		= spitz_pxafb_modes,
	.num_modes	= ARRAY_SIZE(spitz_pxafb_modes),
	.fixed_modes	= 1,	/* panel supports exactly the modes above */
	.lcd_conn	= LCD_COLOR_TFT_16BPP | LCD_ALTERNATE_MAPPING,
};

static void __init spitz_lcd_init(void)
{
	pxa_set_fb_info(NULL, &spitz_pxafb_info);
}
#else
static inline void spitz_lcd_init(void) {}
#endif
/******************************************************************************
 * NAND Flash
 ******************************************************************************/
#if defined(CONFIG_MTD_NAND_SHARPSL) || defined(CONFIG_MTD_NAND_SHARPSL_MODULE)
/* Partition [1] ("Root Filesystem") is resized per model in
 * spitz_nand_init() below. */
static struct mtd_partition spitz_nand_partitions[] = {
	{
		.name	= "System Area",
		.offset	= 0,
		.size	= 7 * 1024 * 1024,
	}, {
		.name	= "Root Filesystem",
		.offset	= 7 * 1024 * 1024,
	}, {
		.name	= "Home Filesystem",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

/* Bad-block marker: 0xff bytes at OOB offset 4 */
static struct nand_bbt_descr spitz_nand_bbt = {
	.options	= 0,
	.offs		= 4,
	.len		= 2,
	.pattern	= scan_ff_pattern
};

/* OOB/ECC layout used by the Akita and Borzoi models */
static struct nand_ecclayout akita_oobinfo = {
	.oobfree	= { {0x08, 0x09} },
	.eccbytes	= 24,
	.eccpos		= {
			0x05, 0x01, 0x02, 0x03, 0x06, 0x07, 0x15, 0x11,
			0x12, 0x13, 0x16, 0x17, 0x25, 0x21, 0x22, 0x23,
			0x26, 0x27, 0x35, 0x31, 0x32, 0x33, 0x36, 0x37,
	},
};

static struct sharpsl_nand_platform_data spitz_nand_pdata = {
	.badblock_pattern	= &spitz_nand_bbt,
	.partitions		= spitz_nand_partitions,
	.nr_partitions		= ARRAY_SIZE(spitz_nand_partitions),
};

static struct resource spitz_nand_resources[] = {
	{
		.start	= PXA_CS3_PHYS,
		.end	= PXA_CS3_PHYS + SZ_4K - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device spitz_nand_device = {
	.name		= "sharpsl-nand",
	.id		= -1,
	.resource	= spitz_nand_resources,
	.num_resources	= ARRAY_SIZE(spitz_nand_resources),
	.dev		= {
		.platform_data	= &spitz_nand_pdata,
	}
};

static void __init spitz_nand_init(void)
{
	/* Root partition size, BBT length and ECC layout depend on the model */
	if (machine_is_spitz()) {
		spitz_nand_partitions[1].size = 5 * 1024 * 1024;
	} else if (machine_is_akita()) {
		spitz_nand_partitions[1].size = 58 * 1024 * 1024;
		spitz_nand_bbt.len = 1;
		spitz_nand_pdata.ecc_layout = &akita_oobinfo;
	} else if (machine_is_borzoi()) {
		spitz_nand_partitions[1].size = 32 * 1024 * 1024;
		spitz_nand_bbt.len = 1;
		spitz_nand_pdata.ecc_layout = &akita_oobinfo;
	}

	platform_device_register(&spitz_nand_device);
}
#else
static inline void spitz_nand_init(void) {}
#endif
/******************************************************************************
* NOR Flash
******************************************************************************/
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
static struct mtd_partition spitz_rom_parts[] = {
{
.name ="Boot PROM Filesystem",
.offset = 0x00140000,
.size = MTDPART_SIZ_FULL,
},
};
static struct physmap_flash_data spitz_rom_data = {
.width = 2,
.nr_parts = ARRAY_SIZE(spitz_rom_parts),
.parts = spitz_rom_parts,
};
static struct resource spitz_rom_resources[] = {
{
.start = PXA_CS0_PHYS,
.end = PXA_CS0_PHYS + SZ_8M - 1,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device spitz_rom_device = {
.name = "physmap-flash",
.id = -1,
.resource = spitz_rom_resources,
.num_resources = ARRAY_SIZE(spitz_rom_resources),
.dev = {
.platform_data = &spitz_rom_data,
},
};
static void __init spitz_nor_init(void)
{
platform_device_register(&spitz_rom_device);
}
#else
static inline void spitz_nor_init(void) {}
#endif
/******************************************************************************
 * GPIO expander
 ******************************************************************************/
#if defined(CONFIG_I2C_PXA) || defined(CONFIG_I2C_PXA_MODULE)
static struct pca953x_platform_data akita_pca953x_pdata = {
	.gpio_base	= AKITA_IOEXP_GPIO_BASE,
};

/* Bus 0 devices; the trailing max7310 entry is dropped on non-Akita
 * models in spitz_i2c_init() below. */
static struct i2c_board_info spitz_i2c_devs[] = {
	{
		.type		= "wm8750",
		.addr		= 0x1b,
	}, {
		.type		= "max7310",
		.addr		= 0x18,
		.platform_data	= &akita_pca953x_pdata,
	},
};

static struct regulator_consumer_supply isl6271a_consumers[] = {
	REGULATOR_SUPPLY("vcc_core", NULL),
};

static struct regulator_init_data isl6271a_info[] = {
	{
		.constraints = {
			.name		= "vcc_core range",
			.min_uV		= 850000,
			.max_uV		= 1600000,
			.always_on	= 1,
			.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
		},
	.consumer_supplies	= isl6271a_consumers,
	.num_consumer_supplies	= ARRAY_SIZE(isl6271a_consumers),
	}
};

/* Power I2C bus (bus 1) carries the core-voltage regulator */
static struct i2c_board_info spitz_pi2c_devs[] = {
	{
		.type		= "isl6271a",
		.addr		= 0x0c,
		.platform_data	= &isl6271a_info,
	},
};

static void __init spitz_i2c_init(void)
{
	int size = ARRAY_SIZE(spitz_i2c_devs);

	/* Only Akita has the max7310 chip */
	if (!machine_is_akita())
		size--;

	pxa_set_i2c_info(NULL);
	pxa27x_set_i2c_power_info(NULL);
	i2c_register_board_info(0, spitz_i2c_devs, size);
	i2c_register_board_info(1, ARRAY_AND_SIZE(spitz_pi2c_devs));
}
#else
static inline void spitz_i2c_init(void) {}
#endif
/******************************************************************************
 * Machine init
 ******************************************************************************/
static void spitz_poweroff(void)
{
	/* NOTE(review): power-off is implemented as a 'g' (GPIO) restart;
	 * presumably the reset circuitry/bootloader then powers the unit
	 * down -- confirm against the Sharp bootloader behaviour. */
	pxa_restart('g', NULL);
}

static void spitz_restart(char mode, const char *cmd)
{
	uint32_t msc0 = __raw_readl(MSC0);

	/* Bootloader magic for a reboot */
	if ((msc0 & 0xffff0000) == 0x7ff00000)
		__raw_writel((msc0 & 0xffff) | 0x7ee00000, MSC0);

	spitz_poweroff();
}
/* Board init: configure pins and power management, then register every
 * on-board peripheral (each spitz_*_init() is a no-op if its subsystem is
 * not configured in). */
static void __init spitz_init(void)
{
	init_gpio_reset(SPITZ_GPIO_ON_RESET, 1, 0);
	pm_power_off = spitz_poweroff;

	PMCR = 0x00;

	/* Stop 3.6MHz and drive HIGH to PCMCIA and CS */
	PCFR |= PCFR_OPDE;

	pxa2xx_mfp_config(ARRAY_AND_SIZE(spitz_pin_config));

	/* On-chip UARTs with default (NULL) platform data */
	pxa_set_ffuart_info(NULL);
	pxa_set_btuart_info(NULL);
	pxa_set_stuart_info(NULL);

	spitz_spi_init();
	spitz_scoop_init();
	spitz_mkp_init();
	spitz_keys_init();
	spitz_leds_init();
	spitz_mmc_init();
	spitz_pcmcia_init();
	spitz_irda_init();
	spitz_uhc_init();
	spitz_lcd_init();
	spitz_nor_init();
	spitz_nand_init();
	spitz_i2c_init();
}
/* Early boot fixup: save the Sharp bootloader parameter block and describe
 * the single 64 MiB SDRAM bank at 0xa0000000. */
static void __init spitz_fixup(struct tag *tags, char **cmdline,
			       struct meminfo *mi)
{
	sharpsl_save_param();
	mi->nr_banks = 1;
	mi->bank[0].start = 0xa0000000;
	mi->bank[0].size = (64*1024*1024);
}
/* The three board variants share identical boot plumbing; they are told
 * apart at runtime via machine_is_*() checks in the init code above. */
#ifdef CONFIG_MACH_SPITZ
MACHINE_START(SPITZ, "SHARP Spitz")
	.restart_mode	= 'g',
	.fixup		= spitz_fixup,
	.map_io		= pxa27x_map_io,
	.nr_irqs	= PXA_NR_IRQS,
	.init_irq	= pxa27x_init_irq,
	.handle_irq	= pxa27x_handle_irq,
	.init_machine	= spitz_init,
	.timer		= &pxa_timer,
	.restart	= spitz_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_BORZOI
MACHINE_START(BORZOI, "SHARP Borzoi")
	.restart_mode	= 'g',
	.fixup		= spitz_fixup,
	.map_io		= pxa27x_map_io,
	.nr_irqs	= PXA_NR_IRQS,
	.init_irq	= pxa27x_init_irq,
	.handle_irq	= pxa27x_handle_irq,
	.init_machine	= spitz_init,
	.timer		= &pxa_timer,
	.restart	= spitz_restart,
MACHINE_END
#endif

#ifdef CONFIG_MACH_AKITA
MACHINE_START(AKITA, "SHARP Akita")
	.restart_mode	= 'g',
	.fixup		= spitz_fixup,
	.map_io		= pxa27x_map_io,
	.nr_irqs	= PXA_NR_IRQS,
	.init_irq	= pxa27x_init_irq,
	.handle_irq	= pxa27x_handle_irq,
	.init_machine	= spitz_init,
	.timer		= &pxa_timer,
	.restart	= spitz_restart,
MACHINE_END
#endif
/*
* max517.c - Support for Maxim MAX517, MAX518 and MAX519
*
* Copyright (C) 2010, 2011 Roland Stigge <stigge@antcom.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/dac/max517.h>
#define MAX517_DRV_NAME	"max517"

/* Commands (first byte of each I2C transfer) */
#define COMMAND_CHANNEL0	0x00
#define COMMAND_CHANNEL1	0x01 /* for MAX518 and MAX519 */
#define COMMAND_PD		0x08 /* Power Down */

enum max517_device_ids {
	ID_MAX517,
	ID_MAX518,
	ID_MAX519,
};

/* Per-device state, stored as iio_priv() of the iio_dev */
struct max517_data {
	struct i2c_client	*client;
	unsigned short		vref_mv[2];	/* per-channel reference, in mV */
};
/*
 * Write an 8-bit DAC code.
 *
 * channel: bit 0: channel 1
 *          bit 1: channel 2
 * (this way, it's possible to set both channels at once)
 *
 * Returns 0 on success, -EINVAL for out-of-range values, or the I2C error.
 */
static int max517_set_value(struct iio_dev *indio_dev,
	long val, int channel)
{
	struct max517_data *data = iio_priv(indio_dev);
	u8 buf[2];
	int ret;

	/* The DAC accepts 8-bit codes only */
	if (val < 0 || val > 255)
		return -EINVAL;

	buf[0] = channel;	/* command byte selects the output(s) */
	buf[1] = val;		/* data byte */

	ret = i2c_master_send(data->client, buf, sizeof(buf));
	if (ret < 0)
		return ret;

	/* A short transfer (fewer than 2 bytes) is an I/O error */
	return (ret == sizeof(buf)) ? 0 : -EIO;
}
/* IIO read hook: only the per-channel SCALE (volts per LSB) is readable. */
static int max517_read_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int *val,
			   int *val2,
			   long m)
{
	struct max517_data *data = iio_priv(indio_dev);
	unsigned int scale_uv;

	if (m != IIO_CHAN_INFO_SCALE)
		return -EINVAL;

	/* Corresponds to Vref / 2^(bits); vref_mv * 1000 gives uV */
	scale_uv = (data->vref_mv[chan->channel] * 1000) >> 8;
	*val = scale_uv / 1000000;
	*val2 = scale_uv % 1000000;
	return IIO_VAL_INT_PLUS_MICRO;
}
/* Only raw DAC-code writes are supported; everything else is -EINVAL. */
static int max517_write_raw(struct iio_dev *indio_dev,
	struct iio_chan_spec const *chan, int val, int val2, long mask)
{
	if (mask == IIO_CHAN_INFO_RAW)
		return max517_set_value(indio_dev, val, chan->channel);

	return -EINVAL;
}
#ifdef CONFIG_PM_SLEEP
/* Put the DAC into power-down mode while the system is suspended. */
static int max517_suspend(struct device *dev)
{
	u8 cmd = COMMAND_PD;

	return i2c_master_send(to_i2c_client(dev), &cmd, 1);
}
/* Clear the power-down bit to bring the outputs back up. */
static int max517_resume(struct device *dev)
{
	u8 cmd = 0;

	return i2c_master_send(to_i2c_client(dev), &cmd, 1);
}
static SIMPLE_DEV_PM_OPS(max517_pm_ops, max517_suspend, max517_resume);
#define MAX517_PM_OPS (&max517_pm_ops)
#else
#define MAX517_PM_OPS NULL
#endif
/* Hooks wiring the IIO core into this driver */
static const struct iio_info max517_info = {
	.read_raw = max517_read_raw,
	.write_raw = max517_write_raw,
	.driver_module = THIS_MODULE,
};
/* Template for one 8-bit output voltage channel */
#define MAX517_CHANNEL(chan) { \
	.type = IIO_VOLTAGE, \
	.indexed = 1, \
	.output = 1, \
	.channel = (chan), \
	.info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
	IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
	.scan_type = IIO_ST('u', 8, 8, 0), \
}

/* Both channels are declared; MAX517 registers only the first (see probe) */
static const struct iio_chan_spec max517_channels[] = {
	MAX517_CHANNEL(0),
	MAX517_CHANNEL(1)
};
/*
 * Probe: allocate the IIO device, select the channel count and reference
 * voltage for the detected chip variant, then register with the IIO core.
 * Returns 0 on success or a negative errno.
 */
static int __devinit max517_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct max517_data *data;
	struct iio_dev *indio_dev;
	struct max517_platform_data *platform_data = client->dev.platform_data;
	int err;

	indio_dev = iio_device_alloc(sizeof(*data));
	if (indio_dev == NULL) {
		err = -ENOMEM;
		goto exit;
	}
	data = iio_priv(indio_dev);
	i2c_set_clientdata(client, indio_dev);
	data->client = client;

	/* establish that the iio_dev is a child of the i2c device */
	indio_dev->dev.parent = &client->dev;

	/* reduced channel set for MAX517 */
	if (id->driver_data == ID_MAX517)
		indio_dev->num_channels = 1;
	else
		indio_dev->num_channels = 2;
	indio_dev->channels = max517_channels;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &max517_info;

	/*
	 * Reference voltage on MAX518 and default is 5V, else take vref_mv
	 * from platform_data
	 */
	if (id->driver_data == ID_MAX518 || !platform_data) {
		data->vref_mv[0] = data->vref_mv[1] = 5000; /* mV */
	} else {
		data->vref_mv[0] = platform_data->vref_mv[0];
		data->vref_mv[1] = platform_data->vref_mv[1];
	}

	err = iio_device_register(indio_dev);
	if (err)
		goto exit_free_device;

	dev_info(&client->dev, "DAC registered\n");

	return 0;

exit_free_device:
	iio_device_free(indio_dev);
exit:
	return err;
}
/* Tear down: unregister from the IIO core, then free the device. */
static int __devexit max517_remove(struct i2c_client *client)
{
	struct iio_dev *indio_dev = i2c_get_clientdata(client);

	iio_device_unregister(indio_dev);
	iio_device_free(indio_dev);
	return 0;
}
static const struct i2c_device_id max517_id[] = {
{ "max517", ID_MAX517 },
{ "max518", ID_MAX518 },
{ "max519", ID_MAX519 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max517_id);
/* I2C driver registration glue */
static struct i2c_driver max517_driver = {
	.driver = {
		.name = MAX517_DRV_NAME,
		.pm = MAX517_PM_OPS,
	},
	.probe = max517_probe,
	.remove = __devexit_p(max517_remove),
	.id_table = max517_id,
};
module_i2c_driver(max517_driver);
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("MAX517/MAX518/MAX519 8-bit DAC");
MODULE_LICENSE("GPL");
| gpl-2.0 |
mitake/linux | drivers/isdn/hisax/isurf.c | 131 | 7938 | /* $Id: isurf.c,v 1.12.2.4 2004/01/13 21:46:03 keil Exp $
*
* low level stuff for Siemens I-Surf/I-Talk cards
*
* Author Karsten Keil
* Copyright by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/init.h>
#include "hisax.h"
#include "isac.h"
#include "isar.h"
#include "isdnl1.h"
#include <linux/isapnp.h>
static const char *ISurf_revision = "$Revision: 1.12.2.4 $";

/* I/O-port accessors for the single reset/control port */
#define byteout(addr, val) outb(val, addr)
#define bytein(addr) inb(addr)

/* Bits of the reset/control port */
#define ISURF_ISAR_RESET 1
#define ISURF_ISAC_RESET 2
#define ISURF_ISAR_EA 4
#define ISURF_ARCOFI_RESET 8
#define ISURF_RESET (ISURF_ISAR_RESET | ISURF_ISAC_RESET | ISURF_ARCOFI_RESET)

/* Layout of the memory-mapped register window */
#define ISURF_ISAR_OFFSET 0
#define ISURF_ISAC_OFFSET 0x100
#define ISURF_IOMEM_SIZE 0x400
/* Interface functions */
/* Read one ISAC register through the memory-mapped window. */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
	return readb(cs->hw.isurf.isac + offset);
}
/* Write one ISAC register; mb() orders the MMIO store. */
static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	writeb(value, cs->hw.isurf.isac + offset);
	mb();
}
/* Drain 'size' bytes from the ISAC FIFO (single fixed address). */
static void
ReadISACfifo(struct IsdnCardState *cs, u_char *data, int size)
{
	int n;

	for (n = 0; n < size; n++)
		data[n] = readb(cs->hw.isurf.isac);
}
/* Push 'size' bytes into the ISAC FIFO, one MMIO store at a time. */
static void
WriteISACfifo(struct IsdnCardState *cs, u_char *data, int size)
{
	int n;

	for (n = 0; n < size; n++) {
		writeb(data[n], cs->hw.isurf.isac);
		mb();
	}
}
/* ISAR access routines
* mode = 0 access with IRQ on
* mode = 1 access with IRQ off
* mode = 2 access with IRQ off and using last offset
*/
/*
 * Read one ISAR register. The 'mode' argument (IRQ on/off semantics,
 * see the comment block above) is irrelevant for memory-mapped access
 * and is kept only to satisfy the BC_Read_Reg interface.
 */
static u_char
ReadISAR(struct IsdnCardState *cs, int mode, u_char offset)
{
	return readb(cs->hw.isurf.isar + offset);
}
/* Write one ISAR register; 'mode' is unused for memory-mapped access. */
static void
WriteISAR(struct IsdnCardState *cs, int mode, u_char offset, u_char value)
{
	writeb(value, cs->hw.isurf.isar + offset);
	mb();
}
/*
 * Shared interrupt handler for the ISAR (B-channel) and ISAC (D-channel)
 * controllers. Both status sources are polled in a loop (bounded by
 * cnt = 5) because servicing one controller can raise new events on the
 * other; on exhaustion a warning is logged and pending IRQs are re-armed.
 */
static irqreturn_t
isurf_interrupt(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	u_char val;
	int cnt = 5;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	val = readb(cs->hw.isurf.isar + ISAR_IRQBIT);
Start_ISAR:
	if (val & ISAR_IRQSTA)
		isar_int_main(cs);
	val = readb(cs->hw.isurf.isac + ISAC_ISTA);
Start_ISAC:
	if (val)
		isac_interrupt(cs, val);
	/* re-check both controllers; loop while anything is still pending */
	val = readb(cs->hw.isurf.isar + ISAR_IRQBIT);
	if ((val & ISAR_IRQSTA) && --cnt) {
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "ISAR IntStat after IntRoutine");
		goto Start_ISAR;
	}
	val = readb(cs->hw.isurf.isac + ISAC_ISTA);
	if (val && --cnt) {
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "ISAC IntStat after IntRoutine");
		goto Start_ISAC;
	}
	if (!cnt)
		printk(KERN_WARNING "ISurf IRQ LOOP\n");
	/*
	 * Mask-toggle sequence: disable, then re-enable interrupt delivery
	 * on both chips so a level-triggered line is re-armed cleanly.
	 */
	writeb(0, cs->hw.isurf.isar + ISAR_IRQBIT); mb();
	writeb(0xFF, cs->hw.isurf.isac + ISAC_MASK); mb();
	writeb(0, cs->hw.isurf.isac + ISAC_MASK); mb();
	writeb(ISAR_IRQMSK, cs->hw.isurf.isar + ISAR_IRQBIT); mb();
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
/*
 * Undo setup_isurf's resource claims: free the reset I/O port, unmap
 * the register window, and release the memory region.
 */
static void
release_io_isurf(struct IsdnCardState *cs)
{
	release_region(cs->hw.isurf.reset, 1);
	iounmap(cs->hw.isurf.isar);
	release_mem_region(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE);
}
/*
 * Pulse the selected reset bits ('chips') on the control port, then
 * release reset while keeping the ISAR external-access bit set.
 */
static void
reset_isurf(struct IsdnCardState *cs, u_char chips)
{
	printk(KERN_INFO "ISurf: resetting card\n");
	byteout(cs->hw.isurf.reset, chips); /* Reset On */
	mdelay(10);
	byteout(cs->hw.isurf.reset, ISURF_ISAR_EA); /* Reset Off */
	mdelay(10);
}
/*
 * Card control entry point (cs->cardmsg): handles reset, release and
 * init requests from the HiSax core. Always returns 0.
 */
static int
ISurf_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;

	switch (mt) {
	case CARD_RESET:
		spin_lock_irqsave(&cs->lock, flags);
		reset_isurf(cs, ISURF_RESET);
		spin_unlock_irqrestore(&cs->lock, flags);
		return (0);
	case CARD_RELEASE:
		release_io_isurf(cs);
		return (0);
	case CARD_INIT:
		spin_lock_irqsave(&cs->lock, flags);
		reset_isurf(cs, ISURF_RESET);
		clear_pending_isac_ints(cs);
		/* keep ISAR interrupts off until both chips are initialized */
		writeb(0, cs->hw.isurf.isar + ISAR_IRQBIT); mb();
		initisac(cs);
		initisar(cs);
		/* Reenable ISAC IRQ */
		cs->writeisac(cs, ISAC_MASK, 0);
		/* RESET Receiver and Transmitter */
		cs->writeisac(cs, ISAC_CMDR, 0x41);
		spin_unlock_irqrestore(&cs->lock, flags);
		return (0);
	case CARD_TEST:
		return (0);
	}
	return (0);
}
/*
 * Auxiliary command hook. IOCTL 9 (ISAR firmware load) additionally
 * resets the ISAC/ARCOFI side and re-initializes the ISAC afterwards;
 * all other commands are passed straight to isar_auxcmd().
 */
static int
isurf_auxcmd(struct IsdnCardState *cs, isdn_ctrl *ic) {
	int ret;
	u_long flags;

	if ((ic->command == ISDN_CMD_IOCTL) && (ic->arg == 9)) {
		ret = isar_auxcmd(cs, ic);
		spin_lock_irqsave(&cs->lock, flags);
		if (!ret) {
			/* firmware load succeeded: bring the ISAC back up */
			reset_isurf(cs, ISURF_ISAR_EA | ISURF_ISAC_RESET |
				ISURF_ARCOFI_RESET);
			initisac(cs);
			cs->writeisac(cs, ISAC_MASK, 0);
			cs->writeisac(cs, ISAC_CMDR, 0x41);
		}
		spin_unlock_irqrestore(&cs->lock, flags);
		return (ret);
	}
	return (isar_auxcmd(cs, ic));
}
#ifdef __ISAPNP__
/* Cursor for iterating PnP cards across multiple setup_isurf() calls */
static struct pnp_card *pnp_c __devinitdata = NULL;
#endif
/*
 * Probe/setup for the Siemens I-Surf/I-Talk card.
 *
 * Resources (reset port, register memory, IRQ) come either from the
 * insmod parameters or from ISAPnP. On success all card callbacks are
 * wired up and 1 is returned; any failure returns 0 with all claimed
 * resources released.
 *
 * Fixes vs. the previous version:
 *  - the phymem range is claimed with request_mem_region() (it is
 *    iomem, and release_io_isurf() frees it with release_mem_region();
 *    the old request_region() claimed I/O-port space instead, so the
 *    release operated on a resource tree the setup never touched)
 *  - the ioremap() return value is checked; a NULL mapping previously
 *    led to a NULL-pointer dereference on the first register access
 */
int __devinit
setup_isurf(struct IsdnCard *card)
{
	int ver;
	struct IsdnCardState *cs = card->cs;
	char tmp[64];

	strcpy(tmp, ISurf_revision);
	printk(KERN_INFO "HiSax: ISurf driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ != ISDN_CTYPE_ISURF)
		return (0);
	if (card->para[1] && card->para[2]) {
		/* resources supplied by the caller (insmod parameters) */
		cs->hw.isurf.reset = card->para[1];
		cs->hw.isurf.phymem = card->para[2];
		cs->irq = card->para[0];
	} else {
#ifdef __ISAPNP__
		if (isapnp_present()) {
			struct pnp_dev *pnp_d = NULL;
			int err;

			cs->subtyp = 0;
			if ((pnp_c = pnp_find_card(
				     ISAPNP_VENDOR('S', 'I', 'E'),
				     ISAPNP_FUNCTION(0x0010), pnp_c))) {
				if (!(pnp_d = pnp_find_dev(pnp_c,
							   ISAPNP_VENDOR('S', 'I', 'E'),
							   ISAPNP_FUNCTION(0x0010), pnp_d))) {
					printk(KERN_ERR "ISurfPnP: PnP error card found, no device\n");
					return (0);
				}
				pnp_disable_dev(pnp_d);
				err = pnp_activate_dev(pnp_d);
				if (err < 0) {
					pr_warn("%s: pnp_activate_dev ret=%d\n",
						__func__, err);
					return 0;
				}
				cs->hw.isurf.reset = pnp_port_start(pnp_d, 0);
				cs->hw.isurf.phymem = pnp_mem_start(pnp_d, 1);
				cs->irq = pnp_irq(pnp_d, 0);
				if (!cs->irq || !cs->hw.isurf.reset || !cs->hw.isurf.phymem) {
					printk(KERN_ERR "ISurfPnP:some resources are missing %d/%x/%lx\n",
					       cs->irq, cs->hw.isurf.reset, cs->hw.isurf.phymem);
					pnp_disable_dev(pnp_d);
					return (0);
				}
			} else {
				printk(KERN_INFO "ISurfPnP: no ISAPnP card found\n");
				return (0);
			}
		} else {
			printk(KERN_INFO "ISurfPnP: no ISAPnP bus found\n");
			return (0);
		}
#else
		printk(KERN_WARNING "HiSax: Siemens I-Surf port/mem not set\n");
		return (0);
#endif
	}
	if (!request_region(cs->hw.isurf.reset, 1, "isurf isdn")) {
		printk(KERN_WARNING
		       "HiSax: Siemens I-Surf config port %x already in use\n",
		       cs->hw.isurf.reset);
		return (0);
	}
	/* must match the release_mem_region() in release_io_isurf() */
	if (!request_mem_region(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE, "isurf iomem")) {
		printk(KERN_WARNING "HiSax: Siemens I-Surf memory region "
		       "%lx-%lx already in use\n",
		       cs->hw.isurf.phymem,
		       cs->hw.isurf.phymem + ISURF_IOMEM_SIZE);
		release_region(cs->hw.isurf.reset, 1);
		return (0);
	}
	cs->hw.isurf.isar = ioremap(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE);
	if (!cs->hw.isurf.isar) {
		printk(KERN_WARNING
		       "HiSax: Siemens I-Surf memory region %lx mapping failed\n",
		       cs->hw.isurf.phymem);
		release_region(cs->hw.isurf.reset, 1);
		release_mem_region(cs->hw.isurf.phymem, ISURF_IOMEM_SIZE);
		return (0);
	}
	cs->hw.isurf.isac = cs->hw.isurf.isar + ISURF_ISAC_OFFSET;
	printk(KERN_INFO
	       "ISurf: defined at 0x%x 0x%lx IRQ %d\n",
	       cs->hw.isurf.reset,
	       cs->hw.isurf.phymem,
	       cs->irq);

	setup_isac(cs);
	cs->cardmsg = &ISurf_card_msg;
	cs->irq_func = &isurf_interrupt;
	cs->auxcmd = &isurf_auxcmd;
	cs->readisac = &ReadISAC;
	cs->writeisac = &WriteISAC;
	cs->readisacfifo = &ReadISACfifo;
	cs->writeisacfifo = &WriteISACfifo;
	cs->bcs[0].hw.isar.reg = &cs->hw.isurf.isar_r;
	cs->bcs[1].hw.isar.reg = &cs->hw.isurf.isar_r;
	test_and_set_bit(HW_ISAR, &cs->HW_Flags);
	ISACVersion(cs, "ISurf:");
	cs->BC_Read_Reg = &ReadISAR;
	cs->BC_Write_Reg = &WriteISAR;
	cs->BC_Send_Data = &isar_fill_fifo;
	ver = ISARVersion(cs, "ISurf:");
	if (ver < 0) {
		printk(KERN_WARNING
		       "ISurf: wrong ISAR version (ret = %d)\n", ver);
		release_io_isurf(cs);
		return (0);
	}
	return (1);
}
| gpl-2.0 |
junkyde/vikinger-stock | drivers/staging/prima/CORE/MAC/src/pe/lim/limSendSmeRspMessages.c | 387 | 84998 | /*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Airgo Networks, Inc proprietary. All rights reserved.
* This file limSendSmeRspMessages.cc contains the functions
* for sending SME response/notification messages to applications
* above MAC software.
* Author: Chandra Modumudi
* Date: 02/13/02
* History:-
* Date Modified by Modification Information
* --------------------------------------------------------------------
*/
#include "vos_types.h"
#include "wniApi.h"
#include "sirCommon.h"
#include "aniGlobal.h"
#if (WNI_POLARIS_FW_PRODUCT == AP)
#include "wniCfgAp.h"
#else
#include "wniCfgSta.h"
#endif
#include "sysDef.h"
#include "cfgApi.h"
#ifdef FEATURE_WLAN_NON_INTEGRATED_SOC
#include "halDataStruct.h"
#include "halCommonApi.h"
#endif
#include "schApi.h"
#include "utilsApi.h"
#include "limUtils.h"
#include "limSecurityUtils.h"
#include "limSerDesUtils.h"
#include "limSendSmeRspMessages.h"
#include "limIbssPeerMgmt.h"
#include "limSessionUtils.h"
/**
* limSendSmeRsp()
*
*FUNCTION:
* This function is called by limProcessSmeReqMessages() to send
* eWNI_SME_START_RSP, eWNI_SME_MEASUREMENT_RSP, eWNI_SME_STOP_BSS_RSP
* or eWNI_SME_SWITCH_CHL_RSP messages to applications above MAC
* Software.
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* NA
*
* @param pMac Pointer to Global MAC structure
* @param msgType Indicates message type
* @param resultCode Indicates the result of previously issued
* eWNI_SME_msgType_REQ message
*
* @return None
*/
void
limSendSmeRsp(tpAniSirGlobal pMac, tANI_U16 msgType,
	      tSirResultCodes resultCode,tANI_U8 smesessionId, tANI_U16 smetransactionId)
{
    tSirMsgQ mmhMsg;
    tSirSmeRsp *pSirSmeRsp;

    PELOG1(limLog(pMac, LOG1,
		  FL("Sending message %s with reasonCode %s\n"),
		  limMsgStr(msgType), limResultCodeStr(resultCode));)

    if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeRsp, sizeof(tSirSmeRsp)))
    {
        /// Buffer not available. Log error
        limLog(pMac, LOGP,
               FL("call to palAllocateMemory failed for eWNI_SME_*_RSP\n"));
        return;
    }

#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
    sirStoreU16N((tANI_U8*)&pSirSmeRsp->messageType, msgType);
    sirStoreU16N((tANI_U8*)&pSirSmeRsp->length, sizeof(tSirSmeRsp));
#else
    pSirSmeRsp->messageType = msgType;
    pSirSmeRsp->length = sizeof(tSirSmeRsp);
#endif
    pSirSmeRsp->statusCode = resultCode;
    /* Update SME session Id and Transaction Id */
    pSirSmeRsp->sessionId = smesessionId;
    pSirSmeRsp->transactionId = smetransactionId;

    mmhMsg.type = msgType;
    mmhMsg.bodyptr = pSirSmeRsp;
    mmhMsg.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, mmhMsg.type));

#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    {
        /* map the response type to the matching diagnostics event */
        tpPESession psessionEntry = peGetValidPowerSaveSession(pMac);
        switch(msgType)
        {
        case eWNI_PMC_ENTER_BMPS_RSP:
            limDiagEventReport(pMac, WLAN_PE_DIAG_ENTER_BMPS_RSP_EVENT, psessionEntry, (tANI_U16)resultCode, 0);
            break;
        case eWNI_PMC_EXIT_BMPS_RSP:
            limDiagEventReport(pMac, WLAN_PE_DIAG_EXIT_BMPS_RSP_EVENT, psessionEntry, (tANI_U16)resultCode, 0);
            break;
        case eWNI_PMC_ENTER_IMPS_RSP:
            limDiagEventReport(pMac, WLAN_PE_DIAG_ENTER_IMPS_RSP_EVENT, psessionEntry, (tANI_U16)resultCode, 0);
            break;
        case eWNI_PMC_EXIT_IMPS_RSP:
            limDiagEventReport(pMac, WLAN_PE_DIAG_EXIT_IMPS_RSP_EVENT, psessionEntry, (tANI_U16)resultCode, 0);
            break;
        case eWNI_PMC_ENTER_UAPSD_RSP:
            limDiagEventReport(pMac, WLAN_PE_DIAG_ENTER_UAPSD_RSP_EVENT, psessionEntry, (tANI_U16)resultCode, 0);
            break;
        case eWNI_PMC_EXIT_UAPSD_RSP:
            limDiagEventReport(pMac, WLAN_PE_DIAG_EXIT_UAPSD_RSP_EVENT, psessionEntry, (tANI_U16)resultCode, 0);
            break;
        case eWNI_SME_SWITCH_CHL_RSP:
            limDiagEventReport(pMac, WLAN_PE_DIAG_SWITCH_CHL_RSP_EVENT, NULL, (tANI_U16)resultCode, 0);
            break;
        case eWNI_SME_STOP_BSS_RSP:
            limDiagEventReport(pMac, WLAN_PE_DIAG_STOP_BSS_RSP_EVENT, NULL, (tANI_U16)resultCode, 0);
            break;
        case eWNI_PMC_ENTER_WOWL_RSP:
            limDiagEventReport(pMac, WLAN_PE_DIAG_ENTER_WOWL_RSP_EVENT, psessionEntry, (tANI_U16)resultCode, 0);
            break;
        case eWNI_PMC_EXIT_WOWL_RSP:
            limDiagEventReport(pMac, WLAN_PE_DIAG_EXIT_WOWL_RSP_EVENT, psessionEntry, (tANI_U16)resultCode, 0);
            break;
        }
    }
#endif //FEATURE_WLAN_DIAG_SUPPORT

    /* ownership of pSirSmeRsp transfers to the message queue here */
    limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
} /*** end limSendSmeRsp() ***/
/**
* limSendSmeJoinReassocRspAfterResume()
*
*FUNCTION:
* This function is called to send Join/Reassoc rsp
* message to SME after the resume link.
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* NA
*
* @param pMac Pointer to Global MAC structure
* @param status Resume link status
* @param ctx context passed while calling resmune link.
* (join response to be sent)
*
* @return None
*/
/*
 * Resume-link completion callback: forwards the join/reassoc response
 * that was prepared before the link suspend. The resume 'status' itself
 * is not propagated to SME.
 */
static void limSendSmeJoinReassocRspAfterResume( tpAniSirGlobal pMac,
						 eHalStatus status, tANI_U32 *ctx)
{
    tpSirSmeJoinRsp pRsp = (tpSirSmeJoinRsp) ctx;
    tSirMsgQ msg;

    msg.type = pRsp->messageType;
    msg.bodyptr = pRsp;
    msg.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, msg.type));
    limSysProcessMmhMsgApi(pMac, &msg, ePROT);
}
/**
* limSendSmeJoinReassocRsp()
*
*FUNCTION:
* This function is called by limProcessSmeReqMessages() to send
* eWNI_SME_JOIN_RSP or eWNI_SME_REASSOC_RSP messages to applications
* above MAC Software.
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* NA
*
* @param pMac Pointer to Global MAC structure
* @param msgType Indicates message type
* @param resultCode Indicates the result of previously issued
* eWNI_SME_msgType_REQ message
*
* @return None
*/
void
limSendSmeJoinReassocRsp(tpAniSirGlobal pMac, tANI_U16 msgType,
			 tSirResultCodes resultCode, tANI_U16 protStatusCode,
			 tpPESession psessionEntry,tANI_U8 smesessionId,tANI_U16 smetransactionId)
{
    tpSirSmeJoinRsp pSirSmeJoinRsp;
    tANI_U32 rspLen;
    tpDphHashNode pStaDs = NULL;

#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    if (msgType == eWNI_SME_REASSOC_RSP)
        limDiagEventReport(pMac, WLAN_PE_DIAG_REASSOC_RSP_EVENT, psessionEntry, (tANI_U16)resultCode, 0);
    else
        limDiagEventReport(pMac, WLAN_PE_DIAG_JOIN_RSP_EVENT, psessionEntry, (tANI_U16)resultCode, 0);
#endif //FEATURE_WLAN_DIAG_SUPPORT

    PELOG1(limLog(pMac, LOG1,
		  FL("Sending message %s with reasonCode %s\n"),
		  limMsgStr(msgType), limResultCodeStr(resultCode));)

    if(psessionEntry == NULL)
    {
        /* No session: send a minimal response carrying only the status */
        rspLen = sizeof(tSirSmeJoinRsp);
        if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeJoinRsp, rspLen))
        {
            /// Buffer not available. Log error
            limLog(pMac, LOGP, FL("call to palAllocateMemory failed for JOIN/REASSOC_RSP\n"));
            return;
        }
        palZeroMemory(pMac, (tANI_U8*)pSirSmeJoinRsp, rspLen);
        pSirSmeJoinRsp->beaconLength = 0;
        pSirSmeJoinRsp->assocReqLength = 0;
        pSirSmeJoinRsp->assocRspLength = 0;
    }
    else
    {
        /*
         * The cached beacon, (re)assoc request/response (and optionally
         * RIC / CCX TSPEC data) are appended after the fixed part, in
         * that order, inside the single 'frames' flexible buffer.
         */
        rspLen = psessionEntry->assocReqLen + psessionEntry->assocRspLen +
            psessionEntry->bcnLen +
#ifdef WLAN_FEATURE_VOWIFI_11R
            psessionEntry->RICDataLen +
#endif
#ifdef FEATURE_WLAN_CCX
            psessionEntry->tspecLen +
#endif
            sizeof(tSirSmeJoinRsp) - sizeof(tANI_U8) ;
        if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeJoinRsp, rspLen))
        {
            /// Buffer not available. Log error
            limLog(pMac, LOGP,
                   FL("call to palAllocateMemory failed for JOIN/REASSOC_RSP\n"));
            return;
        }
        palZeroMemory(pMac, (tANI_U8*)pSirSmeJoinRsp, rspLen);
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
        sirStoreU16N((tANI_U8*)&pSirSmeJoinRsp->messageType, msgType);
        sirStoreU16N((tANI_U8*)&pSirSmeJoinRsp->length, rspLen);
#endif
#if (WNI_POLARIS_FW_PRODUCT == WLAN_STA)
        if (resultCode == eSIR_SME_SUCCESS)
        {
            /* report the peer's station index and signatures to SME */
            pStaDs = dphGetHashEntry(pMac, DPH_STA_HASH_INDEX_PEER, &psessionEntry->dph.dphHashTable);
            if (pStaDs == NULL)
            {
                PELOGE(limLog(pMac, LOGE, FL("could not Get Self Entry for the station\n"));)
            }
            else
            {
                //Pass the peer's staId
                pSirSmeJoinRsp->staId = pStaDs->staIndex;
                pSirSmeJoinRsp->ucastSig = pStaDs->ucUcastSig;
                pSirSmeJoinRsp->bcastSig = pStaDs->ucBcastSig;
            }
        }
#endif
#if (WNI_POLARIS_FW_PACKAGE == ADVANCED)
        if (resultCode == eSIR_SME_TRANSFER_STA)
        {
            palCopyMemory( pMac->hHdd, pSirSmeJoinRsp->alternateBssId,
                           pMac->lim.gLimAlternateRadio.bssId,
                           sizeof(tSirMacAddr));
            pSirSmeJoinRsp->alternateChannelId =
                pMac->lim.gLimAlternateRadio.channelId;
        }
#endif
        pSirSmeJoinRsp->beaconLength = 0;
        pSirSmeJoinRsp->assocReqLength = 0;
        pSirSmeJoinRsp->assocRspLength = 0;
#ifdef WLAN_FEATURE_VOWIFI_11R
        pSirSmeJoinRsp->parsedRicRspLen = 0;
#endif
#ifdef FEATURE_WLAN_CCX
        pSirSmeJoinRsp->tspecIeLen = 0;
#endif
        if(resultCode == eSIR_SME_SUCCESS)
        {
            /* each cached frame is copied out and the session copy freed */
            if(psessionEntry->beacon != NULL)
            {
                pSirSmeJoinRsp->beaconLength = psessionEntry->bcnLen;
                palCopyMemory(pMac->hHdd, pSirSmeJoinRsp->frames, psessionEntry->beacon, pSirSmeJoinRsp->beaconLength);
                palFreeMemory(pMac->hHdd, psessionEntry->beacon);
                psessionEntry->beacon = NULL;
#ifdef WLAN_FEATURE_VOWIFI_11R_DEBUG
                PELOG1(limLog(pMac, LOG1, FL("Beacon=%d\n"), psessionEntry->bcnLen);)
#endif
            }
            if(psessionEntry->assocReq != NULL)
            {
                pSirSmeJoinRsp->assocReqLength = psessionEntry->assocReqLen;
                palCopyMemory(pMac->hHdd, pSirSmeJoinRsp->frames + psessionEntry->bcnLen, psessionEntry->assocReq, pSirSmeJoinRsp->assocReqLength);
                palFreeMemory(pMac->hHdd, psessionEntry->assocReq);
                psessionEntry->assocReq = NULL;
#ifdef WLAN_FEATURE_VOWIFI_11R_DEBUG
                PELOG1(limLog(pMac, LOG1, FL("AssocReq=%d\n"), psessionEntry->assocReqLen);)
#endif
            }
            if(psessionEntry->assocRsp != NULL)
            {
                pSirSmeJoinRsp->assocRspLength = psessionEntry->assocRspLen;
                palCopyMemory(pMac->hHdd, pSirSmeJoinRsp->frames + psessionEntry->bcnLen + psessionEntry->assocReqLen, psessionEntry->assocRsp, pSirSmeJoinRsp->assocRspLength);
                palFreeMemory(pMac->hHdd, psessionEntry->assocRsp);
                psessionEntry->assocRsp = NULL;
            }
#ifdef WLAN_FEATURE_VOWIFI_11R
            if(psessionEntry->ricData != NULL)
            {
                pSirSmeJoinRsp->parsedRicRspLen = psessionEntry->RICDataLen;
                palCopyMemory(pMac->hHdd, pSirSmeJoinRsp->frames + psessionEntry->bcnLen + psessionEntry->assocReqLen + psessionEntry->assocRspLen, psessionEntry->ricData, pSirSmeJoinRsp->parsedRicRspLen);
                palFreeMemory(pMac->hHdd, psessionEntry->ricData);
                psessionEntry->ricData = NULL;
                PELOG1(limLog(pMac, LOG1, FL("RicLength=%d\n"), pSirSmeJoinRsp->parsedRicRspLen);)
            }
#endif
#ifdef FEATURE_WLAN_CCX
            if(psessionEntry->tspecIes != NULL)
            {
                pSirSmeJoinRsp->tspecIeLen = psessionEntry->tspecLen;
                palCopyMemory(pMac->hHdd, pSirSmeJoinRsp->frames + psessionEntry->bcnLen + psessionEntry->assocReqLen + psessionEntry->assocRspLen + psessionEntry->RICDataLen, psessionEntry->tspecIes, pSirSmeJoinRsp->tspecIeLen);
                palFreeMemory(pMac->hHdd, psessionEntry->tspecIes);
                psessionEntry->tspecIes = NULL;
                PELOG1(limLog(pMac, LOG1, FL("CCX-TspecLen=%d\n"), psessionEntry->tspecLen);)
            }
#endif
            pSirSmeJoinRsp->aid = psessionEntry->limAID;
#ifdef WLAN_FEATURE_VOWIFI_11R_DEBUG
            PELOG1(limLog(pMac, LOG1, FL("AssocRsp=%d\n"), psessionEntry->assocRspLen);)
#endif
        }
    }

    pSirSmeJoinRsp->messageType = msgType;
    pSirSmeJoinRsp->length = (tANI_U16) rspLen;
    pSirSmeJoinRsp->statusCode = resultCode;
    pSirSmeJoinRsp->protStatusCode = protStatusCode;
    /* Update SME session ID and transaction Id */
    pSirSmeJoinRsp->sessionId = smesessionId;
    pSirSmeJoinRsp->transactionId = smetransactionId;

    /*
     * If the link is suspended (MCC), set the channel to resume on and
     * deliver the response from the resume callback; otherwise send it
     * immediately.
     */
    if(IS_MCC_SUPPORTED && limIsLinkSuspended( pMac ) )
    {
        if( psessionEntry && psessionEntry->limSmeState == eLIM_SME_LINK_EST_STATE )
        {
#ifdef WLAN_FEATURE_11AC
            if (psessionEntry->vhtCapability)
            {
                ePhyChanBondState htSecondaryChannelOffset;
                /*Get 11ac cbState from 11n cbState*/
                htSecondaryChannelOffset = limGet11ACPhyCBState(pMac,
                                                                psessionEntry->currentOperChannel,
                                                                psessionEntry->htSecondaryChannelOffset);
                peSetResumeChannel( pMac, psessionEntry->currentOperChannel, htSecondaryChannelOffset);
            }
            else
#endif
                peSetResumeChannel( pMac, psessionEntry->currentOperChannel, psessionEntry->htSecondaryChannelOffset);
        }
        else
        {
            peSetResumeChannel( pMac, 0, 0);
        }
        limResumeLink( pMac, limSendSmeJoinReassocRspAfterResume,
                       (tANI_U32*) pSirSmeJoinRsp );
    }
    else
    {
        limSendSmeJoinReassocRspAfterResume( pMac, eHAL_STATUS_SUCCESS,
                                             (tANI_U32*) pSirSmeJoinRsp );
    }
} /*** end limSendSmeJoinReassocRsp() ***/
/**
* limSendSmeStartBssRsp()
*
*FUNCTION:
* This function is called to send eWNI_SME_START_BSS_RSP
* message to applications above MAC Software.
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* NA
*
* @param pMac Pointer to Global MAC structure
* @param msgType Indicates message type
* @param resultCode Indicates the result of previously issued
* eWNI_SME_msgType_REQ message
*
* @return None
*/
/*
 * Build and post an eWNI_SME_START_BSS_RSP to SME.
 *
 * With a valid session and eSIR_SME_SUCCESS, the response carries a BSS
 * description assembled from the session plus the beacon IEs cached by
 * the scheduler (gSchBeaconFrameBegin/End); otherwise only the status
 * fields are filled in.
 *
 * Fix vs. the previous version: the MTRACE call dereferenced
 * psessionEntry unconditionally even though the psessionEntry == NULL
 * case is explicitly handled above (and the SOFTAP staId assignment
 * NULL-checks it) -> NULL-pointer dereference; NO_SESSION is now used
 * when there is no session.
 */
void
limSendSmeStartBssRsp(tpAniSirGlobal pMac,
		      tANI_U16 msgType, tSirResultCodes resultCode,tpPESession psessionEntry,
		      tANI_U8 smesessionId,tANI_U16 smetransactionId)
{
    tANI_U16 size = 0;
    tSirMsgQ mmhMsg;
    tSirSmeStartBssRsp *pSirSmeRsp;
    tANI_U16 ieLen;
    tANI_U16 ieOffset, curLen;

    PELOG1(limLog(pMac, LOG1, FL("Sending message %s with reasonCode %s\n"),
		  limMsgStr(msgType), limResultCodeStr(resultCode));)

    size = sizeof(tSirSmeStartBssRsp);

    if(psessionEntry == NULL)
    {
        /* no session: status-only response */
        if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeRsp, size))
        {
            /// Buffer not available. Log error
            limLog(pMac, LOGP,FL("call to palAllocateMemory failed for eWNI_SME_START_BSS_RSP\n"));
            return;
        }
        palZeroMemory(pMac, (tANI_U8*)pSirSmeRsp, size);
    }
    else
    {
        //subtract size of beaconLength + Mac Hdr + Fixed Fields before SSID
        ieOffset = sizeof(tAniBeaconStruct) + SIR_MAC_B_PR_SSID_OFFSET;
        ieLen = pMac->sch.schObject.gSchBeaconOffsetBegin + pMac->sch.schObject.gSchBeaconOffsetEnd - ieOffset;
        //calculate the memory size to allocate
        size += ieLen;
        if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeRsp, size))
        {
            /// Buffer not available. Log error
            limLog(pMac, LOGP,
                   FL("call to palAllocateMemory failed for eWNI_SME_START_BSS_RSP\n"));
            return;
        }
        palZeroMemory(pMac, (tANI_U8*)pSirSmeRsp, size);
        size = sizeof(tSirSmeStartBssRsp);
        if (resultCode == eSIR_SME_SUCCESS)
        {
            sirCopyMacAddr(pSirSmeRsp->bssDescription.bssId, psessionEntry->bssId);
            /* Read beacon interval from session */
            pSirSmeRsp->bssDescription.beaconInterval = (tANI_U16) psessionEntry->beaconParams.beaconInterval;
            pSirSmeRsp->bssType = psessionEntry->bssType;
            if (cfgGetCapabilityInfo( pMac, &pSirSmeRsp->bssDescription.capabilityInfo,psessionEntry)
                != eSIR_SUCCESS)
                limLog(pMac, LOGP, FL("could not retrieve Capabilities value\n"));
            limGetPhyMode(pMac, (tANI_U32 *)&pSirSmeRsp->bssDescription.nwType, psessionEntry);
            pSirSmeRsp->bssDescription.channelId = psessionEntry->currentOperChannel;
            pSirSmeRsp->bssDescription.aniIndicator = 1;
            /* copy the cached beacon IEs (two scheduler fragments) */
            curLen = pMac->sch.schObject.gSchBeaconOffsetBegin - ieOffset;
            palCopyMemory( pMac->hHdd, (tANI_U8 *) &pSirSmeRsp->bssDescription.ieFields,
                           pMac->sch.schObject.gSchBeaconFrameBegin + ieOffset,
                           (tANI_U32)curLen);
            palCopyMemory( pMac->hHdd, ((tANI_U8 *) &pSirSmeRsp->bssDescription.ieFields) + curLen,
                           pMac->sch.schObject.gSchBeaconFrameEnd,
                           (tANI_U32)pMac->sch.schObject.gSchBeaconOffsetEnd);
            //subtracting size of length indicator itself and size of pointer to ieFields
            pSirSmeRsp->bssDescription.length = sizeof(tSirBssDescription) -
                sizeof(tANI_U16) - sizeof(tANI_U32) +
                ieLen;
            //This is the size of the message, subtracting the size of the pointer to ieFields
            size += ieLen - sizeof(tANI_U32);
        }
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
        sirStoreU16N((tANI_U8*)&pSirSmeRsp->messageType, msgType);
        sirStoreU16N((tANI_U8*)&pSirSmeRsp->length, size);
#endif
    }

    pSirSmeRsp->messageType = msgType;
    pSirSmeRsp->length = size;
    /* Update SME session Id and transaction Id */
    pSirSmeRsp->sessionId = smesessionId;
    pSirSmeRsp->transactionId = smetransactionId;
    pSirSmeRsp->statusCode = resultCode;
#ifdef WLAN_SOFTAP_FEATURE
    if(psessionEntry != NULL )
        pSirSmeRsp->staId = psessionEntry->staId; //else it will be always zero smeRsp StaID = 0
#endif

    mmhMsg.type = msgType;
    mmhMsg.bodyptr = pSirSmeRsp;
    mmhMsg.bodyval = 0;
    /* psessionEntry may legitimately be NULL here - do not dereference */
    MTRACE(macTraceMsgTx(pMac, (psessionEntry) ? psessionEntry->peSessionId : NO_SESSION, mmhMsg.type));
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    limDiagEventReport(pMac, WLAN_PE_DIAG_START_BSS_RSP_EVENT, psessionEntry, (tANI_U16)resultCode, 0);
#endif //FEATURE_WLAN_DIAG_SUPPORT
    limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
} /*** end limSendSmeStartBssRsp() ***/
/* Chunking limits for scan responses: at most 20 BSS descriptions per
 * message, each assumed to fit in ~400 bytes. */
#define LIM_MAX_NUM_OF_SCAN_RESULTS_REPORTED 20
#define LIM_SIZE_OF_EACH_BSS 400 // this is a rough estimate
/**
* limSendSmeScanRsp()
*
*FUNCTION:
* This function is called by limProcessSmeReqMessages() to send
* eWNI_SME_SCAN_RSP message to applications above MAC
* Software.
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* NA
*
* @param pMac Pointer to Global MAC structure
* @param length Indicates length of message
* @param resultCode Indicates the result of previously issued
* eWNI_SME_SCAN_REQ message
*
* @return None
*/
void
limSendSmeScanRsp(tpAniSirGlobal pMac, tANI_U16 length,
		  tSirResultCodes resultCode,tANI_U8 smesessionId,tANI_U16 smetranscationId)
{
    tSirMsgQ mmhMsg;
    tpSirSmeScanRsp pSirSmeScanRsp=NULL;
    tLimScanResultNode *ptemp = NULL;
    tANI_U16 msgLen, allocLength, curMsgLen = 0;
    tANI_U16 i, bssCount;
    tANI_U8 *pbBuf;
    tSirBssDescription *pDesc;

    PELOG1(limLog(pMac, LOG1,
		  FL("Sending message SME_SCAN_RSP with length=%d reasonCode %s\n"),
		  length, limResultCodeStr(resultCode));)

    /* failures are reported with a single status-only message */
    if (resultCode != eSIR_SME_SUCCESS)
    {
        limPostSmeScanRspMessage(pMac, length, resultCode,smesessionId,smetranscationId);
        return;
    }

    mmhMsg.type = eWNI_SME_SCAN_RSP;
    i = 0;
    bssCount = 0;
    msgLen = 0;
    allocLength = LIM_MAX_NUM_OF_SCAN_RESULTS_REPORTED * LIM_SIZE_OF_EACH_BSS;
    if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeScanRsp, allocLength))
    {
        // Log error
        limLog(pMac, LOGP,
               FL("call to palAllocateMemory failed for eWNI_SME_SCAN_RSP\n"));
        return;
    }

    /*
     * Walk the cached-scan hash table, packing variable-length BSS
     * descriptions into the response buffer. When the buffer would
     * overflow (or the per-message BSS cap is hit), the current message
     * is sent with MORE_SCAN_RESULTS_FOLLOW and a new buffer is started.
     */
    for (i = 0; i < LIM_MAX_NUM_OF_SCAN_RESULTS; i++)
    {
        //when ptemp is not NULL it is a left over
        ptemp = pMac->lim.gLimCachedScanHashTable[i];
        while(ptemp)
        {
            pbBuf = ((tANI_U8 *)pSirSmeScanRsp) + msgLen;
            if(0 == bssCount)
            {
                /* first descriptor goes into the fixed-message slot */
                msgLen = sizeof(tSirSmeScanRsp) -
                    sizeof(tSirBssDescription) +
                    ptemp->bssDescription.length +
                    sizeof(ptemp->bssDescription.length);
                pDesc = pSirSmeScanRsp->bssDescription;
            }
            else
            {
                msgLen += ptemp->bssDescription.length +
                    sizeof(ptemp->bssDescription.length);
                pDesc = (tSirBssDescription *)pbBuf;
            }
            if( (allocLength < msgLen) ||
                (LIM_MAX_NUM_OF_SCAN_RESULTS_REPORTED <= bssCount++) )
            {
                /* current buffer is full: flush it and start a new one */
                pSirSmeScanRsp->statusCode =
                    eSIR_SME_MORE_SCAN_RESULTS_FOLLOW;
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
                sirStoreU16N((tANI_U8*)&pSirSmeScanRsp->messageType,
                             eWNI_SME_SCAN_RSP);
                sirStoreU16N((tANI_U8*)&pSirSmeScanRsp->length, curMsgLen);
#else
                pSirSmeScanRsp->messageType = eWNI_SME_SCAN_RSP;
                pSirSmeScanRsp->length = curMsgLen;
#endif
                mmhMsg.bodyptr = pSirSmeScanRsp;
                mmhMsg.bodyval = 0;
                MTRACE(macTraceMsgTx(pMac, NO_SESSION, mmhMsg.type));
                limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
                if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeScanRsp, allocLength))
                {
                    // Log error
                    limLog(pMac, LOGP,
                           FL("call to palAllocateMemory failed for eWNI_SME_SCAN_RSP\n"));
                    return;
                }
                /* restart packing with the current descriptor */
                msgLen = sizeof(tSirSmeScanRsp) -
                    sizeof(tSirBssDescription) +
                    ptemp->bssDescription.length +
                    sizeof(ptemp->bssDescription.length);
                pDesc = pSirSmeScanRsp->bssDescription;
                bssCount = 1;
            }
            curMsgLen = msgLen;
            PELOG2(limLog(pMac, LOG2, FL("ScanRsp : msgLen %d, bssDescr Len=%d\n"),
                          msgLen, ptemp->bssDescription.length);)
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
            sirStoreU16N((tANI_U8*)&pDesc->length,
                         ptemp->bssDescription.length);
#else
            pDesc->length
                = ptemp->bssDescription.length;
#endif
            /* 'length' counts everything after the length field itself,
             * so the copy starts at bssId */
            palCopyMemory( pMac->hHdd, (tANI_U8 *) &pDesc->bssId,
                           (tANI_U8 *) &ptemp->bssDescription.bssId,
                           ptemp->bssDescription.length);
            PELOG2(limLog(pMac, LOG2, FL("BssId "));
                   limPrintMacAddr(pMac, ptemp->bssDescription.bssId, LOG2);)
            pSirSmeScanRsp->sessionId = smesessionId;
            pSirSmeScanRsp->transcationId = smetranscationId;
            ptemp = ptemp->next;
        } //while(ptemp)
    } //for (i = 0; i < LIM_MAX_NUM_OF_SCAN_RESULTS; i++)

    if(0 == bssCount)
    {
        /* nothing cached: report via the status-only helper and drop
         * the unused buffer */
        limPostSmeScanRspMessage(pMac, length, resultCode, smesessionId, smetranscationId);
        if (NULL != pSirSmeScanRsp)
        {
            palFreeMemory( pMac->hHdd, pSirSmeScanRsp);
            pSirSmeScanRsp = NULL;
        }
    }
    else
    {
        // send last message
        pSirSmeScanRsp->statusCode = eSIR_SME_SUCCESS;
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
        sirStoreU16N((tANI_U8*)&pSirSmeScanRsp->messageType,
                     eWNI_SME_SCAN_RSP);
        sirStoreU16N((tANI_U8*)&pSirSmeScanRsp->length, curMsgLen);
#else
        pSirSmeScanRsp->messageType = eWNI_SME_SCAN_RSP;
        pSirSmeScanRsp->length = curMsgLen;
#endif
        /* Update SME session Id and SME transcation Id */
        pSirSmeScanRsp->sessionId = smesessionId;
        pSirSmeScanRsp->transcationId = smetranscationId;
        mmhMsg.type = eWNI_SME_SCAN_RSP;
        mmhMsg.bodyptr = pSirSmeScanRsp;
        mmhMsg.bodyval = 0;
        MTRACE(macTraceMsgTx(pMac, NO_SESSION, mmhMsg.type));
        limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
        PELOG2(limLog(pMac, LOG2, FL("statusCode : eSIR_SME_SUCCESS\n"));)
    }
    return;
} /*** end limSendSmeScanRsp() ***/
/**
* limPostSmeScanRspMessage()
*
*FUNCTION:
* This function is called by limSendSmeScanRsp() to send
* eWNI_SME_SCAN_RSP message with failed result code
*
*NOTE:
* NA
*
* @param pMac Pointer to Global MAC structure
* @param length Indicates length of message
* @param resultCode failed result code
*
* @return None
*/
void
limPostSmeScanRspMessage(tpAniSirGlobal pMac,
tANI_U16 length,
tSirResultCodes resultCode,tANI_U8 smesessionId, tANI_U16 smetransactionId)
{
tpSirSmeScanRsp pSirSmeScanRsp;
tSirMsgQ mmhMsg;
PELOG1(limLog(pMac, LOG1,
FL("limPostSmeScanRspMessage: send SME_SCAN_RSP (len %d, reasonCode %s). \n"),
length, limResultCodeStr(resultCode));)
// Allocate the response buffer; 'length' is the full message size requested
// by the caller, which may be larger than sizeof(tSirSmeScanRsp).
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeScanRsp, length))
{
limLog(pMac, LOGP, FL("palAllocateMemory failed for eWNI_SME_SCAN_RSP\n"));
return;
}
// Zero the whole buffer so any trailing bytes beyond the fields set below
// are deterministic.
palZeroMemory(pMac->hHdd, (void*)pSirSmeScanRsp, length);
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
// AP/LE builds serialize the header fields byte-wise.
sirStoreU16N((tANI_U8*)&pSirSmeScanRsp->messageType, eWNI_SME_SCAN_RSP);
sirStoreU16N((tANI_U8*)&pSirSmeScanRsp->length, length);
#else
pSirSmeScanRsp->messageType = eWNI_SME_SCAN_RSP;
pSirSmeScanRsp->length = length;
#endif
// Only touch the embedded BSS description when the buffer is large enough
// to actually contain one.
if(sizeof(tSirSmeScanRsp) <= length)
{
pSirSmeScanRsp->bssDescription->length = sizeof(tSirBssDescription);
}
pSirSmeScanRsp->statusCode = resultCode;
/*Update SME session Id and transaction Id */
pSirSmeScanRsp->sessionId = smesessionId;
pSirSmeScanRsp->transcationId = smetransactionId;
// Hand the message to the MMH dispatcher; ownership of the buffer
// transfers with the message.
mmhMsg.type = eWNI_SME_SCAN_RSP;
mmhMsg.bodyptr = pSirSmeScanRsp;
mmhMsg.bodyval = 0;
MTRACE(macTraceMsgTx(pMac, NO_SESSION, mmhMsg.type));
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
limDiagEventReport(pMac, WLAN_PE_DIAG_SCAN_RSP_EVENT, NULL, (tANI_U16)resultCode, 0);
#endif //FEATURE_WLAN_DIAG_SUPPORT
limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
return;
} /*** limPostSmeScanRspMessage ***/
#ifdef FEATURE_OEM_DATA_SUPPORT
/**
* limSendSmeOemDataRsp()
*
*FUNCTION:
* This function is called by limProcessSmeReqMessages() to send
* eWNI_SME_OEM_DATA_RSP message to applications above MAC
* Software.
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* NA
*
* @param pMac Pointer to Global MAC structure
* @param pMsgBuf Indicates the mlm message
* @param resultCode Indicates the result of previously issued
* eWNI_SME_OEM_DATA_RSP message
*
* @return None
*/
/**
 * limSendSmeOemDataRsp() - forward an MLM OEM-data response up to SME.
 *
 * Copies the fixed-size OEM payload out of the MLM response, releases the
 * MLM message, and posts an eWNI_SME_OEM_DATA_RSP to the MMH dispatcher.
 * Ownership of the newly allocated SME response transfers with the message.
 *
 * @param pMac       Pointer to Global MAC structure
 * @param pMsgBuf    The tLimMlmOemDataRsp message from MLM (freed here)
 * @param resultCode Result of the OEM data request (currently unused here)
 */
void limSendSmeOemDataRsp(tpAniSirGlobal pMac, tANI_U32* pMsgBuf, tSirResultCodes resultCode)
{
    tLimMlmOemDataRsp* pMlmRsp = (tLimMlmOemDataRsp*)(pMsgBuf);
    tSirOemDataRsp* pSmeRsp = NULL;
    tANI_U16 rspLen = sizeof(tSirOemDataRsp);
    tSirMsgQ msg;

    /* Allocate the SME-bound response message */
    if (eHAL_STATUS_SUCCESS !=
        palAllocateMemory(pMac->hHdd, (void**)&pSmeRsp, rspLen))
    {
        limLog(pMac, LOGP, FL("call to palAllocateMemory failed for pSirSmeOemDataRsp\n"));
        return;
    }
#if defined (ANI_LITTLE_BYTE_ENDIAN)
    sirStoreU16N((tANI_U8*)&pSmeRsp->length, rspLen);
    sirStoreU16N((tANI_U8*)&pSmeRsp->messageType, eWNI_SME_OEM_DATA_RSP);
#else
    pSmeRsp->length = rspLen;
    pSmeRsp->messageType = eWNI_SME_OEM_DATA_RSP;
#endif
    /* Copy the fixed-size OEM payload, then release the MLM message */
    palCopyMemory(pMac->hHdd, pSmeRsp->oemDataRsp, pMlmRsp->oemDataRsp, OEM_DATA_RSP_SIZE);
    palFreeMemory(pMac->hHdd, pMlmRsp);

    msg.type = eWNI_SME_OEM_DATA_RSP;
    msg.bodyptr = pSmeRsp;
    msg.bodyval = 0;
    limSysProcessMmhMsgApi(pMac, &msg, ePROT);
    return;
} /*** limSendSmeOemDataRsp ***/
#endif
/**
* limSendSmeAuthRsp()
*
*FUNCTION:
* This function is called by limProcessSmeMessages() to send
* eWNI_SME_AUTH_RSP message to host
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* NA
*
* @param pMac Pointer to Global MAC structure
* @param statusCode Indicates the result of previously issued
* eWNI_SME_AUTH_REQ message
*
* @return None
*/
void
limSendSmeAuthRsp(tpAniSirGlobal pMac,
tSirResultCodes statusCode,
tSirMacAddr peerMacAddr,
tAniAuthType authType,
tANI_U16 protStatusCode,
tpPESession psessionEntry,tANI_U8 smesessionId,
tANI_U16 smetransactionId)
{
/* NOTE: the whole body below is compiled out with '#if 0', so this function
 * is currently a no-op. It is retained as reference for the original
 * eWNI_SME_AUTH_RSP serialization should it need to be re-enabled. */
#if 0
tSirMsgQ mmhMsg;
tSirSmeAuthRsp *pSirSmeAuthRsp;
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeAuthRsp, sizeof(tSirSmeAuthRsp)))
{
// Log error
limLog(pMac, LOGP,
FL("call to palAllocateMemory failed for eWNI_SME_AUTH_RSP\n"));
return;
}
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
sirStoreU16N((tANI_U8*)&pSirSmeAuthRsp->messageType, eWNI_SME_AUTH_RSP);
sirStoreU16N((tANI_U8*)&pSirSmeAuthRsp->length, sizeof(tSirSmeAuthRsp));
#endif
if(psessionEntry != NULL)
{
palCopyMemory( pMac->hHdd, (tANI_U8 *) pSirSmeAuthRsp->peerMacAddr,
(tANI_U8 *) peerMacAddr, sizeof(tSirMacAddr));
pSirSmeAuthRsp->authType = authType;
}
pSirSmeAuthRsp->messageType = eWNI_SME_AUTH_RSP;
pSirSmeAuthRsp->length = sizeof(tSirSmeAuthRsp);
pSirSmeAuthRsp->statusCode = statusCode;
pSirSmeAuthRsp->protStatusCode = protStatusCode;
/* Update SME session and transaction Id*/
pSirSmeAuthRsp->sessionId = smesessionId;
pSirSmeAuthRsp->transactionId = smetransactionId;
mmhMsg.type = eWNI_SME_AUTH_RSP;
mmhMsg.bodyptr = pSirSmeAuthRsp;
mmhMsg.bodyval = 0;
MTRACE(macTraceMsgTx(pMac, 0, mmhMsg.type));
limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
#endif
} /*** end limSendSmeAuthRsp() ***/
/**
 * limSendSmeDisassocDeauthNtfPostResume() - deliver a deferred
 * disassoc/deauth notification to SME once the link resume completes.
 *
 * The context pointer is the fully built tSirMsgQ-headed message that was
 * stashed before suspending the link; it doubles as the message body.
 *
 * @param pMac   Pointer to Global MAC structure
 * @param status Resume status (notification is sent regardless of it)
 * @param pCtx   The queued message, cast back to tSirMsgQ
 */
void limSendSmeDisassocDeauthNtfPostResume( tpAniSirGlobal pMac,
eHalStatus status, tANI_U32 *pCtx )
{
    tSirMsgQ *pQueued = (tSirMsgQ *) pCtx;
    tSirMsgQ msg;

    msg.type = pQueued->type;
    msg.bodyptr = pQueued; /* the context itself carries the payload */
    msg.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, msg.type));
    limSysProcessMmhMsgApi(pMac, &msg, ePROT);
}
/**
* limSendSmeDisassocNtf()
*
*FUNCTION:
* This function is called by limProcessSmeMessages() to send
* eWNI_SME_DISASSOC_RSP/IND message to host
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* This function is used for sending eWNI_SME_DISASSOC_CNF,
* or eWNI_SME_DISASSOC_IND to host depending on
* disassociation trigger.
*
* @param peerMacAddr Indicates the peer MAC addr to which
* disassociate was initiated
* @param reasonCode Indicates the reason for Disassociation
* @param disassocTrigger Indicates the trigger for Disassociation
* @param aid Indicates the STAID. This parameter is
* present only on AP.
*
* @return None
*/
void
limSendSmeDisassocNtf(tpAniSirGlobal pMac,
tSirMacAddr peerMacAddr,
tSirResultCodes reasonCode,
tANI_U16 disassocTrigger,
tANI_U16 aid,
tANI_U8 smesessionId,
tANI_U16 smetransactionId,
tpPESession psessionEntry)
{
tANI_U8 *pBuf;
tSirSmeDisassocRsp *pSirSmeDisassocRsp;
tSirSmeDisassocInd *pSirSmeDisassocInd;
tANI_U32 *pMsg;
// Select the message to build from the disassociation trigger:
//  - peer-entity trigger: nothing to send
//  - host trigger: build a DISASSOC_RSP
//  - anything else: build a DISASSOC_IND
switch (disassocTrigger)
{
case eLIM_PEER_ENTITY_DISASSOC:
return;
case eLIM_HOST_DISASSOC:
/**
* Disassociation response due to
* host triggered disassociation
*/
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeDisassocRsp, sizeof(tSirSmeDisassocRsp)))
{
// Log error
limLog(pMac, LOGP,
FL("call to palAllocateMemory failed for eWNI_SME_DISASSOC_RSP\n"));
return;
}
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
sirStoreU16N((tANI_U8*)&pSirSmeDisassocRsp->messageType,
eWNI_SME_DISASSOC_RSP);
sirStoreU16N((tANI_U8*)&pSirSmeDisassocRsp->length,
sizeof(tSirSmeDisassocRsp));
#else
pSirSmeDisassocRsp->messageType = eWNI_SME_DISASSOC_RSP;
pSirSmeDisassocRsp->length = sizeof(tSirSmeDisassocRsp);
#endif
// Hand-serialize the response body starting at 'sessionId':
// sessionId (1) | transactionId (2) | statusCode (4) | peerMacAddr (6)
//sessionId
pBuf = (tANI_U8 *) &pSirSmeDisassocRsp->sessionId;
*pBuf = smesessionId;
pBuf++;
//transactionId
limCopyU16(pBuf, smetransactionId);
pBuf += sizeof(tANI_U16);
//statusCode
limCopyU32(pBuf, reasonCode);
pBuf += sizeof(tSirResultCodes);
//peerMacAddr
palCopyMemory( pMac->hHdd, pBuf, peerMacAddr, sizeof(tSirMacAddr));
pBuf += sizeof(tSirMacAddr);
#if (WNI_POLARIS_FW_PRODUCT == AP)
// AP builds additionally append the AID and per-STA statistics.
limCopyU16(pBuf, aid);
pBuf += sizeof(tANI_U16);
// perStaStats
limStatSerDes(pMac, &pMac->hal.halMac.macStats.pPerStaStats[aid].staStat, pBuf);
#else
// Clear Station Stats
//for sta, it is always 1, IBSS is handled at halInitSta
#endif//#if (WNI_POLARIS_FW_PRODUCT == AP)
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
limDiagEventReport(pMac, WLAN_PE_DIAG_DISASSOC_RSP_EVENT,
psessionEntry, (tANI_U16)reasonCode, 0);
#endif
pMsg = (tANI_U32*) pSirSmeDisassocRsp;
break;
default:
/**
* Disassociation indication due to Disassociation
* frame reception from peer entity or due to
* loss of link with peer entity.
*/
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeDisassocInd, sizeof(tSirSmeDisassocInd)))
{
// Log error
limLog(pMac, LOGP,
FL("call to palAllocateMemory failed for eWNI_SME_DISASSOC_IND\n"));
return;
}
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
sirStoreU16N((tANI_U8*)&pSirSmeDisassocInd->messageType,
eWNI_SME_DISASSOC_IND);
sirStoreU16N((tANI_U8*)&pSirSmeDisassocInd->length,
sizeof(tSirSmeDisassocInd));
#else
pSirSmeDisassocInd->messageType = eWNI_SME_DISASSOC_IND;
pSirSmeDisassocInd->length = sizeof(tSirSmeDisassocInd);
/* Update SME session Id and Transaction Id */
// NOTE(review): sessionId/transactionId/reasonCode are only populated in
// this #else branch, not in the AP/LE serialization path above — confirm
// whether the AP build is expected to omit them.
pSirSmeDisassocInd->sessionId = smesessionId;
pSirSmeDisassocInd->transactionId = smetransactionId;
pSirSmeDisassocInd->reasonCode = reasonCode;
#endif
// Serialize: statusCode (4) | bssId (6) | peerMacAddr (6) [ | aid | stats ]
pBuf = (tANI_U8 *) &pSirSmeDisassocInd->statusCode;
limCopyU32(pBuf, reasonCode);
pBuf += sizeof(tSirResultCodes);
// NOTE(review): psessionEntry is dereferenced here, but the NULL check is
// performed only further below — confirm callers on the indication path
// always supply a valid session.
palCopyMemory( pMac->hHdd, pBuf, psessionEntry->bssId, sizeof(tSirMacAddr));
pBuf += sizeof(tSirMacAddr);
palCopyMemory( pMac->hHdd, pBuf, peerMacAddr, sizeof(tSirMacAddr));
#if (WNI_POLARIS_FW_PRODUCT == AP)
pBuf += sizeof(tSirMacAddr);
limCopyU16(pBuf, aid);
pBuf += sizeof(tANI_U16);
limStatSerDes(pMac, &pMac->hal.halMac.macStats.pPerStaStats[aid].staStat, pBuf);
#endif//#if (WNI_POLARIS_FW_PRODUCT == AP)
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
limDiagEventReport(pMac, WLAN_PE_DIAG_DISASSOC_IND_EVENT,
psessionEntry, (tANI_U16)reasonCode, 0);
#endif
pMsg = (tANI_U32*) pSirSmeDisassocInd;
#if (WNI_POLARIS_FW_PRODUCT == AP)
PELOG1(limLog(pMac, LOG1,
FL("*** Sending DisAssocInd staId=%d, reasonCode=%d ***\n"),
aid, reasonCode);)
#endif
break;
}
/* Delete the PE session Created */
if((psessionEntry != NULL) && ((psessionEntry ->limSystemRole == eLIM_STA_ROLE) ||
(psessionEntry ->limSystemRole == eLIM_BT_AMP_STA_ROLE)) )
{
peDeleteSession(pMac,psessionEntry);
}
// If the link is suspended (MCC), resume it first and deliver the
// notification from the resume callback; otherwise deliver immediately.
if( IS_MCC_SUPPORTED && limIsLinkSuspended( pMac ) )
{
//Resume on the first active session channel.
tANI_U8 resumeChannel;
ePhyChanBondState resumePhyCbState;
peGetActiveSessionChannel( pMac, &resumeChannel, &resumePhyCbState );
peSetResumeChannel( pMac, resumeChannel, resumePhyCbState );
limResumeLink( pMac, limSendSmeDisassocDeauthNtfPostResume,
(tANI_U32*) pMsg );
}
else
{
limSendSmeDisassocDeauthNtfPostResume( pMac, eHAL_STATUS_SUCCESS,
(tANI_U32*) pMsg );
}
} /*** end limSendSmeDisassocNtf() ***/
/** -----------------------------------------------------------------
\brief limSendSmeDisassocInd() - sends SME_DISASSOC_IND
After receiving disassociation frame from peer entity, this
function sends a eWNI_SME_DISASSOC_IND to SME with a specific
reason code.
\param pMac - global mac structure
\param pStaDs - station dph hash node
\return none
\sa
----------------------------------------------------------------- */
void
limSendSmeDisassocInd(tpAniSirGlobal pMac, tpDphHashNode pStaDs,tpPESession psessionEntry)
{
tSirMsgQ mmhMsg;
tSirSmeDisassocInd *pSirSmeDisassocInd;
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeDisassocInd, sizeof(tSirSmeDisassocInd)))
{
limLog(pMac, LOGP, FL("palAllocateMemory failed for eWNI_SME_DISASSOC_IND\n"));
return;
}
//psessionEntry = peFindSessionByBssid(pMac,pStaDs->staAddr,&sessionId);
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
sirStoreU16N((tANI_U8*)&pSirSmeDisassocInd->messageType, eWNI_SME_DISASSOC_IND);
sirStoreU16N((tANI_U8*)&pSirSmeDisassocInd->length, sizeof(tSirSmeDisassocInd));
#else
pSirSmeDisassocInd->messageType = eWNI_SME_DISASSOC_IND;
pSirSmeDisassocInd->length = sizeof(tSirSmeDisassocInd);
#endif
// The legacy byte-wise serialization has been replaced by the direct
// structure assignments below; kept compiled out for reference.
#if 0 //Commenting out all the serialization
//statusCode
pBuf = (tANI_U8 *) &pSirSmeDisassocInd->statusCode;
limCopyU32(pBuf, pStaDs->mlmStaContext.disassocReason);
pBuf += sizeof(tSirResultCodes);
//peerMacAddr
palCopyMemory( pMac->hHdd, pBuf, pStaDs->staAddr, sizeof(tSirMacAddr));
#ifdef ANI_PRODUCT_TYPE_AP
pBuf += sizeof(tSirMacAddr);
//aid
limCopyU16(pBuf, pStaDs->assocId);
pBuf += sizeof(tANI_U16);
//perStaStats
limStatSerDes(pMac, &pMac->hal.halMac.macStats.pPerStaStats[pStaDs->assocId].staStat, pBuf);
#endif
#endif
// Populate the indication from the station entry and the PE session.
// Both statusCode and reasonCode carry the same disassociation reason.
pSirSmeDisassocInd->sessionId = psessionEntry->smeSessionId;
pSirSmeDisassocInd->transactionId = psessionEntry->transactionId;
pSirSmeDisassocInd->statusCode = pStaDs->mlmStaContext.disassocReason;
pSirSmeDisassocInd->reasonCode = pStaDs->mlmStaContext.disassocReason;
palCopyMemory( pMac->hHdd, pSirSmeDisassocInd->bssId , psessionEntry->bssId , sizeof(tSirMacAddr));
palCopyMemory( pMac->hHdd, pSirSmeDisassocInd->peerMacAddr , pStaDs->staAddr, sizeof(tSirMacAddr));
#ifdef ANI_PRODUCT_TYPE_AP
pSirSmeDisassocInd->aid = pStaDs->assocId;
limStatSerDes(pMac, &pMac->hal.halMac.macStats.pPerStaStats[pStaDs->assocId].staStat,(tANI_U8*)&pSirSmeDisassocInd-> perStaStats );
#endif
#ifdef WLAN_SOFTAP_FEATURE
pSirSmeDisassocInd->staId = pStaDs->staIndex;
#endif
// Post the indication; buffer ownership transfers to the MMH dispatcher.
mmhMsg.type = eWNI_SME_DISASSOC_IND;
mmhMsg.bodyptr = pSirSmeDisassocInd;
mmhMsg.bodyval = 0;
MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, mmhMsg.type));
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
limDiagEventReport(pMac, WLAN_PE_DIAG_DISASSOC_IND_EVENT, psessionEntry, 0, (tANI_U16)pStaDs->mlmStaContext.disassocReason);
#endif //FEATURE_WLAN_DIAG_SUPPORT
limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
} /*** end limSendSmeDisassocInd() ***/
/** -----------------------------------------------------------------
\brief limSendSmeDeauthInd() - sends SME_DEAUTH_IND
After receiving deauthentication frame from peer entity, this
function sends a eWNI_SME_DEAUTH_IND to SME with a specific
reason code.
\param pMac - global mac structure
\param pStaDs - station dph hash node
\return none
\sa
----------------------------------------------------------------- */
void
limSendSmeDeauthInd(tpAniSirGlobal pMac, tpDphHashNode pStaDs, tpPESession psessionEntry)
{
#ifndef WLAN_SOFTAP_FEATURE
tANI_U8 *pBuf;
#endif
tSirMsgQ mmhMsg;
tSirSmeDeauthInd *pSirSmeDeauthInd;
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeDeauthInd, sizeof(tSirSmeDeauthInd)))
{
limLog(pMac, LOGP, FL("palAllocateMemory failed for eWNI_SME_DEAUTH_IND \n"));
return;
}
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
sirStoreU16N((tANI_U8*)&pSirSmeDeauthInd->messageType, eWNI_SME_DEAUTH_IND);
sirStoreU16N((tANI_U8*)&pSirSmeDeauthInd->length, sizeof(tSirSmeDeauthInd));
#else
pSirSmeDeauthInd->messageType = eWNI_SME_DEAUTH_IND;
pSirSmeDeauthInd->length = sizeof(tSirSmeDeauthInd);
#endif
#ifdef WLAN_SOFTAP_FEATURE
// SoftAP builds fill the structure fields directly.
pSirSmeDeauthInd->sessionId = psessionEntry->smeSessionId;
pSirSmeDeauthInd->transactionId = psessionEntry->transactionId;
if(eSIR_INFRA_AP_MODE == psessionEntry->bssType)
{
// AP mode reports the internal cleanup trigger as the status
pSirSmeDeauthInd->statusCode = (tSirResultCodes)pStaDs->mlmStaContext.cleanupTrigger;
}
else
{
//Need to indicate the reason code received over the air
pSirSmeDeauthInd->statusCode = (tSirResultCodes)pStaDs->mlmStaContext.disassocReason;
}
//BSSID
palCopyMemory( pMac->hHdd, pSirSmeDeauthInd->bssId, psessionEntry->bssId, sizeof(tSirMacAddr));
//peerMacAddr
palCopyMemory( pMac->hHdd, pSirSmeDeauthInd->peerMacAddr, pStaDs->staAddr, sizeof(tSirMacAddr));
pSirSmeDeauthInd->reasonCode = pStaDs->mlmStaContext.disassocReason;
#else
// Non-SoftAP builds hand-serialize the indication body.
//sessionId
pBuf = (tANI_U8 *) &pSirSmeDeauthInd->sessionId;
*pBuf++ = psessionEntry->smeSessionId;
//transactionId
limCopyU16(pBuf, 0);
pBuf += sizeof(tANI_U16);
// status code
limCopyU32(pBuf, pStaDs->mlmStaContext.cleanupTrigger);
pBuf += sizeof(tSirResultCodes);
//bssid
palCopyMemory( pMac->hHdd, pBuf, psessionEntry->bssId, sizeof(tSirMacAddr));
pBuf += sizeof(tSirMacAddr);
//peerMacAddr
palCopyMemory( pMac->hHdd, pBuf, pStaDs->staAddr, sizeof(tSirMacAddr));
#endif
#if (WNI_POLARIS_FW_PRODUCT == AP)
// NOTE(review): pBuf is only declared when WLAN_SOFTAP_FEATURE is NOT
// defined, so this block cannot compile with both WLAN_SOFTAP_FEATURE and
// WNI_POLARIS_FW_PRODUCT==AP. Also, passing pStaDs->staAddr (a MAC address)
// to limCopyU16 looks wrong — presumably pStaDs->assocId was intended.
// Confirm against the build configurations actually in use.
pBuf += sizeof(tSirMacAddr);
limCopyU16(pBuf, pStaDs->staAddr);
#endif
#ifdef WLAN_SOFTAP_FEATURE
pSirSmeDeauthInd->staId = pStaDs->staIndex;
#endif
// Post the indication; buffer ownership transfers to the MMH dispatcher.
mmhMsg.type = eWNI_SME_DEAUTH_IND;
mmhMsg.bodyptr = pSirSmeDeauthInd;
mmhMsg.bodyval = 0;
MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, mmhMsg.type));
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
limDiagEventReport(pMac, WLAN_PE_DIAG_DEAUTH_IND_EVENT, psessionEntry, 0, pStaDs->mlmStaContext.cleanupTrigger);
#endif //FEATURE_WLAN_DIAG_SUPPORT
limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
return;
} /*** end limSendSmeDeauthInd() ***/
/**
* limSendSmeDeauthNtf()
*
*FUNCTION:
* This function is called by limProcessSmeMessages() to send
* eWNI_SME_DEAUTH_RSP/IND message to host
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* This function is used for sending eWNI_SME_DEAUTH_CNF or
* eWNI_SME_DEAUTH_IND to host depending on deauthentication trigger.
*
* @param peerMacAddr Indicates the peer MAC addr to which
* deauthentication was initiated
* @param reasonCode Indicates the reason for Deauthentication
* @param deauthTrigger Indicates the trigger for Deauthentication
* @param aid Indicates the STAID. This parameter is present
* only on AP.
*
* @return None
*/
void
limSendSmeDeauthNtf(tpAniSirGlobal pMac, tSirMacAddr peerMacAddr, tSirResultCodes reasonCode,
tANI_U16 deauthTrigger, tANI_U16 aid,tANI_U8 smesessionId, tANI_U16 smetransactionId)
{
tANI_U8 *pBuf;
tSirSmeDeauthRsp *pSirSmeDeauthRsp;
tSirSmeDeauthInd *pSirSmeDeauthInd;
tpPESession psessionEntry;
tANI_U8 sessionId;
tANI_U32 *pMsg;
// Look up the PE session by BSSID; may return NULL if no session exists.
psessionEntry = peFindSessionByBssid(pMac,peerMacAddr,&sessionId);
// Select the message to build from the deauthentication trigger:
//  - peer-entity trigger: nothing to send
//  - host trigger: build a DEAUTH_RSP
//  - anything else: build a DEAUTH_IND
switch (deauthTrigger)
{
case eLIM_PEER_ENTITY_DEAUTH:
return;
case eLIM_HOST_DEAUTH:
/**
* Deauthentication response to host triggered
* deauthentication.
*/
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeDeauthRsp, sizeof(tSirSmeDeauthRsp)))
{
// Log error
limLog(pMac, LOGP,
FL("call to palAllocateMemory failed for eWNI_SME_DEAUTH_RSP\n"));
return;
}
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
sirStoreU16N((tANI_U8*) &(pSirSmeDeauthRsp->messageType),
eWNI_SME_DEAUTH_RSP);
sirStoreU16N((tANI_U8*) &(pSirSmeDeauthRsp->length),
sizeof(tSirSmeDeauthRsp));
#else
pSirSmeDeauthRsp->messageType = eWNI_SME_DEAUTH_RSP;
pSirSmeDeauthRsp->length = sizeof(tSirSmeDeauthRsp);
#endif
pSirSmeDeauthRsp->statusCode = reasonCode;
pSirSmeDeauthRsp->sessionId = smesessionId;
pSirSmeDeauthRsp->transactionId = smetransactionId;
pBuf = (tANI_U8 *) pSirSmeDeauthRsp->peerMacAddr;
palCopyMemory( pMac->hHdd, pBuf, peerMacAddr, sizeof(tSirMacAddr));
#if (WNI_POLARIS_FW_PRODUCT == AP)
// AP builds append the AID immediately after the peer MAC address.
pBuf += sizeof(tSirMacAddr);
limCopyU16(pBuf, aid);
#endif
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
limDiagEventReport(pMac, WLAN_PE_DIAG_DEAUTH_RSP_EVENT,
psessionEntry, 0, (tANI_U16)reasonCode);
#endif
pMsg = (tANI_U32*)pSirSmeDeauthRsp;
break;
default:
/**
* Deauthentication indication due to Deauthentication
* frame reception from peer entity or due to
* loss of link with peer entity.
*/
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeDeauthInd, sizeof(tSirSmeDeauthInd)))
{
// Log error
limLog(pMac, LOGP,
FL("call to palAllocateMemory failed for eWNI_SME_DEAUTH_Ind\n"));
return;
}
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
sirStoreU16N((tANI_U8*)&pSirSmeDeauthInd->messageType,
eWNI_SME_DEAUTH_IND);
sirStoreU16N((tANI_U8*)&pSirSmeDeauthInd->length,
sizeof(tSirSmeDeauthInd));
#else
pSirSmeDeauthInd->messageType = eWNI_SME_DEAUTH_IND;
pSirSmeDeauthInd->length = sizeof(tSirSmeDeauthInd);
pSirSmeDeauthInd->reasonCode = eSIR_MAC_UNSPEC_FAILURE_REASON;
#endif
// Hand-serialize the indication body:
// sessionId (1) | transactionId (2) | statusCode (4) | bssId (6) | peerMacAddr (6)
// sessionId
pBuf = (tANI_U8*) &pSirSmeDeauthInd->sessionId;
*pBuf++ = smesessionId;
//transaction ID
limCopyU16(pBuf, smetransactionId);
pBuf += sizeof(tANI_U16);
// status code
limCopyU32(pBuf, reasonCode);
pBuf += sizeof(tSirResultCodes);
//bssId
// NOTE(review): psessionEntry comes from peFindSessionByBssid and may be
// NULL (the code below explicitly checks it before peDeleteSession), yet
// it is dereferenced unconditionally here — confirm the indication path
// can never run without a session.
palCopyMemory( pMac->hHdd, pBuf, psessionEntry->bssId, sizeof(tSirMacAddr));
pBuf += sizeof(tSirMacAddr);
//peerMacAddr
palCopyMemory( pMac->hHdd, pSirSmeDeauthInd->peerMacAddr, peerMacAddr, sizeof(tSirMacAddr));
#if (WNI_POLARIS_FW_PRODUCT == AP)
pBuf += sizeof(tSirMacAddr);
limCopyU16(pBuf, aid);
#endif
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
limDiagEventReport(pMac, WLAN_PE_DIAG_DEAUTH_IND_EVENT,
psessionEntry, 0, (tANI_U16)reasonCode);
#endif //FEATURE_WLAN_DIAG_SUPPORT
pMsg = (tANI_U32*)pSirSmeDeauthInd;
break;
}
/*Delete the PE session created */
if(psessionEntry != NULL)
{
peDeleteSession(pMac,psessionEntry);
}
// If the link is suspended (MCC), resume it first and deliver the
// notification from the resume callback; otherwise deliver immediately.
if( IS_MCC_SUPPORTED && limIsLinkSuspended( pMac ) )
{
//Resume on the first active session channel.
tANI_U8 resumeChannel;
ePhyChanBondState resumePhyCbState;
peGetActiveSessionChannel( pMac, &resumeChannel, &resumePhyCbState );
peSetResumeChannel( pMac, resumeChannel, resumePhyCbState );
limResumeLink( pMac, limSendSmeDisassocDeauthNtfPostResume,
(tANI_U32*) pMsg );
}
else
{
limSendSmeDisassocDeauthNtfPostResume( pMac, eHAL_STATUS_SUCCESS,
(tANI_U32*) pMsg );
}
} /*** end limSendSmeDeauthNtf() ***/
/**
* limSendSmeWmStatusChangeNtf()
*
*FUNCTION:
* This function is called by limProcessSmeMessages() to send
* eWNI_SME_WM_STATUS_CHANGE_NTF message to host.
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
*
* @param statusChangeCode Indicates the change in the wireless medium.
* @param statusChangeInfo Indicates the information associated with
* change in the wireless medium.
* @param infoLen Indicates the length of status change information
* being sent.
*
* @return None
*/
void
limSendSmeWmStatusChangeNtf(tpAniSirGlobal pMac, tSirSmeStatusChangeCode statusChangeCode,
tANI_U32 *pStatusChangeInfo, tANI_U16 infoLen, tANI_U8 sessionId)
{
tSirMsgQ mmhMsg;
tSirSmeWmStatusChangeNtf *pSirSmeWmStatusChangeNtf;
eHalStatus status;
#if (WNI_POLARIS_FW_PACKAGE == ADVANCED) && (WNI_POLARIS_FW_PRODUCT == AP)
tANI_U32 bufLen;
tANI_U16 length=0;
tANI_U8 *pBuf;
#endif
// Allocate the notification; freed below on any failure path, otherwise
// ownership transfers with the posted message.
status = palAllocateMemory( pMac->hHdd, (void **)&pSirSmeWmStatusChangeNtf,
sizeof(tSirSmeWmStatusChangeNtf));
if (status != eHAL_STATUS_SUCCESS)
{
limLog(pMac, LOGE,
FL("call to palAllocateMemory failed for eWNI_SME_WM_STATUS_CHANGE_NTF, status = %d\n"),
status);
return;
}
#if (WNI_POLARIS_FW_PACKAGE == ADVANCED) && (WNI_POLARIS_FW_PRODUCT == AP)
pBuf = (tANI_U8 *)pSirSmeWmStatusChangeNtf;
#endif
mmhMsg.type = eWNI_SME_WM_STATUS_CHANGE_NTF;
mmhMsg.bodyval = 0;
mmhMsg.bodyptr = pSirSmeWmStatusChangeNtf;
// Fill the notification body according to the status-change code; the
// AP/ADVANCED build serializes, all other codes fall to the default path.
switch(statusChangeCode)
{
case eSIR_SME_RADAR_DETECTED:
#if (WNI_POLARIS_FW_PACKAGE == ADVANCED) && (WNI_POLARIS_FW_PRODUCT == AP)
bufLen = sizeof(tSirSmeWmStatusChangeNtf);
if ((limSmeWmStatusChangeHeaderSerDes(pMac,
statusChangeCode,
pBuf,
&length,
bufLen,
sessionId) != eSIR_SUCCESS))
{
palFreeMemory(pMac->hHdd, (void *) pSirSmeWmStatusChangeNtf);
limLog(pMac, LOGP, FL("Header SerDes failed \n"));
return;
}
pBuf += length;
bufLen -= length;
if ((limRadioInfoSerDes(pMac,
(tpSirRadarInfo)pStatusChangeInfo,
pBuf,
&length,
bufLen) != eSIR_SUCCESS))
{
palFreeMemory(pMac->hHdd, (void *) pSirSmeWmStatusChangeNtf);
limLog(pMac, LOGP, FL("Radio Info SerDes failed \n"));
return;
}
// Patch the final length field (second tANI_U16 of the header).
pBuf = (tANI_U8 *) pSirSmeWmStatusChangeNtf;
pBuf += sizeof(tANI_U16);
limCopyU16(pBuf, length);
#endif
break;
case eSIR_SME_CB_LEGACY_BSS_FOUND_BY_AP:
#if (WNI_POLARIS_FW_PACKAGE == ADVANCED) && (WNI_POLARIS_FW_PRODUCT == AP)
if( eSIR_SUCCESS != nonTitanBssFoundSerDes( pMac,
(tpSirNeighborBssWdsInfo) pStatusChangeInfo,
pBuf,
&length,
sessionId))
{
palFreeMemory(pMac->hHdd, (void *) pSirSmeWmStatusChangeNtf);
limLog( pMac, LOGP,
FL("Unable to serialize nonTitanBssFoundSerDes!\n"));
return;
}
#endif
break;
case eSIR_SME_BACKGROUND_SCAN_FAIL:
limPackBkgndScanFailNotify(pMac,
statusChangeCode,
(tpSirBackgroundScanInfo)pStatusChangeInfo,
pSirSmeWmStatusChangeNtf, sessionId);
break;
default:
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
sirStoreU16N((tANI_U8*)&pSirSmeWmStatusChangeNtf->messageType,
eWNI_SME_WM_STATUS_CHANGE_NTF );
sirStoreU16N((tANI_U8*)&pSirSmeWmStatusChangeNtf->length,
(sizeof(tSirSmeWmStatusChangeNtf)));
pSirSmeWmStatusChangeNtf->sessionId = sessionId;
sirStoreU32N((tANI_U8*)&pSirSmeWmStatusChangeNtf->statusChangeCode,
statusChangeCode);
#else
pSirSmeWmStatusChangeNtf->messageType = eWNI_SME_WM_STATUS_CHANGE_NTF;
pSirSmeWmStatusChangeNtf->statusChangeCode = statusChangeCode;
pSirSmeWmStatusChangeNtf->length = sizeof(tSirSmeWmStatusChangeNtf);
pSirSmeWmStatusChangeNtf->sessionId = sessionId;
#endif
// Copy the opaque payload only if it fits; otherwise it is silently
// dropped and only the header fields are sent.
if(sizeof(pSirSmeWmStatusChangeNtf->statusChangeInfo) >= infoLen)
{
palCopyMemory( pMac->hHdd, (tANI_U8 *)&pSirSmeWmStatusChangeNtf->statusChangeInfo, (tANI_U8 *)pStatusChangeInfo, infoLen);
}
limLog(pMac, LOGE, FL("***---*** StatusChg: code 0x%x, length %d ***---***\n"),
statusChangeCode, infoLen);
break;
}
MTRACE(macTraceMsgTx(pMac, NO_SESSION, mmhMsg.type));
// On post failure the dispatcher did not take ownership — free here.
if (eSIR_SUCCESS != limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT))
{
palFreeMemory(pMac->hHdd, (void *) pSirSmeWmStatusChangeNtf);
limLog( pMac, LOGP, FL("limSysProcessMmhMsgApi failed\n"));
}
} /*** end limSendSmeWmStatusChangeNtf() ***/
/**
* limSendSmeSetContextRsp()
*
*FUNCTION:
* This function is called by limProcessSmeMessages() to send
* eWNI_SME_SETCONTEXT_RSP message to host
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
*
* @param pMac Pointer to Global MAC structure
* @param peerMacAddr Indicates the peer MAC addr to which
* setContext was performed
* @param aid Indicates the aid corresponding to the peer MAC
* address
* @param resultCode Indicates the result of previously issued
* eWNI_SME_SETCONTEXT_RSP message
*
* @return None
*/
/**
 * limSendSmeSetContextRsp() - send eWNI_SME_SETCONTEXT_RSP to the host.
 *
 * Builds a set-context response carrying the result code, the peer MAC
 * address (plus AID on AP firmware builds), and the SME session/transaction
 * identifiers, then posts it to the MMH dispatcher. Buffer ownership
 * transfers with the posted message.
 *
 * @param pMac             Pointer to Global MAC structure
 * @param peerMacAddr      Peer MAC address the setContext was performed for
 * @param aid              AID of the peer (serialized on AP builds only)
 * @param resultCode       Result of the earlier eWNI_SME_SETCONTEXT_REQ
 * @param psessionEntry    PE session (used for tracing/diag reporting)
 * @param smesessionId     SME session id echoed back to the host
 * @param smetransactionId SME transaction id echoed back to the host
 */
void
limSendSmeSetContextRsp(tpAniSirGlobal pMac,
tSirMacAddr peerMacAddr, tANI_U16 aid,
tSirResultCodes resultCode,
tpPESession psessionEntry,tANI_U8 smesessionId,tANI_U16 smetransactionId)
{
    tSirSmeSetContextRsp *pRsp;
    tSirMsgQ msg;
    tANI_U8 *pWrite;

    if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pRsp, sizeof(tSirSmeSetContextRsp)))
    {
        // Log error
        limLog(pMac, LOGP,
               FL("call to palAllocateMemory failed for SmeSetContextRsp\n"));
        return;
    }

#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
    /* AP/LE builds serialize the header fields byte-wise */
    sirStoreU16N((tANI_U8*)&pRsp->messageType, eWNI_SME_SETCONTEXT_RSP);
    sirStoreU16N((tANI_U8*)&pRsp->length, sizeof(tSirSmeSetContextRsp));
#else
    pRsp->messageType = eWNI_SME_SETCONTEXT_RSP;
    pRsp->length = sizeof(tSirSmeSetContextRsp);
#endif
    pRsp->statusCode = resultCode;

    /* peer MAC, then (AP builds only) the AID right after it */
    pWrite = pRsp->peerMacAddr;
    palCopyMemory( pMac->hHdd, pWrite, (tANI_U8 *) peerMacAddr, sizeof(tSirMacAddr));
    pWrite += sizeof(tSirMacAddr);
#if (WNI_POLARIS_FW_PRODUCT == AP)
    limCopyU16(pWrite, aid);
    pWrite += sizeof(tANI_U16);
#endif

    /* Update SME session and transaction Id*/
    pRsp->sessionId = smesessionId;
    pRsp->transactionId = smetransactionId;

    msg.type = eWNI_SME_SETCONTEXT_RSP;
    msg.bodyptr = pRsp;
    msg.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msg.type));
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    limDiagEventReport(pMac, WLAN_PE_DIAG_SETCONTEXT_RSP_EVENT, psessionEntry, (tANI_U16)resultCode, 0);
#endif //FEATURE_WLAN_DIAG_SUPPORT
    limSysProcessMmhMsgApi(pMac, &msg, ePROT);
} /*** end limSendSmeSetContextRsp() ***/
/**
* limSendSmeRemoveKeyRsp()
*
*FUNCTION:
* This function is called by limProcessSmeMessages() to send
* eWNI_SME_REMOVEKEY_RSP message to host
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
*
* @param pMac Pointer to Global MAC structure
* @param peerMacAddr Indicates the peer MAC addr to which
* Removekey was performed
* @param aid Indicates the aid corresponding to the peer MAC
* address
* @param resultCode Indicates the result of previously issued
* eWNI_SME_REMOVEKEY_RSP message
*
* @return None
*/
void
limSendSmeRemoveKeyRsp(tpAniSirGlobal pMac,
tSirMacAddr peerMacAddr,
tSirResultCodes resultCode,
tpPESession psessionEntry,tANI_U8 smesessionId,
tANI_U16 smetransactionId)
{
tANI_U8 *pBuf;
tSirMsgQ mmhMsg;
tSirSmeRemoveKeyRsp *pSirSmeRemoveKeyRsp;
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pSirSmeRemoveKeyRsp, sizeof(tSirSmeRemoveKeyRsp)))
{
// Log error
limLog(pMac, LOGP,
FL("call to palAllocateMemory failed for SmeRemoveKeyRsp\n"));
return;
}
#if defined (ANI_PRODUCT_TYPE_AP) && defined(ANI_LITTLE_BYTE_ENDIAN)
// NOTE(review): these byte-wise stores are unconditionally overwritten by
// the direct assignments below — confirm whether the AP/LE serialization
// is still required at all.
sirStoreU16N((tANI_U8*)&pSirSmeRemoveKeyRsp->messageType,
eWNI_SME_REMOVEKEY_RSP);
sirStoreU16N((tANI_U8*)&pSirSmeRemoveKeyRsp->length,
sizeof(tSirSmeRemoveKeyRsp));
#endif
// Serialize peer MAC followed by the result code when a session exists.
if(psessionEntry != NULL)
{
pBuf = pSirSmeRemoveKeyRsp->peerMacAddr;
palCopyMemory( pMac->hHdd, pBuf, (tANI_U8 *) peerMacAddr, sizeof(tSirMacAddr));
pBuf += sizeof(tSirMacAddr);
limCopyU32(pBuf, resultCode);
}
pSirSmeRemoveKeyRsp->messageType = eWNI_SME_REMOVEKEY_RSP;
pSirSmeRemoveKeyRsp->length = sizeof(tSirSmeRemoveKeyRsp);
pSirSmeRemoveKeyRsp->statusCode = resultCode;
/* Update SME session and transaction Id*/
pSirSmeRemoveKeyRsp->sessionId = smesessionId;
pSirSmeRemoveKeyRsp->transactionId = smetransactionId;
mmhMsg.type = eWNI_SME_REMOVEKEY_RSP;
mmhMsg.bodyptr = pSirSmeRemoveKeyRsp;
mmhMsg.bodyval = 0;
// NOTE(review): psessionEntry is dereferenced here although it was treated
// as possibly-NULL above — confirm callers always pass a valid session.
MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, mmhMsg.type));
limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
} /*** end limSendSmeRemoveKeyRsp() ***/
/**
* limSendSmePromiscuousModeRsp()
*
*FUNCTION:
* This function is called by limProcessSmeMessages() to send
* eWNI_PROMISCUOUS_MODE_RSP message to host
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* This function is used for sending eWNI_SME_PROMISCUOUS_MODE_RSP to
* host as a reply to eWNI_SME_PROMISCUOUS_MODE_REQ directive from it.
*
* @param None
* @return None
*/
void
limSendSmePromiscuousModeRsp(tpAniSirGlobal pMac)
{
/* NOTE: the whole body below is compiled out with '#if 0', so this function
 * is currently a no-op. It is retained as reference for the original
 * eWNI_SME_PROMISCUOUS_MODE_RSP delivery should it need to be re-enabled. */
#if 0
tSirMsgQ mmhMsg;
tSirMbMsg *pMbMsg;
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pMbMsg, sizeof(tSirMbMsg)))
{
// Log error
limLog(pMac, LOGP, FL("call to palAllocateMemory failed\n"));
return;
}
pMbMsg->type = eWNI_SME_PROMISCUOUS_MODE_RSP;
pMbMsg->msgLen = 4;
mmhMsg.type = eWNI_SME_PROMISCUOUS_MODE_RSP;
mmhMsg.bodyptr = pMbMsg;
mmhMsg.bodyval = 0;
MTRACE(macTraceMsgTx(pMac, 0, mmhMsg.type));
limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
#endif
} /*** end limSendSmePromiscuousModeRsp() ***/
/**
* limSendSmeNeighborBssInd()
*
*FUNCTION:
* This function is called by limLookupNaddHashEntry() to send
* eWNI_SME_NEIGHBOR_BSS_IND message to host
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* This function is used for sending eWNI_SME_NEIGHBOR_BSS_IND to
* host upon detecting new BSS during background scanning if CFG
* option is enabled for sending such indication
*
* @param pMac - Pointer to Global MAC structure
* @return None
*/
void
limSendSmeNeighborBssInd(tpAniSirGlobal pMac,
tLimScanResultNode *pBssDescr)
{
tSirMsgQ msgQ;
tANI_U32 val;
tSirSmeNeighborBssInd *pNewBssInd;
// Only indicate new BSSs while LIM is doing its own background scan;
// skip when the current scan was requested by HDD (gLimRspReqd).
if ((pMac->lim.gLimSmeState != eLIM_SME_LINK_EST_WT_SCAN_STATE) ||
((pMac->lim.gLimSmeState == eLIM_SME_LINK_EST_WT_SCAN_STATE) &&
pMac->lim.gLimRspReqd))
{
// LIM is not in background scan state OR
// current scan is initiated by HDD.
// No need to send new BSS indication to HDD
return;
}
// The indication is also gated by a CFG option.
if (wlan_cfgGetInt(pMac, WNI_CFG_NEW_BSS_FOUND_IND, &val) != eSIR_SUCCESS)
{
limLog(pMac, LOGP, FL("could not get NEIGHBOR_BSS_IND from CFG\n"));
return;
}
if (val == 0)
return;
/**
* Need to indicate new BSSs found during
* background scanning to host.
* Allocate buffer for sending indication.
* Length of buffer is length of BSS description
* and length of header itself
*/
// 'val' is reused here as the total message length:
// variable BSS description + 2-byte length + 4-byte header + 1-byte sessionId.
val = pBssDescr->bssDescription.length + sizeof(tANI_U16) + sizeof(tANI_U32) + sizeof(tANI_U8);
if( eHAL_STATUS_SUCCESS != palAllocateMemory( pMac->hHdd, (void **)&pNewBssInd, val))
{
// Log error
limLog(pMac, LOGP,
FL("call to palAllocateMemory failed for eWNI_SME_NEIGHBOR_BSS_IND\n"));
return;
}
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
sirStoreU16N((tANI_U8*) &pNewBssInd->messageType,
eWNI_SME_NEIGHBOR_BSS_IND);
sirStoreU16N((tANI_U8*)&pNewBssInd->length, (tANI_U16)val );
#else
pNewBssInd->messageType = eWNI_SME_NEIGHBOR_BSS_IND;
pNewBssInd->length = (tANI_U16) val;
#endif
pNewBssInd->sessionId = 0;
#if (WNI_POLARIS_FW_PRODUCT == WLAN_STA)
// Copy the BSS description including its leading 2-byte length field.
palCopyMemory( pMac->hHdd, (tANI_U8 *) pNewBssInd->bssDescription,
(tANI_U8 *) &pBssDescr->bssDescription,
pBssDescr->bssDescription.length + sizeof(tANI_U16));
#endif
// Post the indication; buffer ownership transfers to the MMH dispatcher.
msgQ.type = eWNI_SME_NEIGHBOR_BSS_IND;
msgQ.bodyptr = pNewBssInd;
msgQ.bodyval = 0;
MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));
limSysProcessMmhMsgApi(pMac, &msgQ, ePROT);
} /*** end limSendSmeNeighborBssInd() ***/
/** -----------------------------------------------------------------
\brief limSendSmeAddtsRsp() - sends SME ADDTS RSP
\ This function sends a eWNI_SME_ADDTS_RSP to SME.
\ SME only looks at rc and tspec field.
\param pMac - global mac structure
\param rspReqd - is SmeAddTsRsp required
\param status - status code of SME_ADD_TS_RSP
\return tspec
\sa
----------------------------------------------------------------- */
void
limSendSmeAddtsRsp(tpAniSirGlobal pMac, tANI_U8 rspReqd, tANI_U32 status, tpPESession psessionEntry,
tSirMacTspecIE tspec, tANI_U8 smesessionId, tANI_U16 smetransactionId)
{
    tSirMsgQ msgQ;
    tpSirAddtsRsp pAddtsRsp;

    /* Caller did not ask for a response — nothing to send. */
    if (!rspReqd)
        return;

    if (eHAL_STATUS_SUCCESS !=
        palAllocateMemory(pMac->hHdd, (void **)&pAddtsRsp, sizeof(tSirAddtsRsp)))
    {
        limLog(pMac, LOGP, FL("palAllocateMemory failed for ADDTS_RSP"));
        return;
    }
    palZeroMemory(pMac->hHdd, (tANI_U8 *)pAddtsRsp, sizeof(*pAddtsRsp));

    /* SME only inspects rc and the tspec inside rsp. */
    pAddtsRsp->messageType = eWNI_SME_ADDTS_RSP;
    pAddtsRsp->rc = status;
    pAddtsRsp->rsp.status = (enum eSirMacStatusCodes) status;
    pAddtsRsp->rsp.tspec = tspec;
    /* Echo the SME session and transaction identifiers back to the requester. */
    pAddtsRsp->sessionId = smesessionId;
    pAddtsRsp->transactionId = smetransactionId;

    msgQ.type = eWNI_SME_ADDTS_RSP;
    msgQ.bodyptr = pAddtsRsp;
    msgQ.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type));
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    limDiagEventReport(pMac, WLAN_PE_DIAG_ADDTS_RSP_EVENT, psessionEntry, 0, 0);
#endif //FEATURE_WLAN_DIAG_SUPPORT
    limSysProcessMmhMsgApi(pMac, &msgQ, ePROT);
}
void
limSendSmeAddtsInd(tpAniSirGlobal pMac, tpSirAddtsReqInfo addts)
{
    tSirMsgQ msgQ;
    tpSirAddtsRsp pAddtsInd;

    limLog(pMac, LOGW, "SendSmeAddtsInd (token %d, tsid %d, up %d)\n",
           addts->dialogToken,
           addts->tspec.tsinfo.traffic.tsid,
           addts->tspec.tsinfo.traffic.userPrio);

    if (eHAL_STATUS_SUCCESS !=
        palAllocateMemory(pMac->hHdd, (void **)&pAddtsInd, sizeof(tSirAddtsRsp)))
    {
        /* Log error */
        limLog(pMac, LOGP, FL("palAllocateMemory failed for ADDTS_IND\n"));
        return;
    }
    palZeroMemory(pMac->hHdd, (tANI_U8 *)pAddtsInd, sizeof(*pAddtsInd));

    /* Carry the peer's ADDTS request verbatim inside the indication body. */
    pAddtsInd->messageType = eWNI_SME_ADDTS_IND;
    palCopyMemory(pMac->hHdd, (tANI_U8 *)&pAddtsInd->rsp, (tANI_U8 *)addts,
                  sizeof(*addts));

    msgQ.type = eWNI_SME_ADDTS_IND;
    msgQ.bodyptr = pAddtsInd;
    msgQ.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));
    limSysProcessMmhMsgApi(pMac, &msgQ, ePROT);
}
/**
 * limSendSmeDeltsRsp() - sends eWNI_SME_DELTS_RSP to SME.
 *
 * @param pMac              global mac structure
 * @param delts             DELTS request being answered (aid/tsinfo copied out)
 * @param status            result code placed in rsp->rc
 * @param psessionEntry     PE session; MAY be NULL (handled below)
 * @param smesessionId      SME session id echoed back
 * @param smetransactionId  SME transaction id echoed back
 * @return none
 */
void
limSendSmeDeltsRsp(tpAniSirGlobal pMac, tpSirDeltsReq delts, tANI_U32 status,tpPESession psessionEntry,tANI_U8 smesessionId,tANI_U16 smetransactionId)
{
    tpSirDeltsRsp rsp;
    tSirMsgQ mmhMsg;
    limLog(pMac, LOGW, "SendSmeDeltsRsp (aid %d, tsid %d, up %d) status %d\n",
           delts->aid,
           delts->req.tsinfo.traffic.tsid,
           delts->req.tsinfo.traffic.userPrio,
           status);
    if (!delts->rspReqd)
        return;
    if (eHAL_STATUS_SUCCESS !=
        palAllocateMemory(pMac->hHdd, (void **)&rsp, sizeof(tSirDeltsRsp)))
    {
        // Log error
        limLog(pMac, LOGP, FL("palAllocateMemory failed for DELTS_RSP\n"));
        return;
    }
    palZeroMemory(pMac->hHdd, (tANI_U8 *)rsp, sizeof(*rsp));
    if (psessionEntry != NULL)
    {
        rsp->aid = delts->aid;
        palCopyMemory(pMac->hHdd, (tANI_U8 *)&rsp->macAddr[0],
                      (tANI_U8 *)&delts->macAddr[0], 6);
        palCopyMemory(pMac->hHdd, (tANI_U8 *)&rsp->rsp,
                      (tANI_U8 *)&delts->req, sizeof(tSirDeltsReqInfo));
    }
    rsp->messageType = eWNI_SME_DELTS_RSP;
    rsp->rc = status;
    /* Update SME session Id and transaction Id */
    rsp->sessionId = smesessionId;
    rsp->transactionId = smetransactionId;
    mmhMsg.type = eWNI_SME_DELTS_RSP;
    mmhMsg.bodyptr = rsp;
    mmhMsg.bodyval = 0;
    /* BUG FIX: psessionEntry may legitimately be NULL (it is explicitly
     * checked above), so it must not be dereferenced unconditionally for
     * the trace session id — fall back to NO_SESSION. */
    MTRACE(macTraceMsgTx(pMac,
                         (psessionEntry != NULL) ? psessionEntry->peSessionId :
                                                   NO_SESSION,
                         mmhMsg.type));
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    /* NOTE(review): limDiagEventReport receives a possibly-NULL session;
     * presumed NULL-safe elsewhere — TODO confirm. */
    limDiagEventReport(pMac, WLAN_PE_DIAG_DELTS_RSP_EVENT, psessionEntry, (tANI_U16)status, 0);
#endif //FEATURE_WLAN_DIAG_SUPPORT
    limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
}
void
limSendSmeDeltsInd(tpAniSirGlobal pMac, tpSirDeltsReqInfo delts, tANI_U16 aid,tpPESession psessionEntry)
{
    tSirMsgQ msgQ;
    tpSirDeltsRsp pDeltsInd;

    limLog(pMac, LOGW, "SendSmeDeltsInd (aid %d, tsid %d, up %d)\n",
           aid,
           delts->tsinfo.traffic.tsid,
           delts->tsinfo.traffic.userPrio);

    if (eHAL_STATUS_SUCCESS !=
        palAllocateMemory(pMac->hHdd, (void **)&pDeltsInd, sizeof(tSirDeltsRsp)))
    {
        /* Log error */
        limLog(pMac, LOGP, FL("palAllocateMemory failed for DELTS_IND\n"));
        return;
    }
    palZeroMemory(pMac->hHdd, (tANI_U8 *)pDeltsInd, sizeof(*pDeltsInd));

    /* Build the indication: copy the DELTS info and tag it successful. */
    pDeltsInd->messageType = eWNI_SME_DELTS_IND;
    pDeltsInd->rc = eSIR_SUCCESS;
    pDeltsInd->aid = aid;
    palCopyMemory(pMac->hHdd, (tANI_U8 *)&pDeltsInd->rsp, (tANI_U8 *)delts,
                  sizeof(*delts));
    /* Propagate SME session and transaction ids from the PE session. */
    pDeltsInd->sessionId = psessionEntry->smeSessionId;
    pDeltsInd->transactionId = psessionEntry->transactionId;

    msgQ.type = eWNI_SME_DELTS_IND;
    msgQ.bodyptr = pDeltsInd;
    msgQ.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, psessionEntry->peSessionId, msgQ.type));
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    limDiagEventReport(pMac, WLAN_PE_DIAG_DELTS_IND_EVENT, psessionEntry, 0, 0);
#endif //FEATURE_WLAN_DIAG_SUPPORT
    limSysProcessMmhMsgApi(pMac, &msgQ, ePROT);
}
/**
* limSendSmeStatsRsp()
*
*FUNCTION:
* This function is called to send 802.11 statistics response to HDD.
* This function posts the result back to HDD. This is a response to
* HDD's request for statistics.
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* NA
*
* @param pMac Pointer to Global MAC structure
* @param msgType WDA/PE response type, translated into the matching SME message id
* @param stats Statistics payload forwarded to HDD as the message body
*
* @return none
*/
void
limSendSmeStatsRsp(tpAniSirGlobal pMac, tANI_U16 msgType, void* stats)
{
    tSirSmeRsp *pMsgHdr = (tSirSmeRsp *) stats;
    tSirMsgQ msgQ;
    tANI_U16 rspType;

    /* Translate the WDA response code into the corresponding SME message
     * id; anything else originated inside PE and passes through as-is. */
    if (msgType == WDA_STA_STAT_RSP)
        rspType = eWNI_SME_STA_STAT_RSP;
    else if (msgType == WDA_AGGR_STAT_RSP)
        rspType = eWNI_SME_AGGR_STAT_RSP;
    else if (msgType == WDA_GLOBAL_STAT_RSP)
        rspType = eWNI_SME_GLOBAL_STAT_RSP;
    else if (msgType == WDA_STAT_SUMM_RSP)
        rspType = eWNI_SME_STAT_SUMM_RSP;
    else
        rspType = msgType; /* Response from within PE */

    /* Stamp the payload header so the receiver sees the SME type too. */
    pMsgHdr->messageType = rspType;

    msgQ.type = rspType;
    msgQ.bodyptr = stats;
    msgQ.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));
    limSysProcessMmhMsgApi(pMac, &msgQ, ePROT);
} /*** end limSendSmeStatsRsp() ***/
/**
* limSendSmePEStatisticsRsp()
*
*FUNCTION:
* This function is called to send 802.11 statistics response to HDD.
* This function posts the result back to HDD. This is a response to
* HDD's request for statistics.
*
*PARAMS:
*
*LOGIC:
*
*ASSUMPTIONS:
* NA
*
*NOTE:
* NA
*
* @param pMac Pointer to Global MAC structure
* @param msgType originating response type (unused; reply is always eWNI_SME_GET_STATISTICS_RSP)
* @param stats tAniGetPEStatsRsp payload forwarded to HDD as the message body
*
* @return none
*/
void
limSendSmePEStatisticsRsp(tpAniSirGlobal pMac, tANI_U16 msgType, void* stats)
{
    tAniGetPEStatsRsp *pStatsRsp = (tAniGetPEStatsRsp *) stats;
    tpPESession pSession;
    tANI_U8 sessionId;
    tSirMsgQ msgQ;

    /* Map the HAL staId back to its owning PE session, if one exists,
     * and stamp the SME session id into the response. */
    pSession = peFindSessionByStaId(pMac, pStatsRsp->staId, &sessionId);
    if (pSession != NULL)
        pStatsRsp->sessionId = pSession->smeSessionId;

    pStatsRsp->msgType = eWNI_SME_GET_STATISTICS_RSP;

    //msgType should be WDA_GET_STATISTICS_RSP
    msgQ.type = eWNI_SME_GET_STATISTICS_RSP;
    msgQ.bodyptr = stats;
    msgQ.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));
    limSysProcessMmhMsgApi(pMac, &msgQ, ePROT);
} /*** end limSendSmePEStatisticsRsp() ***/
/**
 * limSendSmeIBSSPeerInd() - posts an IBSS peer indication (join/leave)
 * of type msgType to SME, carrying the peer address, STA index,
 * unicast/broadcast signatures and an optional copy of the peer's beacon
 * appended after the fixed tSmeIbssPeerInd header.
 *
 * @param pMac        Pointer to Global MAC structure
 * @param peerMacAddr MAC address of the IBSS peer
 * @param staIndex    HAL STA index of the peer
 * @param ucastIdx    unicast key signature
 * @param bcastIdx    broadcast key signature
 * @param beacon      optional beacon frame to append (may be NULL)
 * @param beaconLen   length of the beacon in bytes (0 when beacon is NULL)
 * @param msgType     SME indication type to post
 * @param sessionId   session id placed in the indication
 * @return none
 */
void
limSendSmeIBSSPeerInd(
tpAniSirGlobal pMac,
tSirMacAddr peerMacAddr,
tANI_U16 staIndex,
tANI_U8 ucastIdx,
tANI_U8 bcastIdx,
tANI_U8 *beacon,
tANI_U16 beaconLen,
tANI_U16 msgType,
tANI_U8 sessionId)
{
    tSirMsgQ mmhMsg;
    tSmeIbssPeerInd *pNewPeerInd;
    if (eHAL_STATUS_SUCCESS !=
        palAllocateMemory(pMac->hHdd, (void **)&pNewPeerInd,
                          (sizeof(tSmeIbssPeerInd) + beaconLen)))
    {
        PELOGE(limLog(pMac, LOGE, FL("Failed to allocate memory"));)
        return;
    }
    palZeroMemory(pMac->hHdd, (void *)pNewPeerInd,
                  (sizeof(tSmeIbssPeerInd) + beaconLen));
    palCopyMemory(pMac->hHdd, (tANI_U8 *)pNewPeerInd->peerAddr,
                  peerMacAddr, sizeof(tSirMacAddr));
    pNewPeerInd->staId = staIndex;
    pNewPeerInd->ucastSig = ucastIdx;
    pNewPeerInd->bcastSig = bcastIdx;
    pNewPeerInd->mesgLen = sizeof(tSmeIbssPeerInd) + beaconLen;
    pNewPeerInd->mesgType = msgType;
    pNewPeerInd->sessionId = sessionId;
    if (beacon != NULL)
    {
        /* Beacon payload lives immediately after the fixed header. */
        palCopyMemory(pMac->hHdd,
                      (void *)((tANI_U8 *)pNewPeerInd + sizeof(tSmeIbssPeerInd)),
                      (void *)beacon, beaconLen);
    }
    mmhMsg.type = msgType;
    mmhMsg.bodyptr = pNewPeerInd;
    /* BUG FIX: bodyval was previously left uninitialized (the assignment
     * was commented out), passing stack garbage downstream; every other
     * sender in this file sets it to 0. */
    mmhMsg.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, mmhMsg.type));
    limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
}
/** -----------------------------------------------------------------
\brief limSendExitBmpsInd() - sends exit bmps indication
This function sends a eWNI_PMC_EXIT_BMPS_IND with a specific reason
code to SME. This will trigger SME to get out of BMPS mode.
\param pMac - global mac structure
\param reasonCode - reason for which PE wish to exit BMPS
\return none
\sa
----------------------------------------------------------------- */
void limSendExitBmpsInd(tpAniSirGlobal pMac, tExitBmpsReason reasonCode)
{
    tSirMsgQ msgQ;
    tpSirSmeExitBmpsInd pInd;
    tANI_U16 indLen = sizeof(tSirSmeExitBmpsInd);

    if (eHAL_STATUS_SUCCESS !=
        palAllocateMemory(pMac->hHdd, (void **)&pInd, indLen))
    {
        limLog(pMac, LOGP, FL("palAllocateMemory failed for PMC_EXIT_BMPS_IND \n"));
        return;
    }
    palZeroMemory(pMac->hHdd, pInd, indLen);
#if defined (ANI_PRODUCT_TYPE_AP) && defined (ANI_LITTLE_BYTE_ENDIAN)
    /* AP little-endian builds serialize the header fields byte-wise. */
    sirStoreU16N((tANI_U8*)&pInd->mesgType, eWNI_PMC_EXIT_BMPS_IND);
    sirStoreU16N((tANI_U8*)&pInd->mesgLen, indLen);
#else
    pInd->mesgType = eWNI_PMC_EXIT_BMPS_IND;
    pInd->mesgLen = indLen;
#endif
    /* Tell PMC why PE wants out of BMPS; status is always success here. */
    pInd->exitBmpsReason = reasonCode;
    pInd->statusCode = eSIR_SME_SUCCESS;

    msgQ.type = eWNI_PMC_EXIT_BMPS_IND;
    msgQ.bodyptr = pInd;
    msgQ.bodyval = 0;
    PELOG1(limLog(pMac, LOG1, FL("Sending eWNI_PMC_EXIT_BMPS_IND to SME. \n"));)
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));
#ifdef FEATURE_WLAN_DIAG_SUPPORT_LIM //FEATURE_WLAN_DIAG_SUPPORT
    limDiagEventReport(pMac, WLAN_PE_DIAG_EXIT_BMPS_IND_EVENT, peGetValidPowerSaveSession(pMac), 0, (tANI_U16)reasonCode);
#endif //FEATURE_WLAN_DIAG_SUPPORT
    limSysProcessMmhMsgApi(pMac, &msgQ, ePROT);
} /*** end limSendExitBmpsInd() ***/
/*--------------------------------------------------------------------------
\brief limHandleDeleteBssRsp() - Handle the Delete BSS Response from HAL
and dispatch it to the role-specific handler.
\param pMac - pointer to global adapter context
\param MsgQ - pointer to the WDA_DELETE_BSS_RSP message
\sa
--------------------------------------------------------------------------*/
void limHandleDeleteBssRsp(tpAniSirGlobal pMac,tpSirMsgQ MsgQ)
{
    tpPESession psessionEntry;
    tpDeleteBssParams pDelBss = (tpDeleteBssParams)(MsgQ->bodyptr);
    /* Look up the PE session the HAL response belongs to. */
    if((psessionEntry = peFindSessionBySessionId(pMac,pDelBss->sessionId))==NULL)
    {
        /* NOTE(review): MsgQ->bodyptr is not freed on this early return;
         * presumably a later caller or the IBSS/MLM handlers own it —
         * verify there is no leak on this path. */
        limLog(pMac, LOGP,FL("Session Does not exist for given sessionID\n"));
        return;
    }
    /* Dispatch on the system role of the session that owned the BSS. */
    if (psessionEntry->limSystemRole == eLIM_STA_IN_IBSS_ROLE)
    {
        limIbssDelBssRsp(pMac, MsgQ->bodyptr,psessionEntry);
    }
    else if(psessionEntry->limSystemRole == eLIM_UNKNOWN_ROLE)
    {
        /* NOTE(review): this branch passes bodyval while the others use
         * bodyptr/MsgQ — looks intentional per the callee's signature,
         * but worth confirming against limProcessSmeDelBssRsp(). */
        limProcessSmeDelBssRsp(pMac, MsgQ->bodyval,psessionEntry);
    }
    else
        limProcessMlmDelBssRsp(pMac,MsgQ,psessionEntry);
}
#ifdef WLAN_FEATURE_VOWIFI_11R
/** -----------------------------------------------------------------
\brief limSendSmeAggrQosRsp() - sends SME FT AGGR QOS RSP
\ This function sends a eWNI_SME_FT_AGGR_QOS_RSP to SME.
\param pMac - global mac structure
\param aggrQosRsp - pre-built aggregated QoS response, posted as message body
\param smesessionId - SME session id (currently unused)
\return none
\sa
----------------------------------------------------------------- */
void
limSendSmeAggrQosRsp(tpAniSirGlobal pMac, tpSirAggrQosRsp aggrQosRsp,
tANI_U8 smesessionId)
{
    /* Wrap the already-built response and hand it to the MMH dispatcher. */
    tSirMsgQ msgQ;

    msgQ.type = eWNI_SME_FT_AGGR_QOS_RSP;
    msgQ.bodyptr = aggrQosRsp;
    msgQ.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));
    limSysProcessMmhMsgApi(pMac, &msgQ, ePROT);
}
#endif
/** -----------------------------------------------------------------
\brief limSendSmePreChannelSwitchInd() - sends an indication to SME
before switching channels for spectrum manangement.
This function sends a eWNI_SME_PRE_SWITCH_CHL_IND to SME.
\param pMac - global mac structure
\return none
\sa
----------------------------------------------------------------- */
void
limSendSmePreChannelSwitchInd(tpAniSirGlobal pMac)
{
    /* Bodiless notification: SME only needs the message type. */
    tSirMsgQ msgQ;

    msgQ.type = eWNI_SME_PRE_SWITCH_CHL_IND;
    msgQ.bodyptr = NULL;
    msgQ.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));
    limSysProcessMmhMsgApi(pMac, &msgQ, ePROT);
}
/** -----------------------------------------------------------------
\brief limSendSmePostChannelSwitchInd() - sends an indication to SME
after channel switch for spectrum manangement is complete.
This function sends a eWNI_SME_POST_SWITCH_CHL_IND to SME.
\param pMac - global mac structure
\return none
\sa
----------------------------------------------------------------- */
void
limSendSmePostChannelSwitchInd(tpAniSirGlobal pMac)
{
    /* Bodiless notification: SME only needs the message type. */
    tSirMsgQ msgQ;

    msgQ.type = eWNI_SME_POST_SWITCH_CHL_IND;
    msgQ.bodyptr = NULL;
    msgQ.bodyval = 0;
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, msgQ.type));
    limSysProcessMmhMsgApi(pMac, &msgQ, ePROT);
}
/**
 * limSendSmeMaxAssocExceededNtf() - notifies SME that a peer tried to
 * associate after the maximum supported association count was reached,
 * by posting eWNI_SME_MAX_ASSOC_EXCEEDED with the rejected peer's MAC.
 *
 * @param pMac         Pointer to Global MAC structure
 * @param peerMacAddr  MAC address of the peer that exceeded the limit
 * @param smesessionId SME session id placed in the indication
 * @return none
 */
void limSendSmeMaxAssocExceededNtf(tpAniSirGlobal pMac, tSirMacAddr peerMacAddr,
tANI_U8 smesessionId)
{
    tSirMsgQ mmhMsg;
    tSmeMaxAssocInd *pSmeMaxAssocInd;
    if (eHAL_STATUS_SUCCESS !=
        palAllocateMemory(pMac->hHdd, (void **)&pSmeMaxAssocInd,
                          sizeof(tSmeMaxAssocInd)))
    {
        PELOGE(limLog(pMac, LOGE, FL("Failed to allocate memory"));)
        return;
    }
    palZeroMemory(pMac->hHdd, (void *)pSmeMaxAssocInd, sizeof(tSmeMaxAssocInd));
    palCopyMemory(pMac->hHdd, (tANI_U8 *)pSmeMaxAssocInd->peerMac,
                  (tANI_U8 *)peerMacAddr, sizeof(tSirMacAddr));
    pSmeMaxAssocInd->mesgType = eWNI_SME_MAX_ASSOC_EXCEEDED;
    pSmeMaxAssocInd->mesgLen = sizeof(tSmeMaxAssocInd);
    pSmeMaxAssocInd->sessionId = smesessionId;
    mmhMsg.type = pSmeMaxAssocInd->mesgType;
    mmhMsg.bodyptr = pSmeMaxAssocInd;
    /* BUG FIX: bodyval was previously left uninitialized, passing stack
     * garbage downstream; every other sender in this file sets it to 0. */
    mmhMsg.bodyval = 0;
    PELOG1(limLog(pMac, LOG1, FL("msgType %s peerMacAddr %02x-%02x-%02x-%02x-%02x-%02x"
    "sme session id %d\n"),"eWNI_SME_MAX_ASSOC_EXCEEDED", peerMacAddr[0], peerMacAddr[1],
    peerMacAddr[2], peerMacAddr[3], peerMacAddr[4], peerMacAddr[5], smesessionId);)
    MTRACE(macTraceMsgTx(pMac, NO_SESSION, mmhMsg.type));
    limSysProcessMmhMsgApi(pMac, &mmhMsg, ePROT);
}
| gpl-2.0 |
NeverLEX/linux | drivers/clk/clk-max77802.c | 387 | 2708 | /*
* clk-max77802.c - Clock driver for Maxim 77802
*
* Copyright (C) 2014 Google, Inc
*
* Copyright (C) 2012 Samsung Electornics
* Jonghwa Lee <jonghwa3.lee@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This driver is based on clk-max77686.c
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/max77686-private.h>
#include <linux/clk-provider.h>
#include <linux/mutex.h>
#include <linux/clkdev.h>
#include <dt-bindings/clock/maxim,max77802.h>
#include "clk-max-gen.h"
/* Per-clock enable bit mask in MAX77802_REG_32KHZ.
 * NOTE(review): not referenced in this file — presumably consumed by the
 * shared clk-max-gen helpers; confirm before removing. */
#define MAX77802_CLOCK_OPMODE_MASK 0x1
/* Bit position of the low-jitter enable in MAX77802_REG_32KHZ. */
#define MAX77802_CLOCK_LOW_JITTER_SHIFT 0x3
/* Init data for the two always-on 32 kHz root clock outputs exposed by
 * the MAX77802 PMIC (one for the AP, one for the CP). */
static struct clk_init_data max77802_clks_init[MAX77802_CLKS_NUM] = {
	[MAX77802_CLK_32K_AP] = {
		.name = "32khz_ap",
		.ops = &max_gen_clk_ops,
		.flags = CLK_IS_ROOT,
	},
	[MAX77802_CLK_32K_CP] = {
		.name = "32khz_cp",
		.ops = &max_gen_clk_ops,
		.flags = CLK_IS_ROOT,
	},
};
/*
 * Register the two 32 kHz clocks via the shared max-gen helper, then
 * switch the clock block into low-jitter mode.
 */
static int max77802_clk_probe(struct platform_device *pdev)
{
	struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
	unsigned int jitter_bit = 1 << MAX77802_CLOCK_LOW_JITTER_SHIFT;
	int err;

	err = max_gen_clk_probe(pdev, iodev->regmap, MAX77802_REG_32KHZ,
				max77802_clks_init, MAX77802_CLKS_NUM);
	if (err) {
		dev_err(&pdev->dev, "generic probe failed %d\n", err);
		return err;
	}

	/* Enable low-jitter mode on the 32khz clocks. */
	err = regmap_update_bits(iodev->regmap, MAX77802_REG_32KHZ,
				 jitter_bit, jitter_bit);
	if (err < 0)
		dev_err(&pdev->dev, "failed to enable low-jitter mode\n");

	return err;
}
/* Unregister both clocks via the shared max-gen helper. */
static int max77802_clk_remove(struct platform_device *pdev)
{
	return max_gen_clk_remove(pdev, MAX77802_CLKS_NUM);
}
/* Platform device id table; the MFD core instantiates "max77802-clk". */
static const struct platform_device_id max77802_clk_id[] = {
	{ "max77802-clk", 0},
	{ },
};
MODULE_DEVICE_TABLE(platform, max77802_clk_id);
/* Platform driver glue: probe registers the clocks, remove tears them down. */
static struct platform_driver max77802_clk_driver = {
	.driver = {
		.name = "max77802-clk",
	},
	.probe = max77802_clk_probe,
	.remove = max77802_clk_remove,
	.id_table = max77802_clk_id,
};
module_platform_driver(max77802_clk_driver);
MODULE_DESCRIPTION("MAXIM 77802 Clock Driver");
MODULE_AUTHOR("Javier Martinez Canillas <javier.martinez@collabora.co.uk>");
MODULE_LICENSE("GPL");
zaclimon/Quanta-Flo | arch/arm/mach-msm/qdsp5v2/audio_pcm.c | 643 | 44037 | /* arch/arm/mach-msm/qdsp5v2/audio_pcm.c
*
*
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <asm/ioctls.h>
#include <asm/atomic.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/earlysuspend.h>
#include <linux/list.h>
#include <linux/android_pmem.h>
#include <linux/memory_alloc.h>
#include <linux/slab.h>
#include <linux/msm_audio.h>
#include <mach/msm_adsp.h>
#include <mach/qdsp5v2/audio_dev_ctl.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/qdsp5v2/qdsp5audppcmdi.h>
#include <mach/qdsp5v2/qdsp5audppmsg.h>
#include <mach/qdsp5v2/qdsp5audplaycmdi.h>
#include <mach/qdsp5v2/qdsp5audplaymsg.h>
#include <mach/qdsp5v2/audpp.h>
#include <mach/debug_mm.h>
#include <mach/msm_memtypes.h>
/* Driver status flag bits (audio->drv_status). */
#define ADRV_STATUS_AIO_INTF 0x00000001	/* async (AIO) interface in use */
#define ADRV_STATUS_OBUF_GIVEN 0x00000002	/* an output buffer is with the DSP */
#define ADRV_STATUS_IBUF_GIVEN 0x00000004	/* an input buffer is with the DSP */
#define ADRV_STATUS_FSYNC 0x00000008	/* an fsync is in progress */
/* Size must be power of 2 */
#define BUFSZ_MAX 32768
#define BUFSZ_MIN 4096
#define DMASZ_MAX (BUFSZ_MAX * 2)
#define DMASZ_MIN (BUFSZ_MIN * 2)
#define AUDDEC_DEC_PCM 0
/* Decoder status received from AUDPPTASK */
#define AUDPP_DEC_STATUS_SLEEP 0
#define AUDPP_DEC_STATUS_INIT 1
#define AUDPP_DEC_STATUS_CFG 2
#define AUDPP_DEC_STATUS_PLAY 3
#define AUDPCM_EVENT_NUM 10 /* Default number of pre-allocated event packets */
/* True if the region [v, v+l) lies entirely inside pmem region r. */
#define __CONTAINS(r, v, l) ({ \
	typeof(r) __r = r; \
	typeof(v) __v = v; \
	typeof(v) __e = __v + l; \
	int res = ((__v >= __r->vaddr) && \
		(__e <= __r->vaddr + __r->len)); \
	res; \
})
/* True if region r2 is fully contained within region r1. */
#define CONTAINS(r1, r2) ({ \
	typeof(r2) __r2 = r2; \
	__CONTAINS(r1, __r2->vaddr, __r2->len); \
})
/* True if address v falls inside region r. */
#define IN_RANGE(r, v) ({ \
	typeof(r) __r = r; \
	typeof(v) __vv = v; \
	int res = ((__vv >= __r->vaddr) && \
		(__vv < (__r->vaddr + __r->len))); \
	res; \
})
/* True if regions r1 and r2 overlap (either endpoint of r2 inside r1). */
#define OVERLAPS(r1, r2) ({ \
	typeof(r1) __r1 = r1; \
	typeof(r2) __r2 = r2; \
	typeof(__r2->vaddr) __v = __r2->vaddr; \
	typeof(__v) __e = __v + __r2->len - 1; \
	int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \
	res; \
})
struct audio;
/* One half of the double-buffered DMA output area. */
struct buffer {
	void *data;	/* kernel virtual address */
	unsigned size;
	unsigned used; /* Input usage actual DSP produced PCM size */
	unsigned addr;	/* physical address handed to the DSP */
};
#ifdef CONFIG_HAS_EARLYSUSPEND
/* Links the early-suspend callback back to its audio instance. */
struct audpcm_suspend_ctl {
	struct early_suspend node;
	struct audio *audio;
};
#endif
/* Event packet queued for delivery to userspace via AUDIO_GET_EVENT. */
struct audpcm_event {
	struct list_head list;
	int event_type;
	union msm_audio_event_payload payload;
};
/* A userspace-registered pmem region usable for async I/O buffers. */
struct audpcm_pmem_region {
	struct list_head list;
	struct file *file;	/* holds a reference while registered */
	int fd;
	void *vaddr;	/* userspace virtual base */
	unsigned long paddr;
	unsigned long kvaddr;
	unsigned long len;
	unsigned ref_cnt;	/* outstanding buffers inside this region */
};
/* One queued async write buffer awaiting consumption by the DSP. */
struct audpcm_buffer_node {
	struct list_head list;
	struct msm_audio_aio_buf buf;
	unsigned long paddr;
};
/* Per-mode (sync vs async) driver operations. */
struct audpcm_drv_operations {
	void (*send_data)(struct audio *, unsigned);
	void (*out_flush)(struct audio *);
	int (*fsync)(struct audio *);
};
/* Per-open-instance state of the qdsp5v2 PCM playback driver. */
struct audio {
	struct buffer out[2];	/* double buffer for synchronous writes */
	spinlock_t dsp_lock;	/* protects out[], out_needed, out_queue */
	uint8_t out_head;
	uint8_t out_tail;
	uint8_t out_needed; /* number of buffers the dsp is waiting for */
	unsigned out_dma_sz;
	struct list_head out_queue; /* queue to retain output buffers */
	atomic_t out_bytes;
	struct mutex lock;	/* serializes open/release/ioctl state changes */
	struct mutex write_lock;	/* serializes write() callers */
	wait_queue_head_t write_wait;
	struct msm_adsp_module *audplay;
	/* configuration to use on next enable */
	uint32_t out_sample_rate;
	uint32_t out_channel_mode;
	uint32_t out_bits; /* bits per sample */
	/* data allocated for various buffers */
	char *data;
	int32_t phys;
	void *map_v_write;
	uint32_t drv_status;	/* ADRV_STATUS_* flag bits */
	int wflush; /* Write flush */
	int opened;
	int enabled;
	int running;
	int stopped; /* set when stopped, cleared on flush */
	int teos; /* valid only if tunnel mode & no data left for decoder */
	enum msm_aud_decoder_state dec_state; /* Represents decoder state */
	int reserved; /* A byte is being reserved */
	char rsv_byte; /* Handle odd length user data */
	const char *module_name;
	unsigned queue_id;
	uint32_t device_events;
	unsigned volume;
	uint16_t dec_id;
	int16_t source;	/* bitmap of routed output devices */
#ifdef CONFIG_HAS_EARLYSUSPEND
	struct audpcm_suspend_ctl suspend_ctl;
#endif
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
#endif
	wait_queue_head_t wait;
	struct list_head free_event_queue;
	struct list_head event_queue;
	wait_queue_head_t event_wait;
	spinlock_t event_queue_lock;
	struct mutex get_event_lock;
	int event_abort;
	/* AV sync Info */
	int avsync_flag; /* Flag to indicate feedback from DSP */
	wait_queue_head_t avsync_wait;/* Wait queue for AV Sync Message */
	/* flags, 48 bits sample/bytes counter per channel */
	uint16_t avsync[AUDPP_AVSYNC_CH_COUNT * AUDPP_AVSYNC_NUM_WORDS + 1];
	struct list_head pmem_region_queue;
	struct audpcm_drv_operations drv_ops;	/* sync or async callbacks */
};
/* Forward declarations for the DSP plumbing defined below. */
static int auddec_dsp_config(struct audio *audio, int enable);
static void audpp_cmd_cfg_adec_params(struct audio *audio);
static void audplay_send_data(struct audio *audio, unsigned needed);
static void audio_dsp_event(void *private, unsigned id, uint16_t *msg);
static void audpcm_post_event(struct audio *audio, int type,
	union msm_audio_event_payload payload);
static unsigned long audpcm_pmem_fixup(struct audio *audio, void *addr,
	unsigned long len, int ref_up);
/* Audio-device-control event callback: keeps the routing bitmap and
 * stream volume in sync with device ready/release/volume events. */
static void pcm_listner(u32 evt_id, union auddev_evt_data *evt_payload,
			void *private_data)
{
	struct audio *audio = (struct audio *) private_data;
	int reroute = 0;

	switch (evt_id) {
	case AUDDEV_EVT_DEV_RDY:
		MM_DBG("AUDDEV_EVT_DEV_RDY\n");
		/* Mark the newly ready device in the routing bitmap. */
		audio->source |= (0x1 << evt_payload->routing_id);
		reroute = 1;
		break;
	case AUDDEV_EVT_DEV_RLS:
		MM_DBG("AUDDEV_EVT_DEV_RLS\n");
		/* Clear the released device from the routing bitmap. */
		audio->source &= ~(0x1 << evt_payload->routing_id);
		reroute = 1;
		break;
	case AUDDEV_EVT_STREAM_VOL_CHG:
		audio->volume = evt_payload->session_vol;
		MM_DBG("AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d\n",
			audio->volume);
		if (audio->running)
			audpp_set_volume_and_pan(audio->dec_id, audio->volume,
						 0, POPP);
		break;
	default:
		MM_ERR("ERROR:wrong event\n");
		break;
	}

	/* Routing changes only take effect on an enabled, running stream. */
	if (reroute && audio->running == 1 && audio->enabled == 1)
		audpp_route_stream(audio->dec_id, audio->source);
}
/* must be called with audio->lock held */
static int audio_enable(struct audio *audio)
{
	int rc = 0;

	MM_DBG("\n"); /* Macro prints the file name and function */
	if (audio->enabled)
		return 0;

	/* Reset decoder bookkeeping before bringing the DSP modules up. */
	audio->dec_state = MSM_AUD_DECODER_STATE_NONE;
	audio->out_tail = 0;
	audio->out_needed = 0;

	if (msm_adsp_enable(audio->audplay)) {
		MM_ERR("msm_adsp_enable(audplay) failed\n");
		rc = -ENODEV;
	} else if (audpp_enable(audio->dec_id, audio_dsp_event, audio)) {
		MM_ERR("audpp_enable() failed\n");
		/* Roll back the audplay enable on audpp failure. */
		msm_adsp_disable(audio->audplay);
		rc = -ENODEV;
	} else {
		audio->enabled = 1;
	}
	return rc;
}
/* must be called with audio->lock held */
static int audio_disable(struct audio *audio)
{
	int rc = 0;
	MM_DBG("\n"); /* Macro prints the file name and function */
	if (audio->enabled) {
		audio->enabled = 0;
		audio->dec_state = MSM_AUD_DECODER_STATE_NONE;
		/* Ask the DSP to disable the decoder, then wait for
		 * audio_dsp_event() to move dec_state off NONE. */
		auddec_dsp_config(audio, 0);
		rc = wait_event_interruptible_timeout(audio->wait,
			audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
			msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
		if (rc == 0)
			rc = -ETIMEDOUT;	/* DSP never acknowledged */
		else if (audio->dec_state != MSM_AUD_DECODER_STATE_CLOSE)
			rc = -EFAULT;	/* woke in an unexpected state */
		else
			rc = 0;
		/* Unblock any writers, then tear the DSP modules down. */
		wake_up(&audio->write_wait);
		msm_adsp_disable(audio->audplay);
		audpp_disable(audio->dec_id, audio);
		audio->out_needed = 0;
	}
	return rc;
}
/* ------------------- dsp --------------------- */
/* ADSP message handler for the audplay task: drains the event payload
 * and feeds the decoder when it asks for more data. */
static void audplay_dsp_event(void *data, unsigned id, size_t len,
			      void (*getevent) (void *ptr, size_t len))
{
	struct audio *audio = data;
	uint32_t msg[28];

	getevent(msg, sizeof(msg));
	MM_DBG("msg_id=%x\n", id);

	if (id == AUDPLAY_MSG_DEC_NEEDS_DATA)
		audio->drv_ops.send_data(audio, 1);
	else if (id == ADSP_MESSAGE_ID)
		MM_DBG("Received ADSP event:module audplaytask\n");
	else
		MM_ERR("unexpected message from decoder\n");
}
/* AUDPP event handler: drives the decoder state machine (dec_state),
 * routing, volume, flush and AV-sync bookkeeping, waking any sleepers
 * that are blocked on those transitions. */
static void audio_dsp_event(void *private, unsigned id, uint16_t *msg)
{
	struct audio *audio = private;
	switch (id) {
	case AUDPP_MSG_STATUS_MSG:{
		unsigned status = msg[1];
		switch (status) {
		case AUDPP_DEC_STATUS_SLEEP: {
			uint16_t reason = msg[2];
			MM_DBG("decoder status:sleep reason=0x%04x\n",
					reason);
			if ((reason == AUDPP_MSG_REASON_MEM)
					|| (reason ==
					AUDPP_MSG_REASON_NODECODER)) {
				/* No resources/decoder: fail the enable. */
				audio->dec_state =
					MSM_AUD_DECODER_STATE_FAILURE;
				wake_up(&audio->wait);
			} else if (reason == AUDPP_MSG_REASON_NONE) {
				/* decoder is in disable state */
				audio->dec_state =
					MSM_AUD_DECODER_STATE_CLOSE;
				wake_up(&audio->wait);
			}
			break;
		}
		case AUDPP_DEC_STATUS_INIT:
			MM_DBG("decoder status: init \n");
			/* Decoder came up — push the PCM parameters. */
			audpp_cmd_cfg_adec_params(audio);
			break;
		case AUDPP_DEC_STATUS_CFG:
			MM_DBG("decoder status: cfg \n");
			break;
		case AUDPP_DEC_STATUS_PLAY:
			MM_DBG("decoder status: play \n");
			/* Playing: apply routing and report success to
			 * whoever is waiting in the enable path. */
			audpp_route_stream(audio->dec_id,
					audio->source);
			audio->dec_state =
				MSM_AUD_DECODER_STATE_SUCCESS;
			wake_up(&audio->wait);
			break;
		default:
			MM_ERR("unknown decoder status\n");
			break;
		}
		break;
	}
	case AUDPP_MSG_CFG_MSG:
		if (msg[0] == AUDPP_MSG_ENA_ENA) {
			MM_DBG("CFG_MSG ENABLE\n");
			/* AUDPP is up: configure the decoder and restore
			 * the session volume. */
			auddec_dsp_config(audio, 1);
			audio->out_needed = 0;
			audio->running = 1;
			audpp_set_volume_and_pan(audio->dec_id, audio->volume,
					0, POPP);
		} else if (msg[0] == AUDPP_MSG_ENA_DIS) {
			MM_DBG("CFG_MSG DISABLE\n");
			audio->running = 0;
		} else {
			MM_ERR("audio_dsp_event: CFG_MSG %d?\n", msg[0]);
		}
		break;
	case AUDPP_MSG_FLUSH_ACK:
		MM_DBG("FLUSH_ACK\n");
		audio->wflush = 0;
		wake_up(&audio->write_wait);
		break;
	case AUDPP_MSG_PCMDMAMISSED:
		/* DMA underrun — decoder has consumed everything; treat as
		 * end-of-stream for fsync/drain waiters. */
		MM_DBG("PCMDMAMISSED\n");
		audio->teos = 1;
		wake_up(&audio->write_wait);
		break;
	case AUDPP_MSG_AVSYNC_MSG:
		/* NOTE(review): pr_info here is inconsistent with the MM_DBG
		 * logging used everywhere else in this handler. */
		pr_info("%s: AVSYNC_MSG\n", __func__);
		memcpy(&audio->avsync[0], msg, sizeof(audio->avsync));
		audio->avsync_flag = 1;
		wake_up(&audio->avsync_wait);
		break;
	default:
		MM_DBG("audio_dsp_event: UNKNOWN (%d)\n", id);
	}
}
/* ADSP module callbacks for the audplay task. */
struct msm_adsp_ops audpcmdec_adsp_ops = {
	.event = audplay_dsp_event,
};
/* Convenience wrapper: write a command to this instance's audplay queue. */
#define audplay_send_queue0(audio, cmd, len) \
	msm_adsp_write(audio->audplay, audio->queue_id, \
			cmd, len)
/* Send AUDPP_CMD_CFG_DEC_TYPE to enable (PCM decoder) or disable this
 * stream's decoder slot. */
static int auddec_dsp_config(struct audio *audio, int enable)
{
	struct audpp_cmd_cfg_dec_type cfg_dec_cmd;

	memset(&cfg_dec_cmd, 0, sizeof(cfg_dec_cmd));
	cfg_dec_cmd.cmd_id = AUDPP_CMD_CFG_DEC_TYPE;
	cfg_dec_cmd.dec_cfg = enable ?
		(AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_ENA_DEC_V |
		 AUDDEC_DEC_PCM) :
		(AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_DIS_DEC_V);
	cfg_dec_cmd.dm_mode = 0x0;
	cfg_dec_cmd.stream_id = audio->dec_id;

	return audpp_send_queue1(&cfg_dec_cmd, sizeof(cfg_dec_cmd));
}
static void audpp_cmd_cfg_adec_params(struct audio *audio)
{
struct audpp_cmd_cfg_adec_params_wav cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS;
cmd.common.length = AUDPP_CMD_CFG_ADEC_PARAMS_WAV_LEN >> 1;
cmd.common.dec_id = audio->dec_id;
cmd.common.input_sampling_frequency = audio->out_sample_rate;
cmd.stereo_cfg = audio->out_channel_mode;
cmd.pcm_width = audio->out_bits;
cmd.sign = 0;
audpp_send_queue2(&cmd, sizeof(cmd));
}
static int audplay_dsp_send_data_avail(struct audio *audio,
unsigned idx, unsigned len)
{
struct audplay_cmd_bitstream_data_avail cmd;
cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL;
cmd.decoder_id = audio->dec_id;
cmd.buf_ptr = audio->out[idx].addr;
cmd.buf_size = len/2;
cmd.partition_number = 0;
/* complete writes to the input buffer */
wmb();
return audplay_send_queue0(audio, &cmd, sizeof(cmd));
}
/* Async-mode data pump, called from the DSP callback (needed=1) or after
 * userspace queues a buffer (needed=0). Under dsp_lock: first completes
 * the buffer the DSP just consumed (WRITE_DONE event), then hands the
 * next queued buffer to the DSP if it is waiting for one. */
static void audpcm_async_send_data(struct audio *audio, unsigned needed)
{
	unsigned long flags;
	if (!audio->running)
		return;
	spin_lock_irqsave(&audio->dsp_lock, flags);
	if (needed && !audio->wflush) {
		audio->out_needed = 1;
		if (audio->drv_status & ADRV_STATUS_OBUF_GIVEN) {
			/* pop one node out of queue */
			union msm_audio_event_payload payload;
			struct audpcm_buffer_node *used_buf;
			MM_DBG("consumed\n");
			/* OBUF_GIVEN guarantees there is a buffer in flight. */
			BUG_ON(list_empty(&audio->out_queue));
			used_buf = list_first_entry(&audio->out_queue,
					struct audpcm_buffer_node, list);
			list_del(&used_buf->list);
			payload.aio_buf = used_buf->buf;
			audpcm_post_event(audio, AUDIO_EVENT_WRITE_DONE,
					payload);
			kfree(used_buf);
			audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN;
		}
	}
	if (audio->out_needed) {
		struct audpcm_buffer_node *next_buf;
		struct audplay_cmd_bitstream_data_avail cmd;
		if (!list_empty(&audio->out_queue)) {
			next_buf = list_first_entry(&audio->out_queue,
					struct audpcm_buffer_node, list);
			MM_DBG("next_buf %p\n", next_buf);
			if (next_buf) {
				MM_DBG("next buf phy %lx len %d\n",
						next_buf->paddr, next_buf->buf.data_len);
				cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL;
				/* A zero-length buffer is the EOS marker:
				 * signalled with decoder_id = -1. */
				if (next_buf->buf.data_len)
					cmd.decoder_id = audio->dec_id;
				else {
					cmd.decoder_id = -1;
					MM_DBG("input EOS signaled\n");
				}
				cmd.buf_ptr = (unsigned) next_buf->paddr;
				cmd.buf_size = next_buf->buf.data_len >> 1;
				cmd.partition_number = 0;
				/* complete writes to the input buffer */
				wmb();
				audplay_send_queue0(audio, &cmd, sizeof(cmd));
				audio->out_needed = 0;
				audio->drv_status |= ADRV_STATUS_OBUF_GIVEN;
			}
		}
	}
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
/*
 * Feed the DSP on the standard (blocking write) interface, which uses
 * the two-half ping-pong buffer in audio->out.  used == 0xffffffff
 * marks the half currently owned by the DSP.  Protected by dsp_lock.
 */
static void audplay_send_data(struct audio *audio, unsigned needed)
{
	struct buffer *frame;
	unsigned long flags;

	if (!audio->running)
		return;
	spin_lock_irqsave(&audio->dsp_lock, flags);
	if (needed && !audio->wflush) {
		/* We were called from the callback because the DSP
		 * requested more data. Note that the DSP does want
		 * more data, and if a buffer was in-flight, mark it
		 * as available (since the DSP must now be done with
		 * it).
		 */
		audio->out_needed = 1;
		frame = audio->out + audio->out_tail;
		if (frame->used == 0xffffffff) {
			MM_DBG("frame %d free\n", audio->out_tail);
			frame->used = 0;
			audio->out_tail ^= 1;
			/* a writer may be blocked waiting for a free half */
			wake_up(&audio->write_wait);
		}
	}
	if (audio->out_needed) {
		/* If the DSP currently wants data and we have a
		 * buffer available, we will send it and reset
		 * the needed flag. We'll mark the buffer as in-flight
		 * so that it won't be recycled until the next buffer
		 * is requested
		 */
		frame = audio->out + audio->out_tail;
		if (frame->used) {
			BUG_ON(frame->used == 0xffffffff);
			MM_DBG("frame %d busy\n", audio->out_tail);
			audplay_dsp_send_data_avail(audio, audio->out_tail,
					frame->used);
			frame->used = 0xffffffff;
			audio->out_needed = 0;
		}
	}
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
/* ------------------- device --------------------- */
static void audpcm_async_flush(struct audio *audio)
{
struct audpcm_buffer_node *buf_node;
struct list_head *ptr, *next;
union msm_audio_event_payload payload;
MM_DBG("\n"); /* Macro prints the file name and function */
list_for_each_safe(ptr, next, &audio->out_queue) {
buf_node = list_entry(ptr, struct audpcm_buffer_node, list);
list_del(&buf_node->list);
payload.aio_buf = buf_node->buf;
audpcm_post_event(audio, AUDIO_EVENT_WRITE_DONE,
payload);
kfree(buf_node);
}
audio->drv_status &= ~ADRV_STATUS_OBUF_GIVEN;
audio->out_needed = 0;
atomic_set(&audio->out_bytes, 0);
}
/*
 * Flush the standard-I/O output path: mark both ping-pong halves free
 * and reset all write-side cursors and counters.
 */
static void audio_flush(struct audio *audio)
{
	int half;

	for (half = 0; half < 2; half++)
		audio->out[half].used = 0;

	audio->out_head = 0;
	audio->out_tail = 0;
	audio->reserved = 0;
	audio->out_needed = 0;
	atomic_set(&audio->out_bytes, 0);
}
/*
 * Abort outstanding writes and flush the output path so the device can
 * be stopped or flushed (used by AUDIO_STOP / AUDIO_FLUSH).
 */
static void audio_ioport_reset(struct audio *audio)
{
	if (audio->drv_status & ADRV_STATUS_AIO_INTF) {
		/* If fsync is in progress, make sure
		 * return value of fsync indicates
		 * abort due to flush
		 */
		if (audio->drv_status & ADRV_STATUS_FSYNC) {
			MM_DBG("fsync in progress\n");
			wake_up(&audio->write_wait);
			mutex_lock(&audio->write_lock);
			audio->drv_ops.out_flush(audio);
			mutex_unlock(&audio->write_lock);
		} else
			audio->drv_ops.out_flush(audio);
	} else {
		/* Make sure read/write thread are free from
		 * sleep and knowing that system is not able
		 * to process io request at the moment
		 */
		wake_up(&audio->write_wait);
		mutex_lock(&audio->write_lock);
		audio->drv_ops.out_flush(audio);
		mutex_unlock(&audio->write_lock);
	}
	/* release anyone blocked waiting for an AV-sync report */
	audio->avsync_flag = 1;
	wake_up(&audio->avsync_wait);
}
/*
 * Return nonzero when AUDIO_GET_EVENT has something to report: either
 * a queued event or a pending abort request.
 *
 * Fix: the local was named "empty" although it is set when the queue
 * is NOT empty; renamed to "pending" to say what it means.
 */
static int audpcm_events_pending(struct audio *audio)
{
	unsigned long flags;
	int pending;

	spin_lock_irqsave(&audio->event_queue_lock, flags);
	pending = !list_empty(&audio->event_queue);
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	return pending || audio->event_abort;
}
/*
 * Free every event node, both pending and pooled, under the event
 * queue lock.  Used on device teardown.
 */
static void audpcm_reset_event_queue(struct audio *audio)
{
	struct audpcm_event *evt;
	struct list_head *pos, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&audio->event_queue_lock, flags);
	list_for_each_safe(pos, tmp, &audio->event_queue) {
		evt = list_entry(pos, struct audpcm_event, list);
		list_del(&evt->list);
		kfree(evt);
	}
	list_for_each_safe(pos, tmp, &audio->free_event_queue) {
		evt = list_entry(pos, struct audpcm_event, list);
		list_del(&evt->list);
		kfree(evt);
	}
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
}
/*
 * Service AUDIO_GET_EVENT: block (optionally with a timeout taken from
 * the user's msm_audio_event.timeout_ms) until an event is queued or
 * an abort is requested, then copy the event out.  WRITE_DONE events
 * additionally drop the pmem region reference taken when the buffer
 * was queued.
 */
static long audpcm_process_event_req(struct audio *audio, void __user *arg)
{
	long rc;
	struct msm_audio_event usr_evt;
	struct audpcm_event *drv_evt = NULL;
	int timeout;
	unsigned long flags;

	if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event)))
		return -EFAULT;

	timeout = (int) usr_evt.timeout_ms;
	if (timeout > 0) {
		rc = wait_event_interruptible_timeout(
			audio->event_wait, audpcm_events_pending(audio),
			msecs_to_jiffies(timeout));
		if (rc == 0)
			return -ETIMEDOUT;
	} else {
		/* timeout <= 0 means wait indefinitely */
		rc = wait_event_interruptible(
			audio->event_wait, audpcm_events_pending(audio));
	}
	if (rc < 0)
		return rc;

	if (audio->event_abort) {
		audio->event_abort = 0;
		return -ENODEV;
	}

	spin_lock_irqsave(&audio->event_queue_lock, flags);
	if (!list_empty(&audio->event_queue)) {
		drv_evt = list_first_entry(&audio->event_queue,
			struct audpcm_event, list);
		list_del(&drv_evt->list);
	}
	if (drv_evt) {
		usr_evt.event_type = drv_evt->event_type;
		usr_evt.event_payload = drv_evt->payload;
		/* recycle the node into the free pool */
		list_add_tail(&drv_evt->list, &audio->free_event_queue);
	} else
		rc = -1; /* NOTE(review): reaches userspace as -EPERM */
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);

	if (drv_evt && drv_evt->event_type == AUDIO_EVENT_WRITE_DONE) {
		mutex_lock(&audio->lock);
		/* release the pmem ref taken in audpcm_aio_buf_add() */
		audpcm_pmem_fixup(audio, drv_evt->payload.aio_buf.buf_addr,
				  drv_evt->payload.aio_buf.buf_len, 0);
		mutex_unlock(&audio->lock);
	}
	if (!rc && copy_to_user(arg, &usr_evt, sizeof(usr_evt)))
		rc = -EFAULT;

	return rc;
}
/*
 * Reject a candidate pmem region [vaddr, vaddr + len) that contains,
 * is contained by, or overlaps an already-registered region.
 * Returns 0 when disjoint from all registered regions, else -EINVAL.
 *
 * Fix: len fields are unsigned long; log them with %lu (was %ld).
 */
static int audpcm_pmem_check(struct audio *audio,
	void *vaddr, unsigned long len)
{
	struct audpcm_pmem_region *region_elt;
	struct audpcm_pmem_region t = { .vaddr = vaddr, .len = len };

	list_for_each_entry(region_elt, &audio->pmem_region_queue, list) {
		if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) ||
		    OVERLAPS(region_elt, &t)) {
			MM_ERR("region (vaddr %p len %lu)"
				" clashes with registered region"
				" (vaddr %p paddr %p len %lu)\n",
				vaddr, len,
				region_elt->vaddr,
				(void *)region_elt->paddr,
				region_elt->len);
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * Register a userspace pmem buffer (identified by fd) for async I/O:
 * pin the pmem file, verify the range does not clash with existing
 * regions, and queue a bookkeeping node.  Returns 0 on success.
 *
 * Fix: "list_add_tail(®ion->list, ...)" was mojibake (the HTML
 * entity "&reg" decoded to the ® character) for "&region->list".
 */
static int audpcm_pmem_add(struct audio *audio,
	struct msm_audio_pmem_info *info)
{
	unsigned long paddr, kvaddr, len;
	struct file *file;
	struct audpcm_pmem_region *region;
	int rc = -EINVAL;

	MM_DBG("\n"); /* Macro prints the file name and function */
	region = kmalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	if (get_pmem_file(info->fd, &paddr, &kvaddr, &len, &file)) {
		kfree(region);
		return -EINVAL;
	}

	rc = audpcm_pmem_check(audio, info->vaddr, len);
	if (rc < 0) {
		put_pmem_file(file);
		kfree(region);
		return rc;
	}

	region->vaddr = info->vaddr;
	region->fd = info->fd;
	region->paddr = paddr;
	region->kvaddr = kvaddr;
	region->len = len;
	region->file = file;
	region->ref_cnt = 0;
	MM_DBG("add region paddr %lx vaddr %p, len %lu\n", region->paddr,
			region->vaddr, region->len);
	list_add_tail(&region->list, &audio->pmem_region_queue);
	return rc;
}
/*
 * Deregister a pmem region identified by (fd, vaddr).  Regions with a
 * nonzero ref_cnt (buffers still queued to the DSP) are refused and
 * -EINVAL is returned; otherwise the node is unlinked, the pmem file
 * released, and 0 returned.
 *
 * Fix: "list_del(®ion->list)" was mojibake for "&region->list".
 */
static int audpcm_pmem_remove(struct audio *audio,
	struct msm_audio_pmem_info *info)
{
	struct audpcm_pmem_region *region;
	struct list_head *ptr, *next;
	int rc = -EINVAL;

	MM_DBG("info fd %d vaddr %p\n", info->fd, info->vaddr);

	list_for_each_safe(ptr, next, &audio->pmem_region_queue) {
		region = list_entry(ptr, struct audpcm_pmem_region, list);

		if ((region->fd == info->fd) &&
		    (region->vaddr == info->vaddr)) {
			if (region->ref_cnt) {
				MM_DBG("region %p in use ref_cnt %d\n", region,
						region->ref_cnt);
				break;
			}
			MM_DBG("remove region fd %d vaddr %p \n", info->fd,
					info->vaddr);
			list_del(&region->list);
			put_pmem_file(region->file);
			kfree(region);
			rc = 0;
			break;
		}
	}

	return rc;
}
/*
 * Find the registered pmem region that fully covers [addr, addr+len).
 * On success *region points at the first match and 0 is returned; -1
 * when no region covers the range.  Multiple matches are logged but
 * the first match is still used.
 *
 * Fix: len fields are unsigned long; log them with %lu (was %ld).
 */
static int audpcm_pmem_lookup_vaddr(struct audio *audio, void *addr,
	unsigned long len, struct audpcm_pmem_region **region)
{
	struct audpcm_pmem_region *region_elt;
	int match_count = 0;

	*region = NULL;

	/* returns physical address or zero */
	list_for_each_entry(region_elt, &audio->pmem_region_queue,
			list) {
		if (addr >= region_elt->vaddr &&
		    addr < region_elt->vaddr + region_elt->len &&
		    addr + len <= region_elt->vaddr + region_elt->len) {
			/* offset since we could pass vaddr inside a registerd
			 * pmem buffer
			 */
			match_count++;
			if (!*region)
				*region = region_elt;
		}
	}

	if (match_count > 1) {
		MM_ERR("multiple hits for vaddr %p, len %lu\n", addr, len);
		list_for_each_entry(region_elt,
				&audio->pmem_region_queue, list) {
			if (addr >= region_elt->vaddr &&
			    addr < region_elt->vaddr + region_elt->len &&
			    addr + len <= region_elt->vaddr + region_elt->len)
				MM_ERR("\t%p, %lu --> %p\n",
					region_elt->vaddr,
					region_elt->len,
					(void *)region_elt->paddr);
		}
	}

	return *region ? 0 : -1;
}
/*
 * Translate a userspace vaddr inside a registered pmem region to its
 * physical address, bumping (ref_up != 0) or dropping the region's
 * reference count.  Returns 0 when no region covers the range.
 *
 * Fixes: "®ion" mojibake restored to "&region" in the lookup call,
 * and %lu used for the unsigned long len (was %ld).
 */
static unsigned long audpcm_pmem_fixup(struct audio *audio, void *addr,
	unsigned long len, int ref_up)
{
	struct audpcm_pmem_region *region;
	unsigned long paddr;
	int ret;

	ret = audpcm_pmem_lookup_vaddr(audio, addr, len, &region);
	if (ret) {
		MM_ERR("lookup (%p, %lu) failed\n", addr, len);
		return 0;
	}

	if (ref_up)
		region->ref_cnt++;
	else
		region->ref_cnt--;
	MM_DBG("found region %p ref_cnt %d\n", region, region->ref_cnt);
	paddr = region->paddr + (addr - region->vaddr);
	return paddr;
}
/* audio -> lock must be held at this point */
/*
 * Queue one async write buffer from userspace (AUDIO_ASYNC_WRITE).
 * Validates that the buffer lies inside a registered pmem region
 * (taking a reference on it), that the physical address and length are
 * 16-bit aligned, and that the length is nonzero, then appends it to
 * out_queue and kicks the send path.
 */
static int audpcm_aio_buf_add(struct audio *audio, unsigned dir,
	void __user *arg)
{
	unsigned long flags;
	struct audpcm_buffer_node *buf_node;

	buf_node = kmalloc(sizeof(*buf_node), GFP_KERNEL);
	if (!buf_node)
		return -ENOMEM;

	if (copy_from_user(&buf_node->buf, arg, sizeof(buf_node->buf))) {
		kfree(buf_node);
		return -EFAULT;
	}

	MM_DBG("node %p dir %x buf_addr %p buf_len %d data_len %d\n",
			buf_node, dir, buf_node->buf.buf_addr,
			buf_node->buf.buf_len, buf_node->buf.data_len);

	/* takes a pmem region reference; dropped on WRITE_DONE delivery */
	buf_node->paddr = audpcm_pmem_fixup(
		audio, buf_node->buf.buf_addr,
		buf_node->buf.buf_len, 1);
	if (dir) {
		/* write */
		if (!buf_node->paddr ||
		    (buf_node->paddr & 0x1) ||
		    (buf_node->buf.data_len & 0x1) ||
		    (!buf_node->buf.data_len)) {
			kfree(buf_node);
			return -EINVAL;
		}
		spin_lock_irqsave(&audio->dsp_lock, flags);
		list_add_tail(&buf_node->list, &audio->out_queue);
		spin_unlock_irqrestore(&audio->dsp_lock, flags);
		audio->drv_ops.send_data(audio, 0);
	}

	MM_DBG("Add buf_node %p paddr %lx\n", buf_node, buf_node->paddr);

	return 0;
}
/*
 * Copy the most recent AV-sync report into @stats, if one is pending
 * for this decoder.  Clears the pending flag on success.  Runs with
 * local interrupts disabled to serialize against the DSP callback.
 */
static int audio_get_avsync_data(struct audio *audio,
	struct msm_audio_stats *stats)
{
	unsigned long flags;
	int rc = -EINVAL;

	local_irq_save(flags);
	if (audio->avsync_flag && audio->dec_id == audio->avsync[0]) {
		/* av_sync sample count, high:low halves */
		stats->sample_count =
			(audio->avsync[2] << 16) | audio->avsync[3];
		/* av_sync byte count, high:low halves */
		stats->byte_count =
			(audio->avsync[5] << 16) | audio->avsync[6];
		audio->avsync_flag = 0;
		rc = 0;
	}
	local_irq_restore(flags);
	return rc;
}
static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct audio *audio = file->private_data;
int rc = 0;
MM_DBG("cmd = %d\n", cmd);
if (cmd == AUDIO_GET_STATS) {
struct msm_audio_stats stats;
audio->avsync_flag = 0;
memset(&stats, 0, sizeof(stats));
if (audpp_query_avsync(audio->dec_id) < 0)
return rc;
rc = wait_event_interruptible_timeout(audio->avsync_wait,
(audio->avsync_flag == 1),
msecs_to_jiffies(AUDPP_AVSYNC_EVENT_TIMEOUT));
if (rc < 0)
return rc;
else if ((rc > 0) || ((rc == 0) && (audio->avsync_flag == 1))) {
if (audio_get_avsync_data(audio, &stats) < 0)
return rc;
if (copy_to_user((void *)arg, &stats, sizeof(stats)))
return -EFAULT;
return 0;
} else
return -EAGAIN;
}
if (cmd == AUDIO_SET_VOLUME) {
unsigned long flags;
spin_lock_irqsave(&audio->dsp_lock, flags);
audio->volume = arg;
if (audio->running)
audpp_set_volume_and_pan(audio->dec_id, arg, 0,
POPP);
spin_unlock_irqrestore(&audio->dsp_lock, flags);
return 0;
}
if (cmd == AUDIO_GET_EVENT) {
MM_DBG("AUDIO_GET_EVENT\n");
if (mutex_trylock(&audio->get_event_lock)) {
rc = audpcm_process_event_req(audio,
(void __user *) arg);
mutex_unlock(&audio->get_event_lock);
} else
rc = -EBUSY;
return rc;
}
if (cmd == AUDIO_ABORT_GET_EVENT) {
audio->event_abort = 1;
wake_up(&audio->event_wait);
return 0;
}
mutex_lock(&audio->lock);
switch (cmd) {
case AUDIO_START:
MM_DBG("AUDIO_START\n");
rc = audio_enable(audio);
if (!rc) {
rc = wait_event_interruptible_timeout(audio->wait,
audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
MM_INFO("dec_state %d rc = %d\n", audio->dec_state, rc);
if (audio->dec_state != MSM_AUD_DECODER_STATE_SUCCESS)
rc = -ENODEV;
else
rc = 0;
}
break;
case AUDIO_STOP:
MM_DBG("AUDIO_STOP\n");
rc = audio_disable(audio);
audio->stopped = 1;
audio_ioport_reset(audio);
audio->stopped = 0;
break;
case AUDIO_FLUSH:
MM_DBG("AUDIO_FLUSH\n");
audio->wflush = 1;
audio_ioport_reset(audio);
if (audio->running) {
audpp_flush(audio->dec_id);
rc = wait_event_interruptible(audio->write_wait,
!audio->wflush);
if (rc < 0) {
MM_ERR("AUDIO_FLUSH interrupted\n");
rc = -EINTR;
}
} else {
audio->wflush = 0;
}
break;
case AUDIO_SET_CONFIG: {
struct msm_audio_config config;
if (copy_from_user(&config, (void *) arg, sizeof(config))) {
rc = -EFAULT;
break;
}
if (config.channel_count == 1) {
config.channel_count = AUDPP_CMD_PCM_INTF_MONO_V;
} else if (config.channel_count == 2) {
config.channel_count = AUDPP_CMD_PCM_INTF_STEREO_V;
} else {
rc = -EINVAL;
break;
}
if (config.bits == 8)
config.bits = AUDPP_CMD_WAV_PCM_WIDTH_8;
else if (config.bits == 16)
config.bits = AUDPP_CMD_WAV_PCM_WIDTH_16;
else if (config.bits == 24)
config.bits = AUDPP_CMD_WAV_PCM_WIDTH_24;
else {
rc = -EINVAL;
break;
}
audio->out_sample_rate = config.sample_rate;
audio->out_channel_mode = config.channel_count;
audio->out_bits = config.bits;
break;
}
case AUDIO_GET_CONFIG: {
struct msm_audio_config config;
config.buffer_size = (audio->out_dma_sz >> 1);
config.buffer_count = 2;
config.sample_rate = audio->out_sample_rate;
if (audio->out_channel_mode == AUDPP_CMD_PCM_INTF_MONO_V)
config.channel_count = 1;
else
config.channel_count = 2;
if (audio->out_bits == AUDPP_CMD_WAV_PCM_WIDTH_8)
config.bits = 8;
else if (audio->out_bits == AUDPP_CMD_WAV_PCM_WIDTH_24)
config.bits = 24;
else
config.bits = 16;
config.unused[0] = 0;
config.unused[1] = 0;
if (copy_to_user((void *) arg, &config, sizeof(config)))
rc = -EFAULT;
else
rc = 0;
break;
}
case AUDIO_PAUSE:
MM_DBG("AUDIO_PAUSE %ld\n", arg);
rc = audpp_pause(audio->dec_id, (int) arg);
break;
case AUDIO_REGISTER_PMEM: {
struct msm_audio_pmem_info info;
MM_DBG("AUDIO_REGISTER_PMEM\n");
if (copy_from_user(&info, (void *) arg, sizeof(info)))
rc = -EFAULT;
else
rc = audpcm_pmem_add(audio, &info);
break;
}
case AUDIO_DEREGISTER_PMEM: {
struct msm_audio_pmem_info info;
MM_DBG("AUDIO_DEREGISTER_PMEM\n");
if (copy_from_user(&info, (void *) arg, sizeof(info)))
rc = -EFAULT;
else
rc = audpcm_pmem_remove(audio, &info);
break;
}
case AUDIO_ASYNC_WRITE:
if (audio->drv_status & ADRV_STATUS_FSYNC)
rc = -EBUSY;
else
rc = audpcm_aio_buf_add(audio, 1, (void __user *) arg);
break;
case AUDIO_ASYNC_READ:
MM_ERR("AUDIO_ASYNC_READ not supported\n");
rc = -EPERM;
break;
case AUDIO_GET_SESSION_ID:
if (copy_to_user((void *) arg, &audio->dec_id,
sizeof(unsigned short)))
return -EFAULT;
break;
default:
rc = -EINVAL;
}
mutex_unlock(&audio->lock);
return rc;
}
/* Only useful in tunnel-mode */
/*
 * fsync for the AIO interface: block until the DSP has drained every
 * queued buffer (teos set, out_queue empty, DSP asking for data) or
 * until the stream is flushed/stopped, in which case -EBUSY is
 * returned.  ADRV_STATUS_FSYNC blocks new AUDIO_ASYNC_WRITEs while the
 * drain is in progress.
 */
int audpcm_async_fsync(struct audio *audio)
{
	int rc = 0;

	MM_DBG("\n"); /* Macro prints the file name and function */

	/* Blocking client sends more data */
	mutex_lock(&audio->lock);
	audio->drv_status |= ADRV_STATUS_FSYNC;
	mutex_unlock(&audio->lock);

	mutex_lock(&audio->write_lock);
	/* pcm dmamiss message is sent continously
	 * when decoder is starved so no race
	 * condition concern
	 */
	audio->teos = 0;

	rc = wait_event_interruptible(audio->write_wait,
		(audio->teos && audio->out_needed &&
		list_empty(&audio->out_queue))
		|| audio->wflush || audio->stopped);

	if (audio->stopped || audio->wflush)
		rc = -EBUSY;

	mutex_unlock(&audio->write_lock);
	mutex_lock(&audio->lock);
	audio->drv_status &= ~ADRV_STATUS_FSYNC;
	mutex_unlock(&audio->lock);

	return rc;
}
/*
 * fsync for the standard interface: wait for both ping-pong halves to
 * drain, push out a reserved odd byte (padded to two bytes) if one is
 * pending from audio_write(), then wait for the DSP's end-of-stream
 * (teos) signal.  Returns -EBUSY when interrupted by a flush.
 */
int audpcm_sync_fsync(struct audio *audio)
{
	struct buffer *frame;
	int rc = 0;

	MM_DBG("\n"); /* Macro prints the file name and function */

	mutex_lock(&audio->write_lock);

	rc = wait_event_interruptible(audio->write_wait,
		(!audio->out[0].used &&
		!audio->out[1].used &&
		audio->out_needed) || audio->wflush);

	if (rc < 0)
		goto done;
	else if (audio->wflush) {
		rc = -EBUSY;
		goto done;
	}

	if (audio->reserved) {
		MM_DBG("send reserved byte\n");
		frame = audio->out + audio->out_tail;
		/* pad the dangling odd byte to a 16-bit unit */
		((char *) frame->data)[0] = audio->rsv_byte;
		((char *) frame->data)[1] = 0;
		frame->used = 2;
		audio->drv_ops.send_data(audio, 0);

		rc = wait_event_interruptible(audio->write_wait,
			(!audio->out[0].used &&
			!audio->out[1].used &&
			audio->out_needed) || audio->wflush);

		if (rc < 0)
			goto done;
		else if (audio->wflush) {
			rc = -EBUSY;
			goto done;
		}
	}

	/* pcm dmamiss message is sent continously
	 * when decoder is starved so no race
	 * condition concern
	 */
	audio->teos = 0;

	rc = wait_event_interruptible(audio->write_wait,
		audio->teos || audio->wflush);

	if (audio->wflush)
		rc = -EBUSY;

done:
	mutex_unlock(&audio->write_lock);
	return rc;
}
/*
 * VFS fsync hook: dispatch to the interface-specific drain routine
 * (async or sync) installed at open time; -EINVAL if not running.
 */
int audpcm_fsync(struct file *file, loff_t ppos1, loff_t ppos2, int datasync)
{
	struct audio *audio = file->private_data;

	return audio->running ? audio->drv_ops.fsync(audio) : -EINVAL;
}
/*
 * Blocking write for the standard interface.  Copies user data into
 * the free ping-pong half, carrying a dangling odd byte over to the
 * next write (the DSP consumes 16-bit units), and submits each filled
 * half.  Returns bytes consumed, or a negative error if nothing was
 * consumed.  Not available on the AIO interface.
 *
 * Fix: the debug trace printed the size_t count with %d; use %zu.
 */
static ssize_t audio_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct audio *audio = file->private_data;
	const char __user *start = buf;
	struct buffer *frame;
	size_t xfer;
	char *cpy_ptr;
	int rc = 0;
	unsigned dsize;

	if (audio->drv_status & ADRV_STATUS_AIO_INTF)
		return -EPERM;

	MM_DBG("cnt=%zu\n", count);

	mutex_lock(&audio->write_lock);
	while (count > 0) {
		frame = audio->out + audio->out_head;
		cpy_ptr = frame->data;
		dsize = 0;
		rc = wait_event_interruptible(audio->write_wait,
					      (frame->used == 0)
					      || (audio->stopped)
					      || (audio->wflush));
		if (rc < 0)
			break;
		if (audio->stopped || audio->wflush) {
			rc = -EBUSY;
			break;
		}

		if (audio->reserved) {
			/* prepend the odd byte left over from last write */
			MM_DBG("append reserved byte %x\n", audio->rsv_byte);
			*cpy_ptr = audio->rsv_byte;
			xfer = (count > (frame->size - 1)) ?
				frame->size - 1 : count;
			cpy_ptr++;
			dsize = 1;
			audio->reserved = 0;
		} else
			xfer = (count > frame->size) ? frame->size : count;

		if (copy_from_user(cpy_ptr, buf, xfer)) {
			rc = -EFAULT;
			break;
		}

		dsize += xfer;
		if (dsize & 1) {
			/* hold back the odd trailing byte for next time */
			audio->rsv_byte = ((char *) frame->data)[dsize - 1];
			MM_DBG("odd length buf reserve last byte %x\n",
					audio->rsv_byte);
			audio->reserved = 1;
			dsize--;
		}
		count -= xfer;
		buf += xfer;

		if (dsize > 0) {
			audio->out_head ^= 1;
			frame->used = dsize;
			audio->drv_ops.send_data(audio, 0);
		}
	}
	mutex_unlock(&audio->write_lock);
	if (buf > start)
		return buf - start;

	return rc;
}
/*
 * Release every registered pmem region (device teardown): unlink the
 * node, drop the pmem file pin, and free the bookkeeping struct.
 *
 * Fix: "list_del(®ion->list)" was mojibake for "&region->list".
 */
static void audpcm_reset_pmem_region(struct audio *audio)
{
	struct audpcm_pmem_region *region;
	struct list_head *ptr, *next;

	list_for_each_safe(ptr, next, &audio->pmem_region_queue) {
		region = list_entry(ptr, struct audpcm_pmem_region, list);
		list_del(&region->list);
		put_pmem_file(region->file);
		kfree(region);
	}
}
/*
 * Release the decoder instance: stop the DSP decoder, flush output,
 * drop pmem pins, release the ADSP module and decoder slot, tear down
 * the event machinery, and free the DMA buffer and the instance.
 */
static int audio_release(struct inode *inode, struct file *file)
{
	struct audio *audio = file->private_data;

	MM_INFO("audio instance 0x%08x freeing\n", (int)audio);
	mutex_lock(&audio->lock);
	auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->dec_id);
	audio_disable(audio);
	audio->drv_ops.out_flush(audio);
	audpcm_reset_pmem_region(audio);
	msm_adsp_put(audio->audplay);
	audpp_adec_free(audio->dec_id);
#ifdef CONFIG_HAS_EARLYSUSPEND
	unregister_early_suspend(&audio->suspend_ctl.node);
#endif
	audio->opened = 0;
	/* kick any reader blocked in AUDIO_GET_EVENT before freeing */
	audio->event_abort = 1;
	wake_up(&audio->event_wait);
	audpcm_reset_event_queue(audio);
	if (audio->data) {
		iounmap(audio->map_v_write);
		free_contiguous_memory_by_paddr(audio->phys);
	}
	mutex_unlock(&audio->lock);
#ifdef CONFIG_DEBUG_FS
	if (audio->dentry)
		debugfs_remove(audio->dentry);
#endif
	kfree(audio);
	return 0;
}
/*
 * Queue an event for delivery via AUDIO_GET_EVENT and wake waiters.
 * Reuses a node from the pre-allocated free pool when possible;
 * otherwise allocates with GFP_ATOMIC (callable under the spinlock /
 * from non-sleeping context).  The event is dropped if allocation
 * fails.
 */
static void audpcm_post_event(struct audio *audio, int type,
	union msm_audio_event_payload payload)
{
	struct audpcm_event *e_node = NULL;
	unsigned long flags;

	spin_lock_irqsave(&audio->event_queue_lock, flags);

	if (!list_empty(&audio->free_event_queue)) {
		e_node = list_first_entry(&audio->free_event_queue,
				struct audpcm_event, list);
		list_del(&e_node->list);
	} else {
		e_node = kmalloc(sizeof(struct audpcm_event), GFP_ATOMIC);
		if (!e_node) {
			MM_ERR("No mem to post event %d\n", type);
			return;
		}
	}

	e_node->event_type = type;
	e_node->payload = payload;

	list_add_tail(&e_node->list, &audio->event_queue);
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	wake_up(&audio->event_wait);
}
#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * Early-suspend notifier: post AUDIO_EVENT_SUSPEND to userspace.
 *
 * Fix: the payload union was passed uninitialized; zero it so the
 * event never carries indeterminate stack bytes.
 */
static void audpcm_suspend(struct early_suspend *h)
{
	struct audpcm_suspend_ctl *ctl =
		container_of(h, struct audpcm_suspend_ctl, node);
	union msm_audio_event_payload payload;

	memset(&payload, 0, sizeof(payload));
	MM_DBG("\n"); /* Macro prints the file name and function */
	audpcm_post_event(ctl->audio, AUDIO_EVENT_SUSPEND, payload);
}
/*
 * Early-suspend (resume) notifier: post AUDIO_EVENT_RESUME.
 *
 * Fix: the payload union was passed uninitialized; zero it so the
 * event never carries indeterminate stack bytes.
 */
static void audpcm_resume(struct early_suspend *h)
{
	struct audpcm_suspend_ctl *ctl =
		container_of(h, struct audpcm_suspend_ctl, node);
	union msm_audio_event_payload payload;

	memset(&payload, 0, sizeof(payload));
	MM_DBG("\n"); /* Macro prints the file name and function */
	audpcm_post_event(ctl->audio, AUDIO_EVENT_RESUME, payload);
}
#endif
#ifdef CONFIG_DEBUG_FS
/*
 * debugfs open: stash the audio instance for subsequent reads.
 *
 * Fix: declared ssize_t but assigned to file_operations.open, whose
 * type is int (*)(struct inode *, struct file *) — an incompatible
 * pointer type on 64-bit builds.  Return type corrected to int.
 */
static int audpcm_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
/*
 * debugfs read: render the instance state into a static scratch
 * buffer and serve it.  Fields after the unlock are read without the
 * mutex by design (see comment below).  scnprintf never writes more
 * than debug_bufmax-1 characters, so buffer[n] stays in bounds.
 */
static ssize_t audpcm_debug_read(struct file *file, char __user *buf,
	size_t count, loff_t *ppos)
{
	const int debug_bufmax = 4096;
	static char buffer[4096];
	int n = 0;
	struct audio *audio = file->private_data;

	mutex_lock(&audio->lock);
	n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"enabled %d\n", audio->enabled);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"stopped %d\n", audio->stopped);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_buf_sz %d\n", audio->out[0].size);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"volume %x \n", audio->volume);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"sample rate %d \n", audio->out_sample_rate);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"channel mode %d \n", audio->out_channel_mode);
	mutex_unlock(&audio->lock);
	/* Following variables are only useful for debugging when
	 * when playback halts unexpectedly. Thus, no mutual exclusion
	 * enforced
	 */
	n += scnprintf(buffer + n, debug_bufmax - n,
			"wflush %d\n", audio->wflush);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"running %d \n", audio->running);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"dec state %d \n", audio->dec_state);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_needed %d \n", audio->out_needed);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_head %d \n", audio->out_head);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_tail %d \n", audio->out_tail);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out[0].used %d \n", audio->out[0].used);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out[1].used %d \n", audio->out[1].used);
	buffer[n] = 0;
	return simple_read_from_buffer(buf, count, ppos, buffer, n);
}
/* debugfs file operations for the per-instance state dump */
static const struct file_operations audpcm_debug_fops = {
	.read = audpcm_debug_read,
	.open = audpcm_debug_open,
};
#endif
/*
 * Open a PCM decoder instance: allocate the state struct, claim a DSP
 * decoder slot, select AIO vs. standard-I/O operations based on
 * O_NONBLOCK, set up DMA buffers / locks / queues, and register for
 * device events.  Error paths unwind everything acquired so far.
 */
static int audio_open(struct inode *inode, struct file *file)
{
	struct audio *audio = NULL;
	int rc, i, dec_attrb, decid;
	struct audpcm_event *e_node = NULL;
	unsigned pmem_sz = DMASZ_MAX;
#ifdef CONFIG_DEBUG_FS
	/* 4 bytes represents decoder number, 1 byte for terminate string */
	char name[sizeof "msm_pcm_dec_" + 5];
#endif

	/* Allocate audio instance, set to zero */
	audio = kzalloc(sizeof(struct audio), GFP_KERNEL);
	if (!audio) {
		MM_ERR("no memory to allocate audio instance \n");
		rc = -ENOMEM;
		goto done;
	}
	MM_INFO("audio instance 0x%08x created\n", (int)audio);

	/* Allocate the decoder */
	dec_attrb = AUDDEC_DEC_PCM;
	if (file->f_mode & FMODE_READ) {
		MM_ERR("Non-Tunneled mode not supported\n");
		rc = -EPERM;
		kfree(audio);
		goto done;
	} else
		dec_attrb |= MSM_AUD_MODE_TUNNEL;

	decid = audpp_adec_alloc(dec_attrb, &audio->module_name,
			&audio->queue_id);
	if (decid < 0) {
		MM_ERR("No free decoder available, freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENODEV;
		kfree(audio);
		goto done;
	}
	audio->dec_id = decid & MSM_AUD_DECODER_MASK;

	/* AIO interface */
	if (file->f_flags & O_NONBLOCK) {
		MM_DBG("set to aio interface\n");
		audio->drv_status |= ADRV_STATUS_AIO_INTF;
		audio->drv_ops.send_data = audpcm_async_send_data;
		audio->drv_ops.out_flush = audpcm_async_flush;
		audio->drv_ops.fsync = audpcm_async_fsync;
	} else {
		MM_DBG("set to std io interface\n");
		/* halve the request until a contiguous buffer fits */
		while (pmem_sz >= DMASZ_MIN) {
			MM_DBG("pmemsz = %d\n", pmem_sz);
			audio->phys = allocate_contiguous_ebi_nomap(pmem_sz,
					SZ_4K);
			if (audio->phys) {
				audio->map_v_write = ioremap(
						audio->phys, pmem_sz);
				/* NOTE(review): ioremap() returns NULL on
				 * failure, which IS_ERR() does not catch —
				 * confirm this error check is sufficient */
				if (IS_ERR(audio->map_v_write)) {
					MM_ERR("could not map write phys\
address freeing instance \
0x%08x\n", (int)audio);
					rc = -ENOMEM;
					free_contiguous_memory_by_paddr(
							audio->phys);
					audpp_adec_free(audio->dec_id);
					kfree(audio);
					goto done;
				}
				audio->data = audio->map_v_write;
				MM_DBG("write buf: phy addr 0x%08x \
kernel addr 0x%08x\n",
						audio->phys, (int)audio->data);
				break;
			} else if (pmem_sz == DMASZ_MIN) {
				MM_ERR("could not allocate write buffers \
freeing instance 0x%08x\n", (int)audio);
				rc = -ENOMEM;
				audpp_adec_free(audio->dec_id);
				kfree(audio);
				goto done;
			} else
				pmem_sz >>= 1;
		}
		audio->out_dma_sz = pmem_sz;
		audio->drv_ops.send_data = audplay_send_data;
		audio->drv_ops.out_flush = audio_flush;
		audio->drv_ops.fsync = audpcm_sync_fsync;
		/* split the DMA area into two ping-pong halves */
		audio->out[0].data = audio->data + 0;
		audio->out[0].addr = audio->phys + 0;
		audio->out[0].size = (audio->out_dma_sz >> 1);
		audio->out[1].data = audio->data + audio->out[0].size;
		audio->out[1].addr = audio->phys + audio->out[0].size;
		audio->out[1].size = audio->out[0].size;
	}

	rc = msm_adsp_get(audio->module_name, &audio->audplay,
			&audpcmdec_adsp_ops, audio);
	if (rc) {
		MM_ERR("failed to get %s module, freeing instance 0x%08x\n",
				audio->module_name, (int)audio);
		goto err;
	}

	/* Initialize all locks of audio instance */
	mutex_init(&audio->lock);
	mutex_init(&audio->write_lock);
	mutex_init(&audio->get_event_lock);
	spin_lock_init(&audio->dsp_lock);
	init_waitqueue_head(&audio->write_wait);
	INIT_LIST_HEAD(&audio->out_queue);
	INIT_LIST_HEAD(&audio->pmem_region_queue);
	INIT_LIST_HEAD(&audio->free_event_queue);
	INIT_LIST_HEAD(&audio->event_queue);
	init_waitqueue_head(&audio->wait);
	init_waitqueue_head(&audio->event_wait);
	spin_lock_init(&audio->event_queue_lock);
	init_waitqueue_head(&audio->avsync_wait);

	/* defaults: 44.1 kHz stereo, 16-bit, full volume */
	audio->out_sample_rate = 44100;
	audio->out_channel_mode = AUDPP_CMD_PCM_INTF_STEREO_V;
	audio->out_bits = AUDPP_CMD_WAV_PCM_WIDTH_16;
	audio->volume = 0x7FFF;
	audio->drv_ops.out_flush(audio);

	file->private_data = audio;
	audio->opened = 1;

	audio->device_events = AUDDEV_EVT_DEV_RDY
				|AUDDEV_EVT_DEV_RLS|
				AUDDEV_EVT_STREAM_VOL_CHG;

	rc = auddev_register_evt_listner(audio->device_events,
					AUDDEV_CLNT_DEC,
					audio->dec_id,
					pcm_listner,
					(void *)audio);
	if (rc) {
		MM_ERR("failed to register listnet\n");
		goto event_err;
	}

#ifdef CONFIG_DEBUG_FS
	snprintf(name, sizeof name, "msm_pcm_dec_%04x", audio->dec_id);
	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
			NULL, (void *) audio, &audpcm_debug_fops);
	if (IS_ERR(audio->dentry))
		MM_ERR("debugfs_create_file failed\n");
#endif
#ifdef CONFIG_HAS_EARLYSUSPEND
	audio->suspend_ctl.node.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
	audio->suspend_ctl.node.resume = audpcm_resume;
	audio->suspend_ctl.node.suspend = audpcm_suspend;
	audio->suspend_ctl.audio = audio;
	register_early_suspend(&audio->suspend_ctl.node);
#endif
	/* pre-allocate event nodes for the userspace event queue */
	for (i = 0; i < AUDPCM_EVENT_NUM; i++) {
		e_node = kmalloc(sizeof(struct audpcm_event), GFP_KERNEL);
		if (e_node)
			list_add_tail(&e_node->list, &audio->free_event_queue);
		else {
			MM_ERR("event pkt alloc failed\n");
			break;
		}
	}
done:
	return rc;
event_err:
	msm_adsp_put(audio->audplay);
err:
	if (audio->data) {
		iounmap(audio->map_v_write);
		free_contiguous_memory_by_paddr(audio->phys);
	}
	audpp_adec_free(audio->dec_id);
	kfree(audio);
	return rc;
}
/* character-device entry points for /dev/msm_pcm_dec */
static const struct file_operations audio_pcm_fops = {
	.owner = THIS_MODULE,
	.open = audio_open,
	.release = audio_release,
	.write = audio_write,
	.unlocked_ioctl = audio_ioctl,
	.fsync = audpcm_fsync,
};
/* misc device node: /dev/msm_pcm_dec with a dynamically assigned minor */
struct miscdevice audio_pcm_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "msm_pcm_dec",
	.fops = &audio_pcm_fops,
};
/* Module init: register the PCM decoder misc device. */
static int __init audio_init(void)
{
	return misc_register(&audio_pcm_misc);
}

device_initcall(audio_init);
| gpl-2.0 |
minz1/xosp_kernel_xiaomi_msm8956 | drivers/cpufreq/davinci-cpufreq.c | 1155 | 5494 | /*
* CPU frequency scaling for DaVinci
*
* Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
*
* Based on linux/arch/arm/plat-omap/cpu-omap.c. Original Copyright follows:
*
* Copyright (C) 2005 Nokia Corporation
* Written by Tony Lindgren <tony@atomide.com>
*
* Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
*
* Copyright (C) 2007-2008 Texas Instruments, Inc.
* Updated to support OMAP3
* Rajendra Nayak <rnayak@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/export.h>
#include <mach/hardware.h>
#include <mach/cpufreq.h>
#include <mach/common.h>
/*
 * Driver-wide state: the managing platform device, the ARM core clock
 * being scaled, and the optional "async" clock whose rate is re-set to
 * asyncrate after every CPU rate change (see davinci_target()).
 */
struct davinci_cpufreq {
	struct device *dev;
	struct clk *armclk;
	struct clk *asyncclk;
	unsigned long asyncrate;	/* async clock rate to restore, in Hz */
};
static struct davinci_cpufreq cpufreq;
/*
 * cpufreq .verify hook: clamp the policy's min/max to supported rates.
 * With a frequency table the generic table helper is used; otherwise
 * the bounds are rounded to achievable armclk rates (policy values are
 * in kHz, clk rates in Hz).
 */
static int davinci_verify_speed(struct cpufreq_policy *policy)
{
	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
	struct cpufreq_frequency_table *freq_table = pdata->freq_table;
	struct clk *armclk = cpufreq.armclk;

	if (freq_table)
		return cpufreq_frequency_table_verify(policy, freq_table);

	/* only CPU0 exists */
	if (policy->cpu)
		return -EINVAL;

	cpufreq_verify_within_cpu_limits(policy);
	policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
	policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
						policy->cpuinfo.max_freq);
	return 0;
}
/*
 * cpufreq .get hook: current ARM clock rate in kHz.  Only CPU0 exists;
 * any other cpu number yields 0.
 */
static unsigned int davinci_getspeed(unsigned int cpu)
{
	return cpu ? 0 : clk_get_rate(cpufreq.armclk) / 1000;
}
/*
 * cpufreq .target_index hook: change the ARM clock to the table entry
 * at @idx, raising the voltage before an upward step and lowering it
 * after a downward step, and restoring the async clock rate if one was
 * found at probe time.  PRE/POSTCHANGE notifications bracket the whole
 * operation (POSTCHANGE fires even on error).
 */
static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
{
	int ret = 0;
	struct cpufreq_freqs freqs;
	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
	struct clk *armclk = cpufreq.armclk;

	freqs.old = davinci_getspeed(0);
	freqs.new = pdata->freq_table[idx].frequency;

	dev_dbg(cpufreq.dev, "transition: %u --> %u\n", freqs.old, freqs.new);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	/* if moving to higher frequency, up the voltage beforehand */
	if (pdata->set_voltage && freqs.new > freqs.old) {
		ret = pdata->set_voltage(idx);
		if (ret)
			goto out;
	}

	/* NOTE(review): the table *index* is passed as the rate here,
	 * not freqs.new * 1000.  The DaVinci arch clock code appears to
	 * rely on this convention — confirm against the platform clk
	 * implementation before changing. */
	ret = clk_set_rate(armclk, idx);
	if (ret)
		goto out;

	if (cpufreq.asyncclk) {
		ret = clk_set_rate(cpufreq.asyncclk, cpufreq.asyncrate);
		if (ret)
			goto out;
	}

	/* if moving to lower freq, lower the voltage after lowering freq */
	if (pdata->set_voltage && freqs.new < freqs.old)
		pdata->set_voltage(idx);

out:
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	return ret;
}
/*
 * cpufreq .init hook for CPU0: run optional platform init, record the
 * current speed, publish the frequency table, and set a conservative
 * transition latency.
 */
static int davinci_cpu_init(struct cpufreq_policy *policy)
{
	int result = 0;
	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
	struct cpufreq_frequency_table *freq_table = pdata->freq_table;

	if (policy->cpu != 0)
		return -EINVAL;

	/* Finish platform specific initialization */
	if (pdata->init) {
		result = pdata->init();
		if (result)
			return result;
	}

	policy->cur = davinci_getspeed(0);

	result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
	if (result) {
		pr_err("%s: cpufreq_frequency_table_cpuinfo() failed",
				__func__);
		return result;
	}

	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

	/*
	 * Time measurement across the target() function yields ~1500-1800us
	 * time taken with no drivers on notification list.
	 * Setting the latency to 2000 us to accommodate addition of drivers
	 * to pre/post change notification list.
	 */
	policy->cpuinfo.transition_latency = 2000 * 1000;
	return 0;
}
/* cpufreq .exit hook: unpublish the per-cpu frequency table attribute. */
static int davinci_cpu_exit(struct cpufreq_policy *policy)
{
	cpufreq_frequency_table_put_attr(policy->cpu);
	return 0;
}
/* sysfs attributes: expose scaling_available_frequencies */
static struct freq_attr *davinci_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

/* cpufreq driver description registered from the probe routine */
static struct cpufreq_driver davinci_driver = {
	.flags = CPUFREQ_STICKY,
	.verify = davinci_verify_speed,
	.target_index = davinci_target,
	.get = davinci_getspeed,
	.init = davinci_cpu_init,
	.exit = davinci_cpu_exit,
	.name = "davinci",
	.attr = davinci_cpufreq_attr,
};
/*
 * Probe: validate platform data, acquire the ARM (and optional async)
 * clocks, then register the cpufreq driver.
 *
 * Returns 0 on success or a negative errno.  On registration failure
 * the clock references taken here are released again (the original
 * code leaked them).
 */
static int __init davinci_cpufreq_probe(struct platform_device *pdev)
{
	struct davinci_cpufreq_config *pdata = pdev->dev.platform_data;
	struct clk *asyncclk;
	int ret;

	if (!pdata)
		return -EINVAL;
	if (!pdata->freq_table)
		return -EINVAL;

	cpufreq.dev = &pdev->dev;

	cpufreq.armclk = clk_get(NULL, "arm");
	if (IS_ERR(cpufreq.armclk)) {
		dev_err(cpufreq.dev, "Unable to get ARM clock\n");
		return PTR_ERR(cpufreq.armclk);
	}

	/* The async clock is optional; a failed lookup is not an error. */
	asyncclk = clk_get(cpufreq.dev, "async");
	if (!IS_ERR(asyncclk)) {
		cpufreq.asyncclk = asyncclk;
		cpufreq.asyncrate = clk_get_rate(asyncclk);
	}

	ret = cpufreq_register_driver(&davinci_driver);
	if (ret) {
		/* Fix: don't leak the clock references on failure. */
		if (cpufreq.asyncclk) {
			clk_put(cpufreq.asyncclk);
			cpufreq.asyncclk = NULL;
		}
		clk_put(cpufreq.armclk);
	}

	return ret;
}
/* Release the clock references taken at probe and unregister the driver. */
static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
{
	if (cpufreq.asyncclk)
		clk_put(cpufreq.asyncclk);
	clk_put(cpufreq.armclk);

	return cpufreq_unregister_driver(&davinci_driver);
}
/*
 * No .probe here: the driver is registered via platform_driver_probe()
 * below, which takes the __init-section probe callback separately.
 */
static struct platform_driver davinci_cpufreq_driver = {
	.driver = {
		.name = "cpufreq-davinci",
		.owner = THIS_MODULE,
	},
	.remove = __exit_p(davinci_cpufreq_remove),
};
/* Entry point called from board code: bind the probe to the platform device. */
int __init davinci_cpufreq_init(void)
{
	return platform_driver_probe(&davinci_cpufreq_driver,
				     davinci_cpufreq_probe);
}
| gpl-2.0 |
eyeballer/LenovoA1-3.0.8 | arch/powerpc/kernel/rtas_flash.c | 2435 | 22133 | /*
* c 2001 PPC 64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* /proc/powerpc/rtas/firmware_flash interface
*
* This file implements a firmware_flash interface to pump a firmware
* image into the kernel. At reboot time rtas_restart() will see the
* firmware image and flash it as it reboots (see rtas.c).
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/rtas.h>
#include <asm/abs_addr.h>
#define MODULE_VERS "1.0"
#define MODULE_NAME "rtas_flash"
#define FIRMWARE_FLASH_NAME "firmware_flash"
#define FIRMWARE_UPDATE_NAME "firmware_update"
#define MANAGE_FLASH_NAME "manage_flash"
#define VALIDATE_FLASH_NAME "validate_flash"
/* General RTAS Status Codes */
#define RTAS_RC_SUCCESS 0
#define RTAS_RC_HW_ERR -1
#define RTAS_RC_BUSY -2
/* Flash image status values */
#define FLASH_AUTH -9002 /* RTAS Not Service Authority Partition */
#define FLASH_NO_OP -1099 /* No operation initiated by user */
#define FLASH_IMG_SHORT -1005 /* Flash image shorter than expected */
#define FLASH_IMG_BAD_LEN -1004 /* Bad length value in flash list block */
#define FLASH_IMG_NULL_DATA -1003 /* Bad data value in flash list block */
#define FLASH_IMG_READY 0 /* Firmware img ready for flash on reboot */
/* Manage image status values */
#define MANAGE_AUTH -9002 /* RTAS Not Service Authority Partition */
#define MANAGE_ACTIVE_ERR -9001 /* RTAS Cannot Overwrite Active Img */
#define MANAGE_NO_OP -1099 /* No operation initiated by user */
#define MANAGE_PARAM_ERR -3 /* RTAS Parameter Error */
#define MANAGE_HW_ERR -1 /* RTAS Hardware Error */
/* Validate image status values */
#define VALIDATE_AUTH -9002 /* RTAS Not Service Authority Partition */
#define VALIDATE_NO_OP -1099 /* No operation initiated by the user */
#define VALIDATE_INCOMPLETE -1002 /* User copied < VALIDATE_BUF_SIZE */
#define VALIDATE_READY -1001 /* Firmware image ready for validation */
#define VALIDATE_PARAM_ERR -3 /* RTAS Parameter Error */
#define VALIDATE_HW_ERR -1 /* RTAS Hardware Error */
#define VALIDATE_TMP_UPDATE 0 /* Validate Return Status */
#define VALIDATE_FLASH_AUTH 1 /* Validate Return Status */
#define VALIDATE_INVALID_IMG 2 /* Validate Return Status */
#define VALIDATE_CUR_UNKNOWN 3 /* Validate Return Status */
#define VALIDATE_TMP_COMMIT_DL 4 /* Validate Return Status */
#define VALIDATE_TMP_COMMIT 5 /* Validate Return Status */
#define VALIDATE_TMP_UPDATE_DL 6 /* Validate Return Status */
/* ibm,manage-flash-image operation tokens */
#define RTAS_REJECT_TMP_IMG 0
#define RTAS_COMMIT_TMP_IMG 1
/* Array sizes */
#define VALIDATE_BUF_SIZE 4096
#define RTAS_MSG_MAXLEN 64
/* Quirk - RTAS requires 4k list length and block size */
#define RTAS_BLKLIST_LENGTH 4096
#define RTAS_BLK_SIZE 4096
/* One contiguous chunk of firmware image data (at most RTAS_BLK_SIZE). */
struct flash_block {
	char *data;
	unsigned long length;
};

/* This struct is very similar but not identical to
 * that needed by the rtas flash update.
 * All we need to do for rtas is rewrite num_blocks
 * into a version/length and translate the pointers
 * to absolute.
 */
#define FLASH_BLOCKS_PER_NODE ((RTAS_BLKLIST_LENGTH - 16) / sizeof(struct flash_block))
struct flash_block_list {
	unsigned long num_blocks;	/* entries in use; rewritten to
					 * version/length before flashing */
	struct flash_block_list *next;
	struct flash_block blocks[FLASH_BLOCKS_PER_NODE];
};

/* Image handed to rtas_flash_firmware() at reboot time. */
static struct flash_block_list *rtas_firmware_flash_list;

/* Use slab cache to guarantee 4k alignment */
static struct kmem_cache *flash_block_cache = NULL;

#define FLASH_BLOCK_LIST_VERSION (1UL)

/* Local copy of the flash block list.
 * We only allow one open of the flash proc file and create this
 * list as we go. The rtas_firmware_flash_list variable will be
 * set once the data is fully read.
 *
 * For convenience as we build the list we use virtual addrs,
 * we do not fill in the version number, and the length field
 * is treated as the number of entries currently in the block
 * (i.e. not a byte count). This is all fixed when calling
 * the flash routine.
 */

/* Status int must be first member of struct */
struct rtas_update_flash_t
{
	int status;			/* Flash update status */
	struct flash_block_list *flist; /* Local copy of flash block list */
};

/* Status int must be first member of struct */
struct rtas_manage_flash_t
{
	int status;			/* Returned status */
	unsigned int op;		/* Reject or commit image */
};

/* Status int must be first member of struct */
struct rtas_validate_flash_t
{
	int status;			/* Returned status */
	char buf[VALIDATE_BUF_SIZE];	/* Candidate image buffer */
	unsigned int buf_size;		/* Size of image buf */
	unsigned int update_results;	/* Update results token */
};

/* Serializes the exclusive-open check in rtas_excl_open(). */
static DEFINE_SPINLOCK(flash_file_open_lock);

/* The four /proc/powerpc/rtas entries created at init time. */
static struct proc_dir_entry *firmware_flash_pde;
static struct proc_dir_entry *firmware_update_pde;
static struct proc_dir_entry *validate_pde;
static struct proc_dir_entry *manage_pde;
/* Do simple sanity checks on the flash image. */
/*
 * Walk the block list, reject NULL or badly sized blocks, and collect
 * the total image size.  Returns FLASH_IMG_READY when the image looks
 * usable, otherwise one of the negative FLASH_* status codes.
 */
static int flash_list_valid(struct flash_block_list *flist)
{
	struct flash_block_list *f;
	int i;
	unsigned long block_size, image_size;

	/* Paranoid self test here. We also collect the image size. */
	image_size = 0;
	for (f = flist; f; f = f->next) {
		for (i = 0; i < f->num_blocks; i++) {
			if (f->blocks[i].data == NULL)
				return FLASH_IMG_NULL_DATA;
			block_size = f->blocks[i].length;
			/*
			 * Fix: block_size is unsigned, so "<= 0" could only
			 * ever mean "== 0" — say what is actually checked.
			 */
			if (block_size == 0 || block_size > RTAS_BLK_SIZE)
				return FLASH_IMG_BAD_LEN;
			image_size += block_size;
		}
	}

	/*
	 * Anything shorter than 2 bytes is treated as "no operation".
	 * NOTE(review): images between 2 bytes and 256kB fall through and
	 * are accepted, exactly as before — confirm that is intended.
	 */
	if (image_size < (256 << 10)) {
		if (image_size < 2)
			return FLASH_NO_OP;
	}

	printk(KERN_INFO "FLASH: flash image with %ld bytes stored for hardware flash on reboot\n", image_size);
	return FLASH_IMG_READY;
}
static void free_flash_list(struct flash_block_list *f)
{
struct flash_block_list *next;
int i;
while (f) {
for (i = 0; i < f->num_blocks; i++)
kmem_cache_free(flash_block_cache, f->blocks[i].data);
next = f->next;
kmem_cache_free(flash_block_cache, f);
f = next;
}
}
/*
 * Release for firmware_flash/firmware_update: validate the block list
 * accumulated by rtas_flash_write() and, if it passes, publish it in
 * rtas_firmware_flash_list for flashing at reboot.
 */
static int rtas_flash_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
	struct rtas_update_flash_t *uf;

	uf = (struct rtas_update_flash_t *) dp->data;
	if (uf->flist) {
		/* File was opened in write mode for a new flash attempt */
		/* Clear saved list */
		if (rtas_firmware_flash_list) {
			free_flash_list(rtas_firmware_flash_list);
			rtas_firmware_flash_list = NULL;
		}

		if (uf->status != FLASH_AUTH)
			uf->status = flash_list_valid(uf->flist);

		if (uf->status == FLASH_IMG_READY)
			rtas_firmware_flash_list = uf->flist;
		else
			free_flash_list(uf->flist);	/* image rejected */

		uf->flist = NULL;
	}

	/* drop the use count taken in rtas_excl_open() */
	atomic_dec(&dp->count);
	return 0;
}
/* Render a human-readable line for a FLASH_* status code into buf. */
static void get_flash_status_msg(int status, char *buf)
{
	const char *text;

	switch (status) {
	case FLASH_AUTH:
		text = "error: this partition does not have service authority\n";
		break;
	case FLASH_NO_OP:
		text = "info: no firmware image for flash\n";
		break;
	case FLASH_IMG_SHORT:
		text = "error: flash image short\n";
		break;
	case FLASH_IMG_BAD_LEN:
		text = "error: internal error bad length\n";
		break;
	case FLASH_IMG_NULL_DATA:
		text = "error: internal error null data\n";
		break;
	case FLASH_IMG_READY:
		text = "ready: firmware image ready for flash on reboot\n";
		break;
	default:
		/* unknown codes get the numeric value instead */
		sprintf(buf, "error: unexpected status value %d\n", status);
		return;
	}

	strcpy(buf, text);
}
/* Reading the proc file will show status (not the firmware contents) */
static ssize_t rtas_flash_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
	struct rtas_update_flash_t *uf;
	char msg[RTAS_MSG_MAXLEN];

	uf = dp->data;

	if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) {
		/* firmware_flash reports human-readable text */
		get_flash_status_msg(uf->status, msg);
	} else {	   /* FIRMWARE_UPDATE_NAME */
		/* firmware_update reports the raw numeric status */
		sprintf(msg, "%d\n", uf->status);
	}

	return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg));
}
/* constructor for flash_block_cache */
/* Zero every object handed out by the slab cache (one 4k RTAS block). */
void rtas_block_ctor(void *ptr)
{
	memset(ptr, 0, RTAS_BLK_SIZE);
}
/* We could be much more efficient here. But to keep this function
 * simple we allocate a page to the block list no matter how small the
 * count is. If the system is low on memory it will be just as well
 * that we fail....
 */
static ssize_t rtas_flash_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *off)
{
	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
	struct rtas_update_flash_t *uf;
	char *p;
	int next_free;
	struct flash_block_list *fl;

	uf = (struct rtas_update_flash_t *) dp->data;

	if (uf->status == FLASH_AUTH || count == 0)
		return count;	/* discard data */

	/* In the case that the image is not ready for flashing, the memory
	 * allocated for the block list will be freed upon the release of the
	 * proc file
	 */
	if (uf->flist == NULL) {
		uf->flist = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
		if (!uf->flist)
			return -ENOMEM;
	}

	fl = uf->flist;
	while (fl->next)
		fl = fl->next; /* seek to last block_list for append */
	next_free = fl->num_blocks;
	if (next_free == FLASH_BLOCKS_PER_NODE) {
		/* Need to allocate another block_list */
		fl->next = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
		if (!fl->next)
			return -ENOMEM;
		fl = fl->next;
		next_free = 0;
	}

	/* each data block occupies at most one 4k slab object */
	if (count > RTAS_BLK_SIZE)
		count = RTAS_BLK_SIZE;
	p = kmem_cache_alloc(flash_block_cache, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	if(copy_from_user(p, buffer, count)) {
		kmem_cache_free(flash_block_cache, p);
		return -EFAULT;
	}
	/* commit the new block; a short copy is recorded by length */
	fl->blocks[next_free].data = p;
	fl->blocks[next_free].length = count;
	fl->num_blocks++;

	return count;
}
/*
 * Open callback shared by all flash proc files: only one opener is
 * allowed at a time, tracked via the proc entry's use count.
 * NOTE(review): "> 2" assumes a baseline count of 2 for an unopened
 * registered entry — confirm against the proc_fs refcount semantics.
 */
static int rtas_excl_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *dp = PDE(inode);

	/* Enforce exclusive open with use count of PDE */
	spin_lock(&flash_file_open_lock);
	if (atomic_read(&dp->count) > 2) {
		spin_unlock(&flash_file_open_lock);
		return -EBUSY;
	}

	atomic_inc(&dp->count);
	spin_unlock(&flash_file_open_lock);

	return 0;
}
/* Drop the PDE use count taken in rtas_excl_open(). */
static int rtas_excl_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *dp = PDE(inode);

	atomic_dec(&dp->count);
	return 0;
}
/*
 * Run ibm,manage-flash-image (commit or reject the temporary image),
 * retrying while RTAS reports busy, and record the final RTAS rc.
 */
static void manage_flash(struct rtas_manage_flash_t *args_buf)
{
	s32 rc;

	do {
		rc = rtas_call(rtas_token("ibm,manage-flash-image"),
			       1, 1, NULL, args_buf->op);
	} while (rtas_busy_delay(rc));

	args_buf->status = rc;
}
/* Report the status of the last manage-flash operation to userspace. */
static ssize_t manage_flash_read(struct file *file, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
	struct rtas_manage_flash_t *args_buf = dp->data;
	char msg[RTAS_MSG_MAXLEN];
	int len;

	if (!args_buf)
		return 0;

	len = sprintf(msg, "%d\n", args_buf->status);
	return simple_read_from_buffer(buf, count, ppos, msg, len);
}
/*
 * Accept "0" (reject) or "1" (commit) from userspace and run the
 * corresponding ibm,manage-flash-image operation.
 */
static ssize_t manage_flash_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *off)
{
	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
	struct rtas_manage_flash_t *args_buf;
	const char reject_str[] = "0";
	const char commit_str[] = "1";
	char stkbuf[10];
	int op;

	args_buf = (struct rtas_manage_flash_t *) dp->data;
	if ((args_buf->status == MANAGE_AUTH) || (count == 0))
		return count;	/* discard data */

	op = -1;
	if (buf) {
		if (count > 9)
			count = 9;
		if (copy_from_user(stkbuf, buf, count))
			return -EFAULT;
		/*
		 * Fix: copy_from_user() does not NUL-terminate; make
		 * stkbuf a proper C string before comparing it below.
		 */
		stkbuf[count] = '\0';
		if (strncmp(stkbuf, reject_str, strlen(reject_str)) == 0)
			op = RTAS_REJECT_TMP_IMG;
		else if (strncmp(stkbuf, commit_str, strlen(commit_str)) == 0)
			op = RTAS_COMMIT_TMP_IMG;
	}

	if (op == -1)	/* buf is empty, or contains invalid string */
		return -EINVAL;

	args_buf->op = op;
	manage_flash(args_buf);

	return count;
}
/*
 * Run ibm,validate-flash-image on the buffered candidate header.  The
 * bytes are bounced through the kernel-owned rtas_data_buf under
 * rtas_data_buf_lock; results come back in the same buffer.
 */
static void validate_flash(struct rtas_validate_flash_t *args_buf)
{
	int token = rtas_token("ibm,validate-flash-image");
	int update_results;
	s32 rc;

	rc = 0;
	do {
		spin_lock(&rtas_data_buf_lock);
		memcpy(rtas_data_buf, args_buf->buf, VALIDATE_BUF_SIZE);
		rc = rtas_call(token, 2, 2, &update_results,
			       (u32) __pa(rtas_data_buf), args_buf->buf_size);
		memcpy(args_buf->buf, rtas_data_buf, VALIDATE_BUF_SIZE);
		spin_unlock(&rtas_data_buf_lock);
	} while (rtas_busy_delay(rc));	/* retry while RTAS reports busy */

	args_buf->status = rc;
	args_buf->update_results = update_results;
}
/*
 * Format the validate-flash result for userspace: either the raw status
 * or the update-results token plus the text RTAS left in the buffer.
 * Returns the message length.
 */
static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
				  char *msg)
{
	int len;

	if (args_buf->status < VALIDATE_TMP_UPDATE)
		return sprintf(msg, "%d\n", args_buf->status);

	len = sprintf(msg, "%d\n", args_buf->update_results);
	if (args_buf->update_results >= VALIDATE_CUR_UNKNOWN ||
	    args_buf->update_results == VALIDATE_TMP_UPDATE)
		len += sprintf(msg + len, "%s\n", args_buf->buf);

	return len;
}
/* Report the outcome of the last validate operation to userspace. */
static ssize_t validate_flash_read(struct file *file, char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
	struct rtas_validate_flash_t *args_buf;
	char msg[RTAS_MSG_MAXLEN];
	int msglen;

	args_buf = dp->data;

	msglen = get_validate_flash_msg(args_buf, msg);

	return simple_read_from_buffer(buf, count, ppos, msg, msglen);
}
/*
 * Buffer up to the first VALIDATE_BUF_SIZE bytes of a candidate image;
 * the actual RTAS validation runs at release time.
 */
static ssize_t validate_flash_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *off)
{
	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
	struct rtas_validate_flash_t *args_buf;
	int rc;

	if (dp->data == NULL) {
		/* kzalloc so the status checks below never read garbage */
		dp->data = kzalloc(sizeof(struct rtas_validate_flash_t),
				   GFP_KERNEL);
		if (dp->data == NULL)
			return -ENOMEM;
	}
	/*
	 * Fix: fetch dp->data only after it is known to be allocated.
	 * The old code cached args_buf *before* the allocation above, so
	 * a first write with no pre-allocated buffer dereferenced a stale
	 * NULL pointer below.
	 */
	args_buf = (struct rtas_validate_flash_t *) dp->data;

	/* We are only interested in the first 4K of the
	 * candidate image */
	if ((*off >= VALIDATE_BUF_SIZE) ||
	    (args_buf->status == VALIDATE_AUTH)) {
		*off += count;
		return count;
	}

	if (*off + count >= VALIDATE_BUF_SIZE) {
		count = VALIDATE_BUF_SIZE - *off;
		args_buf->status = VALIDATE_READY;
	} else {
		args_buf->status = VALIDATE_INCOMPLETE;
	}

	if (!access_ok(VERIFY_READ, buf, count)) {
		rc = -EFAULT;
		goto done;
	}
	if (copy_from_user(args_buf->buf + *off, buf, count)) {
		rc = -EFAULT;
		goto done;
	}

	*off += count;
	rc = count;
done:
	if (rc < 0) {
		kfree(dp->data);
		dp->data = NULL;
	}
	return rc;
}
/*
 * On close: if a full 4k header was buffered, run the RTAS validation.
 * The result can then be read back via validate_flash_read().
 */
static int validate_flash_release(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
	struct rtas_validate_flash_t *args_buf;

	args_buf = (struct rtas_validate_flash_t *) dp->data;

	if (args_buf->status == VALIDATE_READY) {
		args_buf->buf_size = VALIDATE_BUF_SIZE;
		validate_flash(args_buf);
	}

	/* The matching atomic_inc was in rtas_excl_open() */
	atomic_dec(&dp->count);
	return 0;
}
/*
 * Called from the reboot path (via rtas_flash_term_hook): rewrite the
 * saved block list into the format the firmware expects (absolute
 * addresses, version/length header words) and hand it to the
 * ibm,update-flash-64-and-reboot RTAS call.  Only acts on SYS_RESTART.
 */
static void rtas_flash_firmware(int reboot_type)
{
	unsigned long image_size;
	struct flash_block_list *f, *next, *flist;
	unsigned long rtas_block_list;
	int i, status, update_token;

	if (rtas_firmware_flash_list == NULL)
		return;		/* nothing to do */

	if (reboot_type != SYS_RESTART) {
		printk(KERN_ALERT "FLASH: firmware flash requires a reboot\n");
		printk(KERN_ALERT "FLASH: the firmware image will NOT be flashed\n");
		return;
	}

	update_token = rtas_token("ibm,update-flash-64-and-reboot");
	if (update_token == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_ALERT "FLASH: ibm,update-flash-64-and-reboot "
		       "is not available -- not a service partition?\n");
		printk(KERN_ALERT "FLASH: firmware will not be flashed\n");
		return;
	}

	/*
	 * NOTE: the "first" block must be under 4GB, so we create
	 * an entry with no data blocks in the reserved buffer in
	 * the kernel data segment.
	 */
	spin_lock(&rtas_data_buf_lock);
	flist = (struct flash_block_list *)&rtas_data_buf[0];
	flist->num_blocks = 0;
	flist->next = rtas_firmware_flash_list;
	rtas_block_list = virt_to_abs(flist);
	if (rtas_block_list >= 4UL*1024*1024*1024) {
		printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
		spin_unlock(&rtas_data_buf_lock);
		return;
	}

	printk(KERN_ALERT "FLASH: preparing saved firmware image for flash\n");
	/* Update the block_list in place. */
	rtas_firmware_flash_list = NULL; /* too hard to backout on error */
	image_size = 0;
	for (f = flist; f; f = next) {
		/* Translate data addrs to absolute */
		for (i = 0; i < f->num_blocks; i++) {
			f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data);
			image_size += f->blocks[i].length;
		}
		/* next must be read before num_blocks is clobbered below */
		next = f->next;
		/* Don't translate NULL pointer for last entry */
		if (f->next)
			f->next = (struct flash_block_list *)virt_to_abs(f->next);
		else
			f->next = NULL;
		/* make num_blocks into the version/length field */
		f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
	}

	printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
	printk(KERN_ALERT "FLASH: performing flash and reboot\n");
	rtas_progress("Flashing \n", 0x0);
	rtas_progress("Please Wait... ", 0x0);
	printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n");
	status = rtas_call(update_token, 1, 1, NULL, rtas_block_list);
	switch (status) {	/* should only get "bad" status */
	case 0:
		printk(KERN_ALERT "FLASH: success\n");
		break;
	case -1:
		printk(KERN_ALERT "FLASH: hardware error. Firmware may not be not flashed\n");
		break;
	case -3:
		printk(KERN_ALERT "FLASH: image is corrupt or not correct for this platform. Firmware not flashed\n");
		break;
	case -4:
		printk(KERN_ALERT "FLASH: flash failed when partially complete. System may not reboot\n");
		break;
	default:
		printk(KERN_ALERT "FLASH: unknown flash return code %d\n", status);
		break;
	}
	spin_unlock(&rtas_data_buf_lock);
}
/* Tear down one proc entry and the status buffer hanging off it. */
static void remove_flash_pde(struct proc_dir_entry *dp)
{
	if (!dp)
		return;

	kfree(dp->data);
	remove_proc_entry(dp->name, dp->parent);
}
/*
 * Allocate the zeroed status/argument buffer for one flash proc entry
 * and seed its leading status int according to whether the named RTAS
 * service exists.  Removes the entry again on allocation failure.
 */
static int initialize_flash_pde_data(const char *rtas_call_name,
				     size_t buf_size,
				     struct proc_dir_entry *dp)
{
	int *status;

	dp->data = kzalloc(buf_size, GFP_KERNEL);
	if (dp->data == NULL) {
		remove_flash_pde(dp);
		return -ENOMEM;
	}

	/*
	 * Every rtas_*_flash_t struct starts with its status int, so the
	 * buffer can be seeded through a plain int pointer.
	 */
	status = (int *) dp->data;
	if (rtas_token(rtas_call_name) == RTAS_UNKNOWN_SERVICE)
		*status = FLASH_AUTH;
	else
		*status = FLASH_NO_OP;

	return 0;
}
/* Create a root-only (0600) proc entry for one of the flash interfaces. */
static struct proc_dir_entry *create_flash_pde(const char *filename,
					       const struct file_operations *fops)
{
	return proc_create(filename, S_IRUSR | S_IWUSR, NULL, fops);
}
/* firmware_flash/firmware_update: stream an image in, validate on close. */
static const struct file_operations rtas_flash_operations = {
	.owner = THIS_MODULE,
	.read = rtas_flash_read,
	.write = rtas_flash_write,
	.open = rtas_excl_open,
	.release = rtas_flash_release,
	.llseek = default_llseek,
};

/* manage_flash: write "0"/"1" to reject/commit, read back the status. */
static const struct file_operations manage_flash_operations = {
	.owner = THIS_MODULE,
	.read = manage_flash_read,
	.write = manage_flash_write,
	.open = rtas_excl_open,
	.release = rtas_excl_release,
	.llseek = default_llseek,
};

/* validate_flash: write a candidate header, validation runs on close. */
static const struct file_operations validate_flash_operations = {
	.owner = THIS_MODULE,
	.read = validate_flash_read,
	.write = validate_flash_write,
	.open = rtas_excl_open,
	.release = validate_flash_release,
	.llseek = default_llseek,
};
/*
 * Module init: create the four proc entries and the 4k-aligned slab
 * cache, then arm the reboot-time flash hook.  Any failure unwinds the
 * proc entries via the cleanup label.
 */
static int __init rtas_flash_init(void)
{
	int rc;

	if (rtas_token("ibm,update-flash-64-and-reboot") ==
		       RTAS_UNKNOWN_SERVICE) {
		printk(KERN_ERR "rtas_flash: no firmware flash support\n");
		return 1;
	}

	firmware_flash_pde = create_flash_pde("powerpc/rtas/"
					      FIRMWARE_FLASH_NAME,
					      &rtas_flash_operations);
	if (firmware_flash_pde == NULL) {
		rc = -ENOMEM;
		goto cleanup;
	}

	rc = initialize_flash_pde_data("ibm,update-flash-64-and-reboot",
				       sizeof(struct rtas_update_flash_t),
				       firmware_flash_pde);
	if (rc != 0)
		goto cleanup;

	firmware_update_pde = create_flash_pde("powerpc/rtas/"
					       FIRMWARE_UPDATE_NAME,
					       &rtas_flash_operations);
	if (firmware_update_pde == NULL) {
		rc = -ENOMEM;
		goto cleanup;
	}

	rc = initialize_flash_pde_data("ibm,update-flash-64-and-reboot",
				       sizeof(struct rtas_update_flash_t),
				       firmware_update_pde);
	if (rc != 0)
		goto cleanup;

	validate_pde = create_flash_pde("powerpc/rtas/" VALIDATE_FLASH_NAME,
					&validate_flash_operations);
	if (validate_pde == NULL) {
		rc = -ENOMEM;
		goto cleanup;
	}

	rc = initialize_flash_pde_data("ibm,validate-flash-image",
				       sizeof(struct rtas_validate_flash_t),
				       validate_pde);
	if (rc != 0)
		goto cleanup;

	manage_pde = create_flash_pde("powerpc/rtas/" MANAGE_FLASH_NAME,
				      &manage_flash_operations);
	if (manage_pde == NULL) {
		rc = -ENOMEM;
		goto cleanup;
	}

	rc = initialize_flash_pde_data("ibm,manage-flash-image",
				       sizeof(struct rtas_manage_flash_t),
				       manage_pde);
	if (rc != 0)
		goto cleanup;

	flash_block_cache = kmem_cache_create("rtas_flash_cache",
					      RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
					      rtas_block_ctor);
	if (!flash_block_cache) {
		printk(KERN_ERR "%s: failed to create block cache\n",
		       __func__);
		rc = -ENOMEM;
		goto cleanup;
	}

	/*
	 * Fix: arm the reboot hook only once everything above succeeded.
	 * The old code installed it before creating the slab cache, so a
	 * cache failure tore down the proc entries but left a live hook.
	 */
	rtas_flash_term_hook = rtas_flash_firmware;
	return 0;

cleanup:
	remove_flash_pde(firmware_flash_pde);
	remove_flash_pde(firmware_update_pde);
	remove_flash_pde(validate_pde);
	remove_flash_pde(manage_pde);

	return rc;
}
/* Module exit: disarm the reboot hook, then free the cache and entries. */
static void __exit rtas_flash_cleanup(void)
{
	rtas_flash_term_hook = NULL;

	if (flash_block_cache)
		kmem_cache_destroy(flash_block_cache);

	remove_flash_pde(manage_pde);
	remove_flash_pde(validate_pde);
	remove_flash_pde(firmware_update_pde);
	remove_flash_pde(firmware_flash_pde);
}
module_init(rtas_flash_init);
module_exit(rtas_flash_cleanup);
MODULE_LICENSE("GPL");
| gpl-2.0 |
RoGod/kernel_xtreme_mega_beta | drivers/staging/comedi/drivers/adv_pci1723.c | 2691 | 14322 | /*******************************************************************************
comedi/drivers/pci1723.c
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*******************************************************************************/
/*
Driver: adv_pci1723
Description: Advantech PCI-1723
Author: yonggang <rsmgnu@gmail.com>, Ian Abbott <abbotti@mev.co.uk>
Devices: [Advantech] PCI-1723 (adv_pci1723)
Updated: Mon, 14 Apr 2008 15:12:56 +0100
Status: works
Configuration Options:
[0] - PCI bus of device (optional)
[1] - PCI slot of device (optional)
If bus/slot is not specified, the first supported
PCI device found will be used.
Subdevice 0 is 8-channel AO, 16-bit, range +/- 10 V.
Subdevice 1 is 16-channel DIO. The channels are configurable as input or
output in 2 groups (0 to 7, 8 to 15). Configuring any channel implicitly
configures all channels in the same group.
TODO:
1. Add the two milliamp ranges to the AO subdevice (0 to 20 mA, 4 to 20 mA).
2. Read the initial ranges and values of the AO subdevice at start-up instead
of reinitializing them.
3. Implement calibration.
*/
#include "../comedidev.h"
#include "comedi_pci.h"
#define PCI_VENDOR_ID_ADVANTECH 0x13fe /* Advantech PCI vendor ID */
/* hardware types of the cards */
#define TYPE_PCI1723 0
#define IORANGE_1723 0x2A
/* all the registers for the pci1723 board */
#define PCI1723_DA(N) ((N)<<1) /* W: D/A register N (0 to 7) */
#define PCI1723_SYN_SET 0x12 /* synchronized set register */
#define PCI1723_ALL_CHNNELE_SYN_STROBE 0x12
/* synchronized status register */
#define PCI1723_RANGE_CALIBRATION_MODE 0x14
/* range and calibration mode */
#define PCI1723_RANGE_CALIBRATION_STATUS 0x14
/* range and calibration status */
#define PCI1723_CONTROL_CMD_CALIBRATION_FUN 0x16
/*
* SADC control command for
* calibration function
*/
#define PCI1723_STATUS_CMD_CALIBRATION_FUN 0x16
/*
* SADC control status for
* calibration function
*/
#define PCI1723_CALIBRATION_PARA_STROBE 0x18
/* Calibration parameter strobe */
#define PCI1723_DIGITAL_IO_PORT_SET 0x1A /* Digital I/O port setting */
#define PCI1723_DIGITAL_IO_PORT_MODE 0x1A /* Digital I/O port mode */
#define PCI1723_WRITE_DIGITAL_OUTPUT_CMD 0x1C
/* Write digital output command */
#define PCI1723_READ_DIGITAL_INPUT_DATA 0x1C /* Read digital input data */
#define PCI1723_WRITE_CAL_CMD 0x1E /* Write calibration command */
#define PCI1723_READ_CAL_STATUS 0x1E /* Read calibration status */
#define PCI1723_SYN_STROBE 0x20 /* Synchronized strobe */
#define PCI1723_RESET_ALL_CHN_STROBE 0x22
/* Reset all D/A channels strobe */
#define PCI1723_RESET_CAL_CONTROL_STROBE 0x24
/*
* Reset the calibration
* controller strobe
*/
#define PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE 0x26
/*
* Change D/A channels output
* type strobe
*/
#define PCI1723_SELECT_CALIBRATION 0x28 /* Select the calibration Ref_V */
/* static unsigned short pci_list_builded=0; =1 list of card is know */

/* Single analog-output range: bipolar +/- 10 V. */
static const struct comedi_lrange range_pci1723 = { 1, {
							BIP_RANGE(10)
							}
};

/*
 * Board descriptions for pci1723 boards.
 */
struct pci1723_board {
	const char *name;
	int vendor_id;		/* PCI vendor a device ID of card */
	int device_id;
	int iorange;
	char cardtype;
	int n_aochan;		/* num of D/A chans */
	int n_diochan;		/* num of DIO chans */
	int ao_maxdata;		/* resolution of D/A */
	const struct comedi_lrange *rangelist_ao;	/* rangelist for D/A */
};

/* The one and only supported board. */
static const struct pci1723_board boardtypes[] = {
	{
	 .name = "pci1723",
	 .vendor_id = PCI_VENDOR_ID_ADVANTECH,
	 .device_id = 0x1723,
	 .iorange = IORANGE_1723,
	 .cardtype = TYPE_PCI1723,
	 .n_aochan = 8,
	 .n_diochan = 16,
	 .ao_maxdata = 0xffff,
	 .rangelist_ao = &range_pci1723,
	 },
};

/*
 * This is used by modprobe to translate PCI IDs to drivers.
 * Should only be used for PCI and ISA-PnP devices
 */
static DEFINE_PCI_DEVICE_TABLE(pci1723_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ADVANTECH, 0x1723) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pci1723_pci_table);

/*
 * The struct comedi_driver structure tells the Comedi core module
 * which functions to call to configure/deconfigure (attach/detach)
 * the board, and also about the kernel module that contains
 * the device code.
 */
static int pci1723_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it);
static int pci1723_detach(struct comedi_device *dev);

#define n_boardtypes (sizeof(boardtypes)/sizeof(struct pci1723_board))

static struct comedi_driver driver_pci1723 = {
	.driver_name = "adv_pci1723",
	.module = THIS_MODULE,
	.attach = pci1723_attach,
	.detach = pci1723_detach,
};

/* This structure is for data unique to this hardware driver. */
struct pci1723_private {
	int valid;		/* card is usable; */
	struct pci_dev *pcidev;
	unsigned char da_range[8];	/* D/A output range for each channel */
	short ao_data[8];	/* data output buffer */
};

/* The following macro to make it easy to access the private structure. */
#define devpriv ((struct pci1723_private *)dev->private)
/* Only one entry in boardtypes, so this_board is that single board. */
#define this_board boardtypes
/*
 * The pci1723 card reset;
 */
/*
 * Put the board in a known state: synchronous update mode, all eight
 * D/A channels at mid-scale (0x8000) with the +/-10 V range, strobe
 * the updates out, then return to asynchronous output mode.
 */
static int pci1723_reset(struct comedi_device *dev)
{
	int i;

	DPRINTK("adv_pci1723 EDBG: BGN: pci1723_reset(...)\n");

	outw(0x01, dev->iobase + PCI1723_SYN_SET);
	/* set synchronous output mode */

	for (i = 0; i < 8; i++) {
		/* set all outputs to 0V */
		devpriv->ao_data[i] = 0x8000;
		outw(devpriv->ao_data[i], dev->iobase + PCI1723_DA(i));
		/* set all ranges to +/- 10V */
		devpriv->da_range[i] = 0;
		/*
		 * Fix: the range-register write was missing the
		 * dev->iobase offset, so it hit raw I/O port 0x14
		 * instead of the board's register.
		 */
		outw(((devpriv->da_range[i] << 4) | i),
		     dev->iobase + PCI1723_RANGE_CALIBRATION_MODE);
	}

	outw(0, dev->iobase + PCI1723_CHANGE_CHA_OUTPUT_TYPE_STROBE);
	/* update ranges */
	outw(0, dev->iobase + PCI1723_SYN_STROBE);	/* update outputs */

	/* set asynchronous output mode */
	outw(0, dev->iobase + PCI1723_SYN_SET);

	DPRINTK("adv_pci1723 EDBG: END: pci1723_reset(...)\n");
	return 0;
}
/* Return the cached value of an AO channel (last value written). */
static int pci1723_insn_read_ao(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);
	int n;

	DPRINTK(" adv_PCI1723 DEBUG: pci1723_insn_read_ao() ----- \n");
	for (n = 0; n < insn->n; n++)
		data[n] = devpriv->ao_data[chan];

	return n;
}
/*
 * Write AO samples: record each value in the shadow buffer and push it
 * out to the channel's D/A register.
 */
static int pci1723_ao_write_winsn(struct comedi_device *dev,
				  struct comedi_subdevice *s,
				  struct comedi_insn *insn, unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);
	int n;

	DPRINTK("PCI1723: the pci1723_ao_write_winsn() ------\n");
	for (n = 0; n < insn->n; n++) {
		unsigned int val = data[n];

		devpriv->ao_data[chan] = val;
		outw(val, dev->iobase + PCI1723_DA(chan));
	}

	return n;
}
/*
digital i/o config/query
*/
/*
 * Configure DIO direction.  The hardware only switches direction per
 * 8-bit group, so configuring any channel implicitly configures its
 * whole group (0-7 or 8-15).
 */
static int pci1723_dio_insn_config(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   struct comedi_insn *insn, unsigned int *data)
{
	unsigned int mask;
	unsigned int bits;
	unsigned short dio_mode;

	mask = 1 << CR_CHAN(insn->chanspec);
	/* pick the byte-wide group the channel belongs to */
	if (mask & 0x00FF)
		bits = 0x00FF;
	else
		bits = 0xFF00;

	switch (data[0]) {
	case INSN_CONFIG_DIO_INPUT:
		s->io_bits &= ~bits;
		break;
	case INSN_CONFIG_DIO_OUTPUT:
		s->io_bits |= bits;
		break;
	case INSN_CONFIG_DIO_QUERY:
		data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT;
		return insn->n;
	default:
		return -EINVAL;
	}

	/* update hardware DIO mode */
	dio_mode = 0x0000;	/* low byte output, high byte output */
	if ((s->io_bits & 0x00FF) == 0)
		dio_mode |= 0x0001;	/* low byte input */
	if ((s->io_bits & 0xFF00) == 0)
		dio_mode |= 0x0002;	/* high byte input */
	outw(dio_mode, dev->iobase + PCI1723_DIGITAL_IO_PORT_SET);
	return 1;
}
/*
digital i/o bits read/write
*/
/* Update masked output bits, then read back the input register. */
static int pci1723_dio_insn_bits(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn, unsigned int *data)
{
	/* data[0] = mask of channels to change, data[1] = new bit values */
	if (data[0]) {
		s->state &= ~data[0];
		s->state |= (data[0] & data[1]);
		outw(s->state, dev->iobase + PCI1723_WRITE_DIGITAL_OUTPUT_CMD);
	}

	/* return the current input state to the caller */
	data[1] = inw(dev->iobase + PCI1723_READ_DIGITAL_INPUT_DATA);

	return 2;
}
/*
* Attach is called by the Comedi core to configure the driver
* for a pci1723 board.
*/
/*
 * Attach: locate and enable the matching Advantech PCI card, record its
 * I/O base, and set up the AO and DIO subdevices.
 */
static int pci1723_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
struct comedi_subdevice *s;
int ret, subdev, n_subdevices;
struct pci_dev *pcidev;
unsigned int iobase;
unsigned char pci_bus, pci_slot, pci_func;
int opt_bus, opt_slot;
const char *errstr;
/* NOTE(review): KERN_ERR is used here and below for purely
 * informational attach messages; KERN_INFO looks more appropriate —
 * confirm before changing log levels. */
printk(KERN_ERR "comedi%d: adv_pci1723: board=%s",
dev->minor, this_board->name);
/* options[0]/[1] optionally pin the card to a specific bus/slot */
opt_bus = it->options[0];
opt_slot = it->options[1];
ret = alloc_private(dev, sizeof(struct pci1723_private));
if (ret < 0) {
printk(" - Allocation failed!\n");
return -ENOMEM;
}
/* Look for matching PCI device */
errstr = "not found!";
pcidev = NULL;
while (NULL != (pcidev =
pci_get_device(PCI_VENDOR_ID_ADVANTECH,
this_board->device_id, pcidev))) {
/* Found matching vendor/device. */
if (opt_bus || opt_slot) {
/* Check bus/slot. */
if (opt_bus != pcidev->bus->number
|| opt_slot != PCI_SLOT(pcidev->devfn))
continue; /* no match */
}
/*
* Look for device that isn't in use.
* Enable PCI device and request regions.
*/
if (comedi_pci_enable(pcidev, "adv_pci1723")) {
errstr =
"failed to enable PCI device and request regions!";
continue;
}
break;
}
if (!pcidev) {
if (opt_bus || opt_slot) {
printk(KERN_ERR " - Card at b:s %d:%d %s\n",
opt_bus, opt_slot, errstr);
} else {
printk(KERN_ERR " - Card %s\n", errstr);
}
return -EIO;
}
pci_bus = pcidev->bus->number;
pci_slot = PCI_SLOT(pcidev->devfn);
pci_func = PCI_FUNC(pcidev->devfn);
/* BAR 2 holds the board's I/O port block */
iobase = pci_resource_start(pcidev, 2);
printk(KERN_ERR ", b:s:f=%d:%d:%d, io=0x%4x",
pci_bus, pci_slot, pci_func, iobase);
dev->iobase = iobase;
dev->board_name = this_board->name;
devpriv->pcidev = pcidev;
/* one subdevice each for AO and DIO, when the board has them */
n_subdevices = 0;
if (this_board->n_aochan)
n_subdevices++;
if (this_board->n_diochan)
n_subdevices++;
ret = alloc_subdevices(dev, n_subdevices);
if (ret < 0) {
printk(" - Allocation failed!\n");
return ret;
}
pci1723_reset(dev);
subdev = 0;
if (this_board->n_aochan) {
/* analog output subdevice */
s = dev->subdevices + subdev;
dev->write_subdev = s;
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = this_board->n_aochan;
s->maxdata = this_board->ao_maxdata;
s->len_chanlist = this_board->n_aochan;
s->range_table = this_board->rangelist_ao;
s->insn_write = pci1723_ao_write_winsn;
s->insn_read = pci1723_insn_read_ao;
/* read DIO config */
/* NOTE(review): this DIO mode/state readback sets io_bits and
 * state on the AO subdevice; presumably it was meant for the
 * DIO subdevice set up below — verify against the hardware
 * manual and the DIO insn handlers before relying on it. */
switch (inw(dev->iobase + PCI1723_DIGITAL_IO_PORT_MODE)
& 0x03) {
case 0x00: /* low byte output, high byte output */
s->io_bits = 0xFFFF;
break;
case 0x01: /* low byte input, high byte output */
s->io_bits = 0xFF00;
break;
case 0x02: /* low byte output, high byte input */
s->io_bits = 0x00FF;
break;
case 0x03: /* low byte input, high byte input */
s->io_bits = 0x0000;
break;
}
/* read DIO port state */
s->state = inw(dev->iobase + PCI1723_READ_DIGITAL_INPUT_DATA);
subdev++;
}
if (this_board->n_diochan) {
/* digital input/output subdevice */
s = dev->subdevices + subdev;
s->type = COMEDI_SUBD_DIO;
s->subdev_flags =
SDF_READABLE | SDF_WRITABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = this_board->n_diochan;
s->maxdata = 1;
s->len_chanlist = this_board->n_diochan;
s->range_table = &range_digital;
s->insn_config = pci1723_dio_insn_config;
s->insn_bits = pci1723_dio_insn_bits;
subdev++;
}
/* mark the device usable so detach knows it may reset it */
devpriv->valid = 1;
pci1723_reset(dev);
return 0;
}
/*
* _detach is called to deconfigure a device. It should deallocate
* resources.
* This function is also called when _attach() fails, so it should be
* careful not to release resources that were not necessarily
* allocated by _attach(). dev->private and dev->subdevices are
* deallocated automatically by the core.
*/
/*
 * Deconfigure the device: reset the hardware (if it was fully set up)
 * and release the PCI resources.  This also runs when _attach() fails,
 * so every release is guarded by a check that the resource was
 * actually acquired.
 */
static int pci1723_detach(struct comedi_device *dev)
{
	printk(KERN_ERR "comedi%d: pci1723: remove\n", dev->minor);

	if (!dev->private)
		return 0;

	if (devpriv->valid)
		pci1723_reset(dev);
	if (devpriv->pcidev) {
		if (dev->iobase)
			comedi_pci_disable(devpriv->pcidev);
		pci_dev_put(devpriv->pcidev);
	}
	return 0;
}
/*
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
/* PCI probe hook: hand the device to the comedi core for auto-config. */
static int __devinit driver_pci1723_pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
return comedi_pci_auto_config(dev, driver_pci1723.driver_name);
}
/* PCI remove hook: undo the comedi auto-configuration. */
static void __devexit driver_pci1723_pci_remove(struct pci_dev *dev)
{
comedi_pci_auto_unconfig(dev);
}
/* PCI driver glue; .name is filled in at module init from the comedi
 * driver's name. */
static struct pci_driver driver_pci1723_pci_driver = {
.id_table = pci1723_pci_table,
.probe = &driver_pci1723_pci_probe,
.remove = __devexit_p(&driver_pci1723_pci_remove)
};
/*
 * Module init: register the comedi driver, then register with the PCI
 * core so matching cards are auto-configured.
 *
 * If PCI registration fails, the comedi registration is rolled back so
 * the module does not leave a half-registered driver behind.
 */
static int __init driver_pci1723_init_module(void)
{
	int retval;

	retval = comedi_driver_register(&driver_pci1723);
	if (retval < 0)
		return retval;

	driver_pci1723_pci_driver.name = (char *)driver_pci1723.driver_name;
	retval = pci_register_driver(&driver_pci1723_pci_driver);
	if (retval < 0)
		comedi_driver_unregister(&driver_pci1723);
	return retval;
}
/* Module exit: unregister from the PCI core first, then from comedi. */
static void __exit driver_pci1723_cleanup_module(void)
{
pci_unregister_driver(&driver_pci1723_pci_driver);
comedi_driver_unregister(&driver_pci1723);
}
/* Module entry/exit points and metadata. */
module_init(driver_pci1723_init_module);
module_exit(driver_pci1723_cleanup_module);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
StarkDroid/android_kernel_motorola_msm8610 | fs/ramfs/inode.c | 4483 | 6887 | /*
* Resizable simple ram filesystem for Linux.
*
* Copyright (C) 2000 Linus Torvalds.
* 2000 Transmeta Corp.
*
* Usage limits added by David Gibson, Linuxcare Australia.
* This file is released under the GPL.
*/
/*
* NOTE! This filesystem is probably most useful
* not as a real filesystem, but as an example of
* how virtual filesystems can be written.
*
* It doesn't get much simpler than this. Consider
* that this file implements the full semantics of
* a POSIX-compliant read-write filesystem.
*
* Note in particular how the filesystem does not
* need to implement any data structures of its own
* to keep track of the virtual data: using the VFS
* caches is sufficient.
*/
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/sched.h>
#include <linux/parser.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "internal.h"
/* Default mode for the filesystem root when no mode= option is given. */
#define RAMFS_DEFAULT_MODE 0755
static const struct super_operations ramfs_ops;
static const struct inode_operations ramfs_dir_inode_operations;
/* Backing device: no readahead and no writeback accounting — pages in
 * the page cache *are* the backing store. */
static struct backing_dev_info ramfs_backing_dev_info = {
.name = "ramfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK |
BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP,
};
/*
 * Allocate and initialise a new ramfs inode of the requested type.
 * Returns the inode, or NULL if allocation failed.
 */
struct inode *ramfs_get_inode(struct super_block *sb,
		const struct inode *dir, umode_t mode, dev_t dev)
{
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;

	inode->i_ino = get_next_ino();
	inode_init_owner(inode, dir, mode);
	inode->i_mapping->a_ops = &ramfs_aops;
	inode->i_mapping->backing_dev_info = &ramfs_backing_dev_info;
	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
	/* page cache pages are the data itself: never reclaim them */
	mapping_set_unevictable(inode->i_mapping);
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	switch (mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &ramfs_file_inode_operations;
		inode->i_fop = &ramfs_file_operations;
		break;
	case S_IFDIR:
		inode->i_op = &ramfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		break;
	case S_IFLNK:
		inode->i_op = &page_symlink_inode_operations;
		break;
	default:
		/* device nodes, fifos, sockets */
		init_special_inode(inode, mode, dev);
		break;
	}
	return inode;
}
/*
* File creation. Allocate an inode, and we're done..
*/
/* SMP-safe */
/*
 * File creation: allocate an inode and bind it to the dentry.
 * Returns 0 on success, -ENOSPC if no inode could be allocated.
 */
/* SMP-safe */
static int
ramfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode = ramfs_get_inode(dir->i_sb, dir, mode, dev);

	if (!inode)
		return -ENOSPC;

	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	return 0;
}
/* Create a directory; on success bump the parent's link count for the
 * new child's ".." entry. */
static int ramfs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
{
	int err = ramfs_mknod(dir, dentry, mode | S_IFDIR, 0);

	if (err == 0)
		inc_nlink(dir);
	return err;
}
/* Create a regular file: mknod with S_IFREG or'd into the mode. */
static int ramfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
{
return ramfs_mknod(dir, dentry, mode | S_IFREG, 0);
}
/*
 * Create a symlink; the target string is stored via the page cache
 * (page_symlink).  Returns 0 on success, -ENOSPC if no inode could be
 * allocated, or the page_symlink() error.
 */
static int ramfs_symlink(struct inode * dir, struct dentry *dentry, const char * symname)
{
	struct inode *inode;
	int err;

	inode = ramfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	/* store the target including its terminating NUL */
	err = page_symlink(inode, symname, strlen(symname) + 1);
	if (err) {
		iput(inode);	/* drop the half-built inode */
		return err;
	}

	d_instantiate(dentry, inode);
	dget(dentry);	/* pin the dentry in core */
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	return 0;
}
/* Directory operations: ramfs-specific creation hooks plus libfs
 * "simple_*" helpers for everything the VFS caches can handle alone. */
static const struct inode_operations ramfs_dir_inode_operations = {
.create = ramfs_create,
.lookup = simple_lookup,
.link = simple_link,
.unlink = simple_unlink,
.symlink = ramfs_symlink,
.mkdir = ramfs_mkdir,
.rmdir = simple_rmdir,
.mknod = ramfs_mknod,
.rename = simple_rename,
};
/* Superblock operations — all generic/libfs helpers. */
static const struct super_operations ramfs_ops = {
.statfs = simple_statfs,
.drop_inode = generic_delete_inode,
.show_options = generic_show_options,
};
/* Parsed mount options. */
struct ramfs_mount_opts {
umode_t mode; /* mode bits for the filesystem root */
};
/* Token ids for match_token(). */
enum {
Opt_mode,
Opt_err
};
/* "mode=%o" is the only recognised mount option. */
static const match_table_t tokens = {
{Opt_mode, "mode=%o"},
{Opt_err, NULL}
};
/* Per-superblock private data (sb->s_fs_info). */
struct ramfs_fs_info {
struct ramfs_mount_opts mount_opts;
};
static int ramfs_parse_options(char *data, struct ramfs_mount_opts *opts)
{
substring_t args[MAX_OPT_ARGS];
int option;
int token;
char *p;
opts->mode = RAMFS_DEFAULT_MODE;
while ((p = strsep(&data, ",")) != NULL) {
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_mode:
if (match_octal(&args[0], &option))
return -EINVAL;
opts->mode = option & S_IALLUGO;
break;
/*
* We might like to report bad mount options here;
* but traditionally ramfs has ignored all mount options,
* and as it is used as a !CONFIG_SHMEM simple substitute
* for tmpfs, better continue to ignore other mount options.
*/
}
}
return 0;
}
/*
 * Fill in a new superblock: allocate the per-sb info, parse options,
 * set limits/magic, and create the root inode/dentry.
 *
 * Error paths return without freeing fsi: sb->s_fs_info is already set,
 * so cleanup is left to the kill_sb path.
 */
int ramfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct ramfs_fs_info *fsi;
struct inode *inode;
int err;
save_mount_options(sb, data);
fsi = kzalloc(sizeof(struct ramfs_fs_info), GFP_KERNEL);
sb->s_fs_info = fsi;
if (!fsi)
return -ENOMEM;
err = ramfs_parse_options(data, &fsi->mount_opts);
if (err)
return err;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = RAMFS_MAGIC;
sb->s_op = &ramfs_ops;
sb->s_time_gran = 1;
/* root directory gets the mode from the mount options */
inode = ramfs_get_inode(sb, NULL, S_IFDIR | fsi->mount_opts.mode, 0);
/* d_make_root() consumes the inode even on failure */
sb->s_root = d_make_root(inode);
if (!sb->s_root)
return -ENOMEM;
return 0;
}
/* Mount a ramfs instance; each mount gets its own superblock. */
struct dentry *ramfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_nodev(fs_type, flags, data, ramfs_fill_super);
}
/* Mount rootfs: same as ramfs but marked MS_NOUSER so userspace cannot
 * mount it directly. */
static struct dentry *rootfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_nodev(fs_type, flags|MS_NOUSER, data, ramfs_fill_super);
}
/* Tear down a superblock: free the per-sb info allocated in
 * ramfs_fill_super(), then drop all cached dentries/inodes. */
static void ramfs_kill_sb(struct super_block *sb)
{
kfree(sb->s_fs_info);
kill_litter_super(sb);
}
/* The user-mountable "ramfs" type. */
static struct file_system_type ramfs_fs_type = {
.name = "ramfs",
.mount = ramfs_mount,
.kill_sb = ramfs_kill_sb,
};
/* The boot-time "rootfs" type; note it uses kill_litter_super directly,
 * so its sb->s_fs_info is not freed here (rootfs is never unmounted). */
static struct file_system_type rootfs_fs_type = {
.name = "rootfs",
.mount = rootfs_mount,
.kill_sb = kill_litter_super,
};
/* Register the "ramfs" filesystem type at module init. */
static int __init init_ramfs_fs(void)
{
return register_filesystem(&ramfs_fs_type);
}
module_init(init_ramfs_fs)
/*
 * Register "rootfs" during boot.  The backing_dev_info must be set up
 * first and is torn down again if filesystem registration fails.
 */
int __init init_rootfs(void)
{
	int ret = bdi_init(&ramfs_backing_dev_info);

	if (ret)
		return ret;

	ret = register_filesystem(&rootfs_fs_type);
	if (ret)
		bdi_destroy(&ramfs_backing_dev_info);
	return ret;
}
| gpl-2.0 |
syhost/android_kernel_pantech_ef60s | drivers/staging/rtl8192e/rtllib_crypt.c | 7555 | 5837 | /*
* Host AP crypto routines
*
* Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
* Portions Copyright (C) 2004, Intel Corporation <jketreno@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation. See README and COPYING for
* more details.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "rtllib.h"
/* One registered algorithm; linked into rtllib_crypto.algs. */
struct rtllib_crypto_alg {
struct list_head list;
struct lib80211_crypto_ops *ops;
};
/* Registry of algorithms, guarded by its spinlock. */
struct rtllib_crypto {
struct list_head algs;
spinlock_t lock;
};
/* Module-global registry; NULL until rtllib_crypto_init() runs. */
static struct rtllib_crypto *hcrypt;
/*
 * Destroy entries on the delayed-deinit list whose refcount has
 * dropped to zero.  With @force set, entries are destroyed even while
 * still referenced.  Caller must hold the appropriate lock.
 */
void rtllib_crypt_deinit_entries(struct lib80211_crypt_info *info,
				 int force)
{
	struct list_head *pos, *next;

	for (pos = info->crypt_deinit_list.next, next = pos->next;
	     pos != &info->crypt_deinit_list; pos = next, next = pos->next) {
		struct lib80211_crypt_data *crypt =
			list_entry(pos, struct lib80211_crypt_data, list);

		if (!force && atomic_read(&crypt->refcnt) != 0)
			continue;	/* still in use */

		list_del(pos);
		if (crypt->ops)
			crypt->ops->deinit(crypt->priv);
		kfree(crypt);
	}
}
EXPORT_SYMBOL(rtllib_crypt_deinit_entries);
/*
 * Timer callback: drain the delayed-deinit list.  If entries are still
 * referenced, re-arm the timer for another pass one second later.
 */
void rtllib_crypt_deinit_handler(unsigned long data)
{
struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
unsigned long flags;
spin_lock_irqsave(info->lock, flags);
rtllib_crypt_deinit_entries(info, 0);
if (!list_empty(&info->crypt_deinit_list)) {
printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
"deletion list\n", info->name);
/* try again in one second */
info->crypt_deinit_timer.expires = jiffies + HZ;
add_timer(&info->crypt_deinit_timer);
}
spin_unlock_irqrestore(info->lock, flags);
}
EXPORT_SYMBOL(rtllib_crypt_deinit_handler);
/*
 * Detach *@crypt from its owner and queue it for delayed destruction.
 *
 * ops->deinit() must not run while encrypt/decrypt operations may still
 * be in flight, so the entry is placed on a deinit list that the timer
 * handler drains once nothing references it any more.
 */
void rtllib_crypt_delayed_deinit(struct lib80211_crypt_info *info,
				 struct lib80211_crypt_data **crypt)
{
	struct lib80211_crypt_data *old;
	unsigned long flags;

	if (*crypt == NULL)
		return;

	old = *crypt;
	*crypt = NULL;

	/* must not run ops->deinit() while there may be pending encrypt or
	 * decrypt operations. Use a list of delayed deinits to avoid needing
	 * locking. */
	spin_lock_irqsave(info->lock, flags);
	list_add(&old->list, &info->crypt_deinit_list);
	if (!timer_pending(&info->crypt_deinit_timer)) {
		/* arm the cleanup timer for one second from now */
		info->crypt_deinit_timer.expires = jiffies + HZ;
		add_timer(&info->crypt_deinit_timer);
	}
	spin_unlock_irqrestore(info->lock, flags);
}
EXPORT_SYMBOL(rtllib_crypt_delayed_deinit);
/*
 * Add an algorithm to the registry.
 * Returns 0 on success, -1 if the registry is not initialised, or
 * -ENOMEM on allocation failure.
 */
int rtllib_register_crypto_ops(struct lib80211_crypto_ops *ops)
{
	struct rtllib_crypto_alg *entry;
	unsigned long flags;

	if (hcrypt == NULL)
		return -1;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (entry == NULL)
		return -ENOMEM;

	entry->ops = ops;
	spin_lock_irqsave(&hcrypt->lock, flags);
	list_add(&entry->list, &hcrypt->algs);
	spin_unlock_irqrestore(&hcrypt->lock, flags);

	printk(KERN_DEBUG "rtllib_crypt: registered algorithm '%s'\n",
	       ops->name);
	return 0;
}
EXPORT_SYMBOL(rtllib_register_crypto_ops);
int rtllib_unregister_crypto_ops(struct lib80211_crypto_ops *ops)
{
unsigned long flags;
struct list_head *ptr;
struct rtllib_crypto_alg *del_alg = NULL;
if (hcrypt == NULL)
return -1;
spin_lock_irqsave(&hcrypt->lock, flags);
for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
struct rtllib_crypto_alg *alg =
(struct rtllib_crypto_alg *) ptr;
if (alg->ops == ops) {
list_del(&alg->list);
del_alg = alg;
break;
}
}
spin_unlock_irqrestore(&hcrypt->lock, flags);
if (del_alg) {
printk(KERN_DEBUG "rtllib_crypt: unregistered algorithm "
"'%s'\n", ops->name);
kfree(del_alg);
}
return del_alg ? 0 : -1;
}
EXPORT_SYMBOL(rtllib_unregister_crypto_ops);
struct lib80211_crypto_ops *rtllib_get_crypto_ops(const char *name)
{
unsigned long flags;
struct list_head *ptr;
struct rtllib_crypto_alg *found_alg = NULL;
if (hcrypt == NULL)
return NULL;
spin_lock_irqsave(&hcrypt->lock, flags);
for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
struct rtllib_crypto_alg *alg =
(struct rtllib_crypto_alg *) ptr;
if (strcmp(alg->ops->name, name) == 0) {
found_alg = alg;
break;
}
}
spin_unlock_irqrestore(&hcrypt->lock, flags);
if (found_alg)
return found_alg->ops;
else
return NULL;
}
EXPORT_SYMBOL(rtllib_get_crypto_ops);
/* NULL cipher: a non-NULL dummy priv pointer and a no-op deinit. */
static void * rtllib_crypt_null_init(int keyidx) { return (void *) 1; }
static void rtllib_crypt_null_deinit(void *priv) {}
/* Pass-through "NULL" algorithm: every crypto hook is absent, so frames
 * are neither encrypted nor decrypted. */
static struct lib80211_crypto_ops rtllib_crypt_null = {
.name = "NULL",
.init = rtllib_crypt_null_init,
.deinit = rtllib_crypt_null_deinit,
.encrypt_mpdu = NULL,
.decrypt_mpdu = NULL,
.encrypt_msdu = NULL,
.decrypt_msdu = NULL,
.set_key = NULL,
.get_key = NULL,
.extra_mpdu_prefix_len = 0,
.extra_mpdu_postfix_len = 0,
.extra_msdu_prefix_len = 0,
.extra_msdu_postfix_len = 0,
.owner = THIS_MODULE,
};
/*
 * Module init: allocate the algorithm registry and register the NULL
 * (pass-through) cipher with lib80211.  On failure the registry is
 * freed and hcrypt reset so later calls see an uninitialised state.
 */
int __init rtllib_crypto_init(void)
{
	int ret;

	hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
	if (!hcrypt)
		return -ENOMEM;

	INIT_LIST_HEAD(&hcrypt->algs);
	spin_lock_init(&hcrypt->lock);

	ret = lib80211_register_crypto_ops(&rtllib_crypt_null);
	if (ret < 0) {
		kfree(hcrypt);
		hcrypt = NULL;
	}
	return ret;
}
/*
 * Module exit: free any algorithms still sitting in the registry, then
 * the registry itself.
 */
void __exit rtllib_crypto_deinit(void)
{
	struct list_head *ptr, *n;

	if (hcrypt == NULL)
		return;

	for (ptr = hcrypt->algs.next, n = ptr->next; ptr != &hcrypt->algs;
	     ptr = n, n = ptr->next) {
		/* list_entry() instead of a raw cast: the cast only
		 * works while ->list is the struct's first member */
		struct rtllib_crypto_alg *alg =
			list_entry(ptr, struct rtllib_crypto_alg, list);
		list_del(ptr);
		printk(KERN_DEBUG "rtllib_crypt: unregistered algorithm "
		       "'%s' (deinit)\n", alg->ops->name);
		kfree(alg);
	}
	kfree(hcrypt);
}
/* Module entry/exit points and license. */
module_init(rtllib_crypto_init);
module_exit(rtllib_crypto_deinit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.