repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
Fusion-Devices/android_kernel_samsung_manta | drivers/staging/vt6656/firmware.c | 8160 | 4744 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: baseband.c
*
* Purpose: Implement functions to access baseband
*
* Author: Yiching Chen
*
* Date: May 20, 2004
*
* Functions:
*
* Revision History:
*
*/
#include "firmware.h"
#include "control.h"
#include "rndis.h"
/*--------------------- Static Definitions -------------------------*/
static int msglevel =MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
#define FIRMWARE_VERSION 0x133 /* version 1.51 */
#define FIRMWARE_NAME "vntwusb.fw"
#define FIRMWARE_CHUNK_SIZE 0x400
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*
 * FIRMWAREbDownload - download the vntwusb firmware image to the device.
 *
 * @pDevice: adapter state; pDevice->lock is held by the caller and is
 *           temporarily released here because request_firmware() and the
 *           USB control transfers can sleep.
 *
 * Loads FIRMWARE_NAME via request_firmware() on first use (the image is
 * cached in pDevice->firmware for later re-downloads), then pushes it to
 * the device in FIRMWARE_CHUNK_SIZE control-out transfers starting at
 * device address 0x1200.
 *
 * Returns TRUE on success, FALSE on any failure.
 */
BOOL
FIRMWAREbDownload(
PSDevice pDevice
)
{
const struct firmware *fw;
int NdisStatus;
void *pBuffer = NULL;
BOOL result = FALSE;
u16 wLength;
int ii;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Download firmware\n");
/* Drop the lock: firmware loading and USB control requests may sleep. */
spin_unlock_irq(&pDevice->lock);
if (!pDevice->firmware) {
struct device *dev = &pDevice->usb->dev;
int rc;
rc = request_firmware(&pDevice->firmware, FIRMWARE_NAME, dev);
if (rc) {
dev_err(dev, "firmware file %s request failed (%d)\n",
FIRMWARE_NAME, rc);
goto out;
}
}
fw = pDevice->firmware;
/* Bounce buffer for one chunk of the image. */
pBuffer = kmalloc(FIRMWARE_CHUNK_SIZE, GFP_KERNEL);
if (!pBuffer)
goto out;
for (ii = 0; ii < fw->size; ii += FIRMWARE_CHUNK_SIZE) {
/* The final chunk may be shorter than FIRMWARE_CHUNK_SIZE. */
wLength = min_t(int, fw->size - ii, FIRMWARE_CHUNK_SIZE);
memcpy(pBuffer, fw->data + ii, wLength);
NdisStatus = CONTROLnsRequestOutAsyn(pDevice,
0,
0x1200+ii,
0x0000,
wLength,
pBuffer
);
DBG_PRT(MSG_LEVEL_DEBUG,
KERN_INFO"Download firmware...%d %zu\n", ii, fw->size);
if (NdisStatus != STATUS_SUCCESS)
goto out;
}
result = TRUE;
out:
kfree(pBuffer);
/* Re-acquire the lock released at function entry. */
spin_lock_irq(&pDevice->lock);
return result;
}
MODULE_FIRMWARE(FIRMWARE_NAME);
/*
 * FIRMWAREbBrach2Sram - make the device branch to the firmware in SRAM.
 *
 * @pDevice: adapter state.
 *
 * Issues a zero-length control-out request (request 1, value 0x1200)
 * telling the device to jump to the previously downloaded image.
 *
 * Returns TRUE when the control request reports STATUS_SUCCESS,
 * FALSE otherwise.
 */
BOOL
FIRMWAREbBrach2Sram(
PSDevice pDevice
)
{
int status;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Branch to Sram\n");
status = CONTROLnsRequestOut(pDevice,
1,
0x1200,
0x0000,
0,
NULL
);
return (status == STATUS_SUCCESS) ? TRUE : FALSE;
}
/*
 * FIRMWAREbCheckVersion - read and validate the running firmware version.
 *
 * @pDevice: adapter state.
 *
 * Reads the 16-bit version word into pDevice->wFirmwareVersion.  Returns
 * FALSE when the read fails, when the device reports 0xFFFF (still in
 * the boot loader), or when the version is older than FIRMWARE_VERSION
 * (in which case it branches back to the loader so a new image can be
 * downloaded).  Returns TRUE only when a current firmware is running.
 */
BOOL
FIRMWAREbCheckVersion(
PSDevice pDevice
)
{
int ntStatus;
ntStatus = CONTROLnsRequestIn(pDevice,
MESSAGE_TYPE_READ,
0,
MESSAGE_REQUEST_VERSION,
2,
(PBYTE) &(pDevice->wFirmwareVersion));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Version [%04x]\n", pDevice->wFirmwareVersion);
if (ntStatus != STATUS_SUCCESS) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Invalid.\n");
return FALSE;
}
/* 0xFFFF means the device is still executing its boot loader. */
if (pDevice->wFirmwareVersion == 0xFFFF) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"In Loader.\n");
return FALSE;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Version [%04x]\n", pDevice->wFirmwareVersion);
if (pDevice->wFirmwareVersion < FIRMWARE_VERSION) {
// branch to loader for download new firmware
FIRMWAREbBrach2Sram(pDevice);
return FALSE;
}
return TRUE;
}
| gpl-2.0 |
AICP/kernel_motorola_msm8226 | drivers/net/arcnet/com20020.c | 8160 | 10293 | /*
* Linux ARCnet driver - COM20020 chipset support
*
* Written 1997 by David Woodhouse.
* Written 1994-1999 by Avery Pennarun.
* Written 1999 by Martin Mares <mj@ucw.cz>.
* Derived from skeleton.c by Donald Becker.
*
* Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
* for sponsoring the further development of this driver.
*
* **********************
*
* The original copyright of skeleton.c was as follows:
*
* skeleton.c Written 1993 by Donald Becker.
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency. This software may only be used
* and distributed according to the terms of the GNU General Public License as
* modified by SRC, incorporated herein by reference.
*
* **********************
*
* For more details, see drivers/net/arcnet.c
*
* **********************
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/arcdevice.h>
#include <linux/com20020.h>
#include <asm/io.h>
#define VERSION "arcnet: COM20020 chipset support (by David Woodhouse et al.)\n"
static char *clockrates[] =
{"10 Mb/s", "Reserved", "5 Mb/s",
"2.5 Mb/s", "1.25Mb/s", "625 Kb/s", "312.5 Kb/s",
"156.25 Kb/s", "Reserved", "Reserved", "Reserved"};
static void com20020_command(struct net_device *dev, int command);
static int com20020_status(struct net_device *dev);
static void com20020_setmask(struct net_device *dev, int mask);
static int com20020_reset(struct net_device *dev, int really_reset);
static void com20020_copy_to_card(struct net_device *dev, int bufnum,
int offset, void *buf, int count);
static void com20020_copy_from_card(struct net_device *dev, int bufnum,
int offset, void *buf, int count);
static void com20020_set_mc_list(struct net_device *dev);
static void com20020_close(struct net_device *);
/* Copy @count bytes out of the card's 512-byte buffer @bufnum, starting
 * at @offset, into @buf via the chip's auto-incrementing address window. */
static void com20020_copy_from_card(struct net_device *dev, int bufnum,
int offset, void *buf, int count)
{
int ioaddr = dev->base_addr, ofs = 512 * bufnum + offset;
/* set up the address register */
outb((ofs >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI);
outb(ofs & 0xff, _ADDR_LO);
/* copy the data */
TIME("insb", count, insb(_MEMDATA, buf, count));
}
/* Copy @count bytes from @buf into the card's 512-byte buffer @bufnum,
 * starting at @offset, via the chip's auto-incrementing address window. */
static void com20020_copy_to_card(struct net_device *dev, int bufnum,
int offset, void *buf, int count)
{
int ioaddr = dev->base_addr, ofs = 512 * bufnum + offset;
/* set up the address register */
outb((ofs >> 8) | AUTOINCflag, _ADDR_HI);
outb(ofs & 0xff, _ADDR_LO);
/* copy the data */
TIME("outsb", count, outsb(_MEMDATA, buf, count));
}
/* Reset the card and check some basic stuff during the detection stage.
 * Returns 0 if a COM20020 appears to be present, -ENODEV otherwise.
 * NOTE(review): the exact register write/delay ordering below follows the
 * chip's documented reset sequence - do not reorder. */
int com20020_check(struct net_device *dev)
{
int ioaddr = dev->base_addr, status;
struct arcnet_local *lp = netdev_priv(dev);
ARCRESET0;
mdelay(RESETtime);
lp->setup = lp->clockm ? 0 : (lp->clockp << 1);
lp->setup2 = (lp->clockm << 4) | 8;
/* CHECK: should we do this for SOHARD cards ? */
/* Enable P1Mode for backplane mode */
lp->setup = lp->setup | P1MODE;
SET_SUBADR(SUB_SETUP1);
outb(lp->setup, _XREG);
if (lp->clockm != 0)
{
SET_SUBADR(SUB_SETUP2);
outb(lp->setup2, _XREG);
/* must now write the magic "restart operation" command */
mdelay(1);
outb(0x18, _COMMAND);
}
lp->config = 0x21 | (lp->timeout << 3) | (lp->backplane << 2);
/* set node ID to 0x42 (but transmitter is disabled, so it's okay) */
SETCONF;
outb(0x42, ioaddr + BUS_ALIGN*7);
status = ASTATUS();
/* after reset, NORX/TXFREE/RESET must be the only flags set in 0x99 */
if ((status & 0x99) != (NORXflag | TXFREEflag | RESETflag)) {
BUGMSG(D_NORMAL, "status invalid (%Xh).\n", status);
return -ENODEV;
}
BUGMSG(D_INIT_REASONS, "status after reset: %X\n", status);
/* Enable TX */
outb(0x39, _CONFIG);
outb(inb(ioaddr + BUS_ALIGN*8), ioaddr + BUS_ALIGN*7);
ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
status = ASTATUS();
BUGMSG(D_INIT_REASONS, "status after reset acknowledged: %X\n",
status);
/* Read first location of memory: the chip leaves TESTvalue (0xD1)
 * there after a successful reset. */
outb(0 | RDDATAflag | AUTOINCflag, _ADDR_HI);
outb(0, _ADDR_LO);
if ((status = inb(_MEMDATA)) != TESTvalue) {
BUGMSG(D_NORMAL, "Signature byte not found (%02Xh != D1h).\n",
status);
return -ENODEV;
}
return 0;
}
/* net_device callbacks: all real work is delegated to the generic
 * arcnet core; only the multicast hook is chipset-specific. */
const struct net_device_ops com20020_netdev_ops = {
.ndo_open = arcnet_open,
.ndo_stop = arcnet_close,
.ndo_start_xmit = arcnet_send_packet,
.ndo_tx_timeout = arcnet_timeout,
.ndo_set_rx_mode = com20020_set_mc_list,
};
/* Set up the struct net_device associated with this card. Called after
 * probing succeeds.
 *
 * Fills in the chipset-specific hw callbacks, programs the SETUP
 * registers, claims the IRQ and registers the netdev.  Returns 0 on
 * success, -ENODEV if the IRQ cannot be claimed, -EIO if
 * register_netdev() fails.
 */
int com20020_found(struct net_device *dev, int shared)
{
struct arcnet_local *lp;
int ioaddr = dev->base_addr;
int clockidx;
/* Initialize the rest of the device structure. */
lp = netdev_priv(dev);
lp->hw.owner = THIS_MODULE;
lp->hw.command = com20020_command;
lp->hw.status = com20020_status;
lp->hw.intmask = com20020_setmask;
lp->hw.reset = com20020_reset;
lp->hw.copy_to_card = com20020_copy_to_card;
lp->hw.copy_from_card = com20020_copy_from_card;
lp->hw.close = com20020_close;
if (!dev->dev_addr[0])
dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! */
SET_SUBADR(SUB_SETUP1);
outb(lp->setup, _XREG);
if (lp->card_flags & ARC_CAN_10MBIT)
{
SET_SUBADR(SUB_SETUP2);
outb(lp->setup2, _XREG);
/* must now write the magic "restart operation" command */
mdelay(1);
outb(0x18, _COMMAND);
}
lp->config = 0x20 | (lp->timeout << 3) | (lp->backplane << 2) | 1;
/* Default 0x38 + register: Node ID */
SETCONF;
outb(dev->dev_addr[0], _XREG);
/* reserve the irq */
if (request_irq(dev->irq, arcnet_interrupt, shared,
"arcnet (COM20020)", dev)) {
BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
return -ENODEV;
}
dev->base_addr = ioaddr;
BUGMSG(D_NORMAL, "%s: station %02Xh found at %03lXh, IRQ %d.\n",
lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq);
if (lp->backplane)
BUGMSG(D_NORMAL, "Using backplane mode.\n");
if (lp->timeout != 3)
BUGMSG(D_NORMAL, "Using extended timeout value of %d.\n", lp->timeout);
/* Clamp the clock-rate table index: with unexpected SETUP1/SETUP2
 * contents the raw expression ranges from -12 to 10, which would read
 * outside clockrates[] (11 entries). */
clockidx = 3 - ((lp->setup2 & 0xF0) >> 4) + ((lp->setup & 0x0F) >> 1);
if (clockidx < 0 || clockidx >= (int)ARRAY_SIZE(clockrates))
clockidx = 1; /* "Reserved" */
BUGMSG(D_NORMAL, "Using CKP %d - data rate %s.\n",
lp->setup >> 1, clockrates[clockidx]);
if (register_netdev(dev)) {
free_irq(dev->irq, dev);
return -EIO;
}
return 0;
}
/*
 * Do a hardware reset on the card, and set up necessary registers.
 *
 * This should be called as little as possible, because it disrupts the
 * token on the network (causes a RECON) and requires a significant delay.
 *
 * However, it does make sure the card is in a defined state.
 *
 * @really_reset: when non-zero, pulse the hardware reset line and wait;
 * otherwise only reprogram the config and clear the flags.
 * Returns 0 on success, 1 when the signature byte check fails.
 */
static int com20020_reset(struct net_device *dev, int really_reset)
{
struct arcnet_local *lp = netdev_priv(dev);
u_int ioaddr = dev->base_addr;
u_char inbyte;
BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n",
__FILE__,__LINE__,__func__,dev,lp,dev->name);
BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n",
dev->name, ASTATUS());
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2);
/* power-up defaults */
SETCONF;
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
if (really_reset) {
/* reset the card */
ARCRESET;
mdelay(RESETtime * 2); /* COM20020 seems to be slower sometimes */
}
/* clear flags & end reset */
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
/* verify that the ARCnet signature byte is present */
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
com20020_copy_from_card(dev, 0, 0, &inbyte, 1);
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
if (inbyte != TESTvalue) {
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
return 1;
}
/* enable extended (512-byte) packets */
ACOMMAND(CONFIGcmd | EXTconf);
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
/* done! return success. */
return 0;
}
/* Program the chip's interrupt mask register. */
static void com20020_setmask(struct net_device *dev, int mask)
{
u_int ioaddr = dev->base_addr;
BUGMSG(D_DURING, "Setting mask to %x at %x\n",mask,ioaddr);
AINTMASK(mask);
}
/* Write @cmd to the chip's command register. */
static void com20020_command(struct net_device *dev, int cmd)
{
u_int ioaddr = dev->base_addr;
ACOMMAND(cmd);
}
/* Return the status register in the low byte and the diagnostic status
 * register in the high byte. */
static int com20020_status(struct net_device *dev)
{
u_int ioaddr = dev->base_addr;
return ASTATUS() + (ADIAGSTATUS()<<8);
}
/* Disable the transmitter when the device is closed. */
static void com20020_close(struct net_device *dev)
{
struct arcnet_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
/* disable transmitter */
lp->config &= ~TXENcfg;
SETCONF;
}
/* Set or clear the multicast filter for this adaptor.
 * num_addrs == -1 Promiscuous mode, receive all packets
 * num_addrs == 0 Normal mode, clear multicast list
 * num_addrs > 0 Multicast mode, receive normal and MC packets, and do
 * best-effort filtering.
 * FIXME - do multicast stuff, not just promiscuous.
 */
static void com20020_set_mc_list(struct net_device *dev)
{
struct arcnet_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
if ((dev->flags & IFF_PROMISC) && (dev->flags & IFF_UP)) { /* Enable promiscuous mode */
/* only log when the flag actually changes */
if (!(lp->setup & PROMISCset))
BUGMSG(D_NORMAL, "Setting promiscuous flag...\n");
SET_SUBADR(SUB_SETUP1);
lp->setup |= PROMISCset;
outb(lp->setup, _XREG);
} else
/* Disable promiscuous mode, use normal mode */
{
if ((lp->setup & PROMISCset))
BUGMSG(D_NORMAL, "Resetting promiscuous flag...\n");
SET_SUBADR(SUB_SETUP1);
lp->setup &= ~PROMISCset;
outb(lp->setup, _XREG);
}
}
#if defined(CONFIG_ARCNET_COM20020_PCI_MODULE) || \
defined(CONFIG_ARCNET_COM20020_ISA_MODULE) || \
defined(CONFIG_ARCNET_COM20020_CS_MODULE)
EXPORT_SYMBOL(com20020_check);
EXPORT_SYMBOL(com20020_found);
EXPORT_SYMBOL(com20020_netdev_ops);
#endif
MODULE_LICENSE("GPL");
#ifdef MODULE
/* Module entry: print the version banner only.  No other setup is done
 * because this file provides library code used by the bus-specific
 * COM20020 front-end modules (PCI/ISA/CS). */
static int __init com20020_module_init(void)
{
BUGLVL(D_NORMAL) printk(VERSION);
return 0;
}
/* Module exit: nothing to tear down. */
static void __exit com20020_module_exit(void)
{
}
module_init(com20020_module_init);
module_exit(com20020_module_exit);
#endif /* MODULE */
| gpl-2.0 |
tchaari/android_kernel_samsung_crespo_kitkang | drivers/staging/vt6656/firmware.c | 8160 | 4744 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: baseband.c
*
* Purpose: Implement functions to access baseband
*
* Author: Yiching Chen
*
* Date: May 20, 2004
*
* Functions:
*
* Revision History:
*
*/
#include "firmware.h"
#include "control.h"
#include "rndis.h"
/*--------------------- Static Definitions -------------------------*/
static int msglevel =MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
#define FIRMWARE_VERSION 0x133 /* version 1.51 */
#define FIRMWARE_NAME "vntwusb.fw"
#define FIRMWARE_CHUNK_SIZE 0x400
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*
 * FIRMWAREbDownload - download the vntwusb firmware image to the device.
 *
 * @pDevice: adapter state; pDevice->lock is held by the caller and is
 *           temporarily released here because request_firmware() and the
 *           USB control transfers can sleep.
 *
 * Loads FIRMWARE_NAME via request_firmware() on first use (the image is
 * cached in pDevice->firmware for later re-downloads), then pushes it to
 * the device in FIRMWARE_CHUNK_SIZE control-out transfers starting at
 * device address 0x1200.
 *
 * Returns TRUE on success, FALSE on any failure.
 */
BOOL
FIRMWAREbDownload(
PSDevice pDevice
)
{
const struct firmware *fw;
int NdisStatus;
void *pBuffer = NULL;
BOOL result = FALSE;
u16 wLength;
int ii;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Download firmware\n");
/* Drop the lock: firmware loading and USB control requests may sleep. */
spin_unlock_irq(&pDevice->lock);
if (!pDevice->firmware) {
struct device *dev = &pDevice->usb->dev;
int rc;
rc = request_firmware(&pDevice->firmware, FIRMWARE_NAME, dev);
if (rc) {
dev_err(dev, "firmware file %s request failed (%d)\n",
FIRMWARE_NAME, rc);
goto out;
}
}
fw = pDevice->firmware;
/* Bounce buffer for one chunk of the image. */
pBuffer = kmalloc(FIRMWARE_CHUNK_SIZE, GFP_KERNEL);
if (!pBuffer)
goto out;
for (ii = 0; ii < fw->size; ii += FIRMWARE_CHUNK_SIZE) {
/* The final chunk may be shorter than FIRMWARE_CHUNK_SIZE. */
wLength = min_t(int, fw->size - ii, FIRMWARE_CHUNK_SIZE);
memcpy(pBuffer, fw->data + ii, wLength);
NdisStatus = CONTROLnsRequestOutAsyn(pDevice,
0,
0x1200+ii,
0x0000,
wLength,
pBuffer
);
DBG_PRT(MSG_LEVEL_DEBUG,
KERN_INFO"Download firmware...%d %zu\n", ii, fw->size);
if (NdisStatus != STATUS_SUCCESS)
goto out;
}
result = TRUE;
out:
kfree(pBuffer);
/* Re-acquire the lock released at function entry. */
spin_lock_irq(&pDevice->lock);
return result;
}
MODULE_FIRMWARE(FIRMWARE_NAME);
/*
 * FIRMWAREbBrach2Sram - make the device branch to the firmware in SRAM.
 *
 * @pDevice: adapter state.
 *
 * Issues a zero-length control-out request (request 1, value 0x1200)
 * telling the device to jump to the previously downloaded image.
 *
 * Returns TRUE when the control request reports STATUS_SUCCESS,
 * FALSE otherwise.
 */
BOOL
FIRMWAREbBrach2Sram(
PSDevice pDevice
)
{
int status;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Branch to Sram\n");
status = CONTROLnsRequestOut(pDevice,
1,
0x1200,
0x0000,
0,
NULL
);
return (status == STATUS_SUCCESS) ? TRUE : FALSE;
}
/*
 * FIRMWAREbCheckVersion - read and validate the running firmware version.
 *
 * @pDevice: adapter state.
 *
 * Reads the 16-bit version word into pDevice->wFirmwareVersion.  Returns
 * FALSE when the read fails, when the device reports 0xFFFF (still in
 * the boot loader), or when the version is older than FIRMWARE_VERSION
 * (in which case it branches back to the loader so a new image can be
 * downloaded).  Returns TRUE only when a current firmware is running.
 */
BOOL
FIRMWAREbCheckVersion(
PSDevice pDevice
)
{
int ntStatus;
ntStatus = CONTROLnsRequestIn(pDevice,
MESSAGE_TYPE_READ,
0,
MESSAGE_REQUEST_VERSION,
2,
(PBYTE) &(pDevice->wFirmwareVersion));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Version [%04x]\n", pDevice->wFirmwareVersion);
if (ntStatus != STATUS_SUCCESS) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Invalid.\n");
return FALSE;
}
/* 0xFFFF means the device is still executing its boot loader. */
if (pDevice->wFirmwareVersion == 0xFFFF) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"In Loader.\n");
return FALSE;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Version [%04x]\n", pDevice->wFirmwareVersion);
if (pDevice->wFirmwareVersion < FIRMWARE_VERSION) {
// branch to loader for download new firmware
FIRMWAREbBrach2Sram(pDevice);
return FALSE;
}
return TRUE;
}
| gpl-2.0 |
froggy666uk/Froggy_SensMod_CM10.1 | drivers/staging/vt6656/firmware.c | 8160 | 4744 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: baseband.c
*
* Purpose: Implement functions to access baseband
*
* Author: Yiching Chen
*
* Date: May 20, 2004
*
* Functions:
*
* Revision History:
*
*/
#include "firmware.h"
#include "control.h"
#include "rndis.h"
/*--------------------- Static Definitions -------------------------*/
static int msglevel =MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
#define FIRMWARE_VERSION 0x133 /* version 1.51 */
#define FIRMWARE_NAME "vntwusb.fw"
#define FIRMWARE_CHUNK_SIZE 0x400
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*
 * FIRMWAREbDownload - download the vntwusb firmware image to the device.
 *
 * @pDevice: adapter state; pDevice->lock is held by the caller and is
 *           temporarily released here because request_firmware() and the
 *           USB control transfers can sleep.
 *
 * Loads FIRMWARE_NAME via request_firmware() on first use (the image is
 * cached in pDevice->firmware for later re-downloads), then pushes it to
 * the device in FIRMWARE_CHUNK_SIZE control-out transfers starting at
 * device address 0x1200.
 *
 * Returns TRUE on success, FALSE on any failure.
 */
BOOL
FIRMWAREbDownload(
PSDevice pDevice
)
{
const struct firmware *fw;
int NdisStatus;
void *pBuffer = NULL;
BOOL result = FALSE;
u16 wLength;
int ii;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Download firmware\n");
/* Drop the lock: firmware loading and USB control requests may sleep. */
spin_unlock_irq(&pDevice->lock);
if (!pDevice->firmware) {
struct device *dev = &pDevice->usb->dev;
int rc;
rc = request_firmware(&pDevice->firmware, FIRMWARE_NAME, dev);
if (rc) {
dev_err(dev, "firmware file %s request failed (%d)\n",
FIRMWARE_NAME, rc);
goto out;
}
}
fw = pDevice->firmware;
/* Bounce buffer for one chunk of the image. */
pBuffer = kmalloc(FIRMWARE_CHUNK_SIZE, GFP_KERNEL);
if (!pBuffer)
goto out;
for (ii = 0; ii < fw->size; ii += FIRMWARE_CHUNK_SIZE) {
/* The final chunk may be shorter than FIRMWARE_CHUNK_SIZE. */
wLength = min_t(int, fw->size - ii, FIRMWARE_CHUNK_SIZE);
memcpy(pBuffer, fw->data + ii, wLength);
NdisStatus = CONTROLnsRequestOutAsyn(pDevice,
0,
0x1200+ii,
0x0000,
wLength,
pBuffer
);
DBG_PRT(MSG_LEVEL_DEBUG,
KERN_INFO"Download firmware...%d %zu\n", ii, fw->size);
if (NdisStatus != STATUS_SUCCESS)
goto out;
}
result = TRUE;
out:
kfree(pBuffer);
/* Re-acquire the lock released at function entry. */
spin_lock_irq(&pDevice->lock);
return result;
}
MODULE_FIRMWARE(FIRMWARE_NAME);
/*
 * FIRMWAREbBrach2Sram - make the device branch to the firmware in SRAM.
 *
 * @pDevice: adapter state.
 *
 * Issues a zero-length control-out request (request 1, value 0x1200)
 * telling the device to jump to the previously downloaded image.
 *
 * Returns TRUE when the control request reports STATUS_SUCCESS,
 * FALSE otherwise.
 */
BOOL
FIRMWAREbBrach2Sram(
PSDevice pDevice
)
{
int status;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Branch to Sram\n");
status = CONTROLnsRequestOut(pDevice,
1,
0x1200,
0x0000,
0,
NULL
);
return (status == STATUS_SUCCESS) ? TRUE : FALSE;
}
/*
 * FIRMWAREbCheckVersion - read and validate the running firmware version.
 *
 * @pDevice: adapter state.
 *
 * Reads the 16-bit version word into pDevice->wFirmwareVersion.  Returns
 * FALSE when the read fails, when the device reports 0xFFFF (still in
 * the boot loader), or when the version is older than FIRMWARE_VERSION
 * (in which case it branches back to the loader so a new image can be
 * downloaded).  Returns TRUE only when a current firmware is running.
 */
BOOL
FIRMWAREbCheckVersion(
PSDevice pDevice
)
{
int ntStatus;
ntStatus = CONTROLnsRequestIn(pDevice,
MESSAGE_TYPE_READ,
0,
MESSAGE_REQUEST_VERSION,
2,
(PBYTE) &(pDevice->wFirmwareVersion));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Version [%04x]\n", pDevice->wFirmwareVersion);
if (ntStatus != STATUS_SUCCESS) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Invalid.\n");
return FALSE;
}
/* 0xFFFF means the device is still executing its boot loader. */
if (pDevice->wFirmwareVersion == 0xFFFF) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"In Loader.\n");
return FALSE;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Firmware Version [%04x]\n", pDevice->wFirmwareVersion);
if (pDevice->wFirmwareVersion < FIRMWARE_VERSION) {
// branch to loader for download new firmware
FIRMWAREbBrach2Sram(pDevice);
return FALSE;
}
return TRUE;
}
| gpl-2.0 |
Dr-Shadow/android_kernel_mt6589 | drivers/firmware/google/memconsole.c | 10720 | 3916 | /*
* memconsole.c
*
* Infrastructure for importing the BIOS memory based console
* into the kernel log ringbuffer.
*
* Copyright 2010 Google Inc. All rights reserved.
*/
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/dmi.h>
#include <asm/bios_ebda.h>
#define BIOS_MEMCONSOLE_V1_MAGIC 0xDEADBABE
#define BIOS_MEMCONSOLE_V2_MAGIC (('M')|('C'<<8)|('O'<<16)|('N'<<24))
struct biosmemcon_ebda {
u32 signature;
union {
struct {
u8 enabled;
u32 buffer_addr;
u16 start;
u16 end;
u16 num_chars;
u8 wrapped;
} __packed v1;
struct {
u32 buffer_addr;
/* Misdocumented as number of pages! */
u16 num_bytes;
u16 start;
u16 end;
} __packed v2;
};
} __packed;
static char *memconsole_baseaddr;
static size_t memconsole_length;
/* sysfs bin_attribute read handler: copy from the BIOS console buffer
 * located by found_memconsole(), honouring @pos and @count. */
static ssize_t memconsole_read(struct file *filp, struct kobject *kobp,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count)
{
return memory_read_from_buffer(buf, count, &pos, memconsole_baseaddr,
memconsole_length);
}
/* Read-only "log" attribute; .size is filled in by memconsole_init(). */
static struct bin_attribute memconsole_bin_attr = {
.attr = {.name = "log", .mode = 0444},
.read = memconsole_read,
};
/* Record the buffer location and length of a v1 console header into the
 * module-level globals used by memconsole_read(). */
static void found_v1_header(struct biosmemcon_ebda *hdr)
{
printk(KERN_INFO "BIOS console v1 EBDA structure found at %p\n", hdr);
printk(KERN_INFO "BIOS console buffer at 0x%.8x, "
"start = %d, end = %d, num = %d\n",
hdr->v1.buffer_addr, hdr->v1.start,
hdr->v1.end, hdr->v1.num_chars);
memconsole_length = hdr->v1.num_chars;
memconsole_baseaddr = phys_to_virt(hdr->v1.buffer_addr);
}
/* Record the buffer location and length of a v2 console header into the
 * module-level globals; v2 exposes start/end offsets rather than a
 * character count, so length is end - start. */
static void found_v2_header(struct biosmemcon_ebda *hdr)
{
printk(KERN_INFO "BIOS console v2 EBDA structure found at %p\n", hdr);
printk(KERN_INFO "BIOS console buffer at 0x%.8x, "
"start = %d, end = %d, num_bytes = %d\n",
hdr->v2.buffer_addr, hdr->v2.start,
hdr->v2.end, hdr->v2.num_bytes);
memconsole_length = hdr->v2.end - hdr->v2.start;
memconsole_baseaddr = phys_to_virt(hdr->v2.buffer_addr
+ hdr->v2.start);
}
/*
 * Search through the EBDA for the BIOS Memory Console, and
 * set the global variables to point to it. Return true if found.
 */
static bool found_memconsole(void)
{
unsigned int address;
size_t length, cur;
address = get_bios_ebda();
if (!address) {
printk(KERN_INFO "BIOS EBDA non-existent.\n");
return false;
}
/* EBDA length is byte 0 of EBDA (in KB) */
length = *(u8 *)phys_to_virt(address);
length <<= 10; /* convert to bytes */
/*
 * Search through EBDA for BIOS memory console structure
 * note: signature is not necessarily dword-aligned
 */
for (cur = 0; cur < length; cur++) {
struct biosmemcon_ebda *hdr = phys_to_virt(address + cur);
/* memconsole v1 */
if (hdr->signature == BIOS_MEMCONSOLE_V1_MAGIC) {
found_v1_header(hdr);
return true;
}
/* memconsole v2 */
if (hdr->signature == BIOS_MEMCONSOLE_V2_MAGIC) {
found_v2_header(hdr);
return true;
}
}
printk(KERN_INFO "BIOS console EBDA structure not found!\n");
return false;
}
/* Restrict probing to boards whose DMI board vendor is "Google, Inc.". */
static struct dmi_system_id memconsole_dmi_table[] __initdata = {
{
.ident = "Google Board",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
},
},
{}
};
MODULE_DEVICE_TABLE(dmi, memconsole_dmi_table);
/*
 * memconsole_init - locate the BIOS memory console and publish it.
 *
 * Bails out with -ENODEV unless running on a matching (Google) board and
 * the EBDA scan finds a console structure; otherwise exposes the buffer
 * as a binary sysfs file under the firmware kobject.
 */
static int __init memconsole_init(void)
{
if (!dmi_check_system(memconsole_dmi_table))
return -ENODEV;
if (!found_memconsole())
return -ENODEV;
memconsole_bin_attr.size = memconsole_length;
return sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
}
/* Remove the sysfs file created by memconsole_init(). */
static void __exit memconsole_exit(void)
{
sysfs_remove_bin_file(firmware_kobj, &memconsole_bin_attr);
}
module_init(memconsole_init);
module_exit(memconsole_exit);
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
| gpl-2.0 |
paulluo/linux | drivers/input/serio/q40kbd.c | 225 | 4621 | /*
* Copyright (c) 2000-2001 Vojtech Pavlik
*
* Based on the work of:
* Richard Zidlicky <Richard.Zidlicky@stud.informatik.uni-erlangen.de>
*/
/*
* Q40 PS/2 keyboard controller driver for Linux/m68k
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Should you need to contact me, the author, you can do so either by
* e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/q40_master.h>
#include <asm/irq.h>
#include <asm/q40ints.h>
#define DRV_NAME "q40kbd"
MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Q40 PS/2 keyboard controller driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
/* Per-device state: the registered serio port and a lock serializing
 * controller access between the IRQ handler and open/flush paths. */
struct q40kbd {
struct serio *port;
spinlock_t lock;
};
/* Keyboard IRQ handler: if the keyboard interrupt is pending, forward the
 * scancode to the serio layer, then unlock the controller so it can latch
 * the next key. */
static irqreturn_t q40kbd_interrupt(int irq, void *dev_id)
{
struct q40kbd *q40kbd = dev_id;
unsigned long flags;
spin_lock_irqsave(&q40kbd->lock, flags);
if (Q40_IRQ_KEYB_MASK & master_inb(INTERRUPT_REG))
serio_interrupt(q40kbd->port, master_inb(KEYCODE_REG), 0);
master_outb(-1, KEYBOARD_UNLOCK_REG);
spin_unlock_irqrestore(&q40kbd->lock, flags);
return IRQ_HANDLED;
}
/*
 * q40kbd_flush() flushes all data that may be in the keyboard buffers
 */
static void q40kbd_flush(struct q40kbd *q40kbd)
{
int maxread = 100; /* bound the drain loop in case the IRQ never clears */
unsigned long flags;
spin_lock_irqsave(&q40kbd->lock, flags);
while (maxread-- && (Q40_IRQ_KEYB_MASK & master_inb(INTERRUPT_REG)))
master_inb(KEYCODE_REG);
spin_unlock_irqrestore(&q40kbd->lock, flags);
}
/* Disable the keyboard interrupt and unlock the controller. */
static void q40kbd_stop(void)
{
master_outb(0, KEY_IRQ_ENABLE_REG);
master_outb(-1, KEYBOARD_UNLOCK_REG);
}
/*
 * q40kbd_open() is called when the port is opened by the higher layer.
 * It drains stale data, then unlocks the controller and enables its
 * interrupt.  Always returns 0.
 */
static int q40kbd_open(struct serio *port)
{
struct q40kbd *q40kbd = port->port_data;
q40kbd_flush(q40kbd);
/* off we go */
master_outb(-1, KEYBOARD_UNLOCK_REG);
master_outb(1, KEY_IRQ_ENABLE_REG);
return 0;
}
/* Called when the higher layer closes the port: quiesce the controller
 * and drain any pending scancodes. */
static void q40kbd_close(struct serio *port)
{
struct q40kbd *q40kbd = port->port_data;
q40kbd_stop();
q40kbd_flush(q40kbd);
}
/* Allocate the q40kbd state and a serio port, set up the port callbacks,
 * quiesce the controller, claim the keyboard IRQ and register the port.
 * Returns 0 on success or a negative errno. */
static int q40kbd_probe(struct platform_device *pdev)
{
struct q40kbd *q40kbd;
struct serio *port;
int error;
q40kbd = kzalloc(sizeof(struct q40kbd), GFP_KERNEL);
port = kzalloc(sizeof(struct serio), GFP_KERNEL);
if (!q40kbd || !port) {
error = -ENOMEM;
goto err_free_mem;
}
q40kbd->port = port;
spin_lock_init(&q40kbd->lock);
port->id.type = SERIO_8042;
port->open = q40kbd_open;
port->close = q40kbd_close;
port->port_data = q40kbd;
port->dev.parent = &pdev->dev;
strlcpy(port->name, "Q40 Kbd Port", sizeof(port->name));
strlcpy(port->phys, "Q40", sizeof(port->phys));
/* keep the controller quiet until the port is opened */
q40kbd_stop();
error = request_irq(Q40_IRQ_KEYBOARD, q40kbd_interrupt, 0,
DRV_NAME, q40kbd);
if (error) {
dev_err(&pdev->dev, "Can't get irq %d.\n", Q40_IRQ_KEYBOARD);
goto err_free_mem;
}
serio_register_port(q40kbd->port);
platform_set_drvdata(pdev, q40kbd);
printk(KERN_INFO "serio: Q40 kbd registered\n");
return 0;
err_free_mem:
/* kfree(NULL) is a no-op, so partial allocation failures are fine */
kfree(port);
kfree(q40kbd);
return error;
}
/* Tear down in reverse order of probe; see the comment below on the
 * unregister/free ordering. */
static int q40kbd_remove(struct platform_device *pdev)
{
struct q40kbd *q40kbd = platform_get_drvdata(pdev);
/*
 * q40kbd_close() will be called as part of unregistering
 * and will ensure that IRQ is turned off, so it is safe
 * to unregister port first and free IRQ later.
 */
serio_unregister_port(q40kbd->port);
free_irq(Q40_IRQ_KEYBOARD, q40kbd);
kfree(q40kbd);
return 0;
}
/* Platform driver; registered via module_platform_driver_probe(), which
 * supplies the probe routine, so no .probe member is set here. */
static struct platform_driver q40kbd_driver = {
.driver = {
.name = "q40kbd",
},
.remove = q40kbd_remove,
};
module_platform_driver_probe(q40kbd_driver, q40kbd_probe);
| gpl-2.0 |
rperier/linux-rockchip | arch/arm/probes/kprobes/test-thumb.c | 225 | 47178 | // SPDX-License-Identifier: GPL-2.0-only
/*
* arch/arm/probes/kprobes/test-thumb.c
*
* Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/opcodes.h>
#include <asm/probes.h>
#include "test-core.h"
#define TEST_ISA "16"
#define DONT_TEST_IN_ITBLOCK(tests) \
kprobe_test_flags |= TEST_FLAG_NO_ITBLOCK; \
tests \
kprobe_test_flags &= ~TEST_FLAG_NO_ITBLOCK;
#define CONDITION_INSTRUCTIONS(cc_pos, tests) \
kprobe_test_cc_position = cc_pos; \
DONT_TEST_IN_ITBLOCK(tests) \
kprobe_test_cc_position = 0;
#define TEST_ITBLOCK(code) \
kprobe_test_flags |= TEST_FLAG_FULL_ITBLOCK; \
TESTCASE_START(code) \
TEST_ARG_END("") \
"50: nop \n\t" \
"1: "code" \n\t" \
" mov r1, #0x11 \n\t" \
" mov r2, #0x22 \n\t" \
" mov r3, #0x33 \n\t" \
"2: nop \n\t" \
TESTCASE_END \
kprobe_test_flags &= ~TEST_FLAG_FULL_ITBLOCK;
#define TEST_THUMB_TO_ARM_INTERWORK_P(code1, reg, val, code2) \
TESTCASE_START(code1 #reg code2) \
TEST_ARG_PTR(reg, val) \
TEST_ARG_REG(14, 99f+1) \
TEST_ARG_MEM(15, 3f) \
TEST_ARG_END("") \
" nop \n\t" /* To align 1f */ \
"50: nop \n\t" \
"1: "code1 #reg code2" \n\t" \
" bx lr \n\t" \
".arm \n\t" \
"3: adr lr, 2f+1 \n\t" \
" bx lr \n\t" \
".thumb \n\t" \
"2: nop \n\t" \
TESTCASE_END
/*
 * Emit test cases covering the 16-bit Thumb instruction set, grouped to
 * mirror the encoding tables of the ARM architecture reference manual.
 * Each TEST_* invocation generates one test case exercising the kprobes
 * instruction decoder/simulator for that encoding; TEST_UNSUPPORTED
 * cases assert that the decoder rejects the encoding.
 */
void kprobe_thumb16_test_cases(void)
{
/* All 16-bit encodings are narrow instructions */
kprobe_test_flags = TEST_FLAG_NARROW_INSTR;
TEST_GROUP("Shift (immediate), add, subtract, move, and compare")
TEST_R( "lsls r7, r",0,VAL1,", #5")
TEST_R( "lsls r0, r",7,VAL2,", #11")
TEST_R( "lsrs r7, r",0,VAL1,", #5")
TEST_R( "lsrs r0, r",7,VAL2,", #11")
TEST_R( "asrs r7, r",0,VAL1,", #5")
TEST_R( "asrs r0, r",7,VAL2,", #11")
TEST_RR( "adds r2, r",0,VAL1,", r",7,VAL2,"")
TEST_RR( "adds r5, r",7,VAL2,", r",0,VAL2,"")
TEST_RR( "subs r2, r",0,VAL1,", r",7,VAL2,"")
TEST_RR( "subs r5, r",7,VAL2,", r",0,VAL2,"")
TEST_R( "adds r7, r",0,VAL1,", #5")
TEST_R( "adds r0, r",7,VAL2,", #2")
TEST_R( "subs r7, r",0,VAL1,", #5")
TEST_R( "subs r0, r",7,VAL2,", #2")
TEST( "movs.n r0, #0x5f")
TEST( "movs.n r7, #0xa0")
TEST_R( "cmp.n r",0,0x5e, ", #0x5f")
TEST_R( "cmp.n r",5,0x15f,", #0x5f")
TEST_R( "cmp.n r",7,0xa0, ", #0xa0")
TEST_R( "adds.n r",0,VAL1,", #0x5f")
TEST_R( "adds.n r",7,VAL2,", #0xa0")
TEST_R( "subs.n r",0,VAL1,", #0x5f")
TEST_R( "subs.n r",7,VAL2,", #0xa0")
TEST_GROUP("16-bit Thumb data-processing instructions")
/* Exercise a two-operand data-processing op with both register orderings */
#define DATA_PROCESSING16(op,val) \
TEST_RR( op" r",0,VAL1,", r",7,val,"") \
TEST_RR( op" r",7,VAL2,", r",0,val,"")
DATA_PROCESSING16("ands",0xf00f00ff)
DATA_PROCESSING16("eors",0xf00f00ff)
DATA_PROCESSING16("lsls",11)
DATA_PROCESSING16("lsrs",11)
DATA_PROCESSING16("asrs",11)
DATA_PROCESSING16("adcs",VAL2)
DATA_PROCESSING16("sbcs",VAL2)
DATA_PROCESSING16("rors",11)
DATA_PROCESSING16("tst",0xf00f00ff)
TEST_R("rsbs r",0,VAL1,", #0")
TEST_R("rsbs r",7,VAL2,", #0")
DATA_PROCESSING16("cmp",0xf00f00ff)
DATA_PROCESSING16("cmn",0xf00f00ff)
DATA_PROCESSING16("orrs",0xf00f00ff)
DATA_PROCESSING16("muls",VAL2)
DATA_PROCESSING16("bics",0xf00f00ff)
DATA_PROCESSING16("mvns",VAL2)
TEST_GROUP("Special data instructions and branch and exchange")
TEST_RR( "add r",0, VAL1,", r",7,VAL2,"")
TEST_RR( "add r",3, VAL2,", r",8,VAL3,"")
TEST_RR( "add r",8, VAL3,", r",0,VAL1,"")
TEST_R( "add sp" ", r",8,-8, "")
TEST_R( "add r",14,VAL1,", pc")
TEST_BF_R("add pc" ", r",0,2f-1f-8,"")
TEST_UNSUPPORTED(__inst_thumb16(0x44ff) " @ add pc, pc")
TEST_RR( "cmp r",3,VAL1,", r",8,VAL2,"")
TEST_RR( "cmp r",8,VAL2,", r",0,VAL1,"")
TEST_R( "cmp sp" ", r",8,-8, "")
TEST_R( "mov r0, r",7,VAL2,"")
TEST_R( "mov r3, r",8,VAL3,"")
TEST_R( "mov r8, r",0,VAL1,"")
TEST_P( "mov sp, r",8,-8, "")
TEST( "mov lr, pc")
TEST_BF_R("mov pc, r",0,2f, "")
TEST_BF_R("bx r",0, 2f+1,"")
TEST_BF_R("bx r",14,2f+1,"")
/*
 * Hand-written case for "bx pc": branching to pc switches to ARM state,
 * so ARM code at the (aligned) destination interworks back to Thumb at
 * label 2: to complete the test.
 */
TESTCASE_START("bx pc")
TEST_ARG_REG(14, 99f+1)
TEST_ARG_END("")
" nop \n\t" /* To align the bx pc*/
"50: nop \n\t"
"1: bx pc \n\t"
" bx lr \n\t"
".arm \n\t"
" adr lr, 2f+1 \n\t"
" bx lr \n\t"
".thumb \n\t"
"2: nop \n\t"
TESTCASE_END
TEST_BF_R("blx r",0, 2f+1,"")
TEST_BB_R("blx r",14,2f+1,"")
TEST_UNSUPPORTED(__inst_thumb16(0x47f8) " @ blx pc")
TEST_GROUP("Load from Literal Pool")
TEST_X( "ldr r0, 3f",
".align \n\t"
"3: .word "__stringify(VAL1))
TEST_X( "ldr r7, 3f",
".space 128 \n\t"
".align \n\t"
"3: .word "__stringify(VAL2))
TEST_GROUP("16-bit Thumb Load/store instructions")
TEST_RPR("str r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
TEST_RPR("str r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
TEST_RPR("strh r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
TEST_RPR("strh r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
TEST_RPR("strb r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
TEST_RPR("strb r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
TEST_PR( "ldrsb r0, [r",1, 24,", r",2, 48,"]")
TEST_PR( "ldrsb r7, [r",6, 24,", r",5, 50,"]")
TEST_PR( "ldr r0, [r",1, 24,", r",2, 48,"]")
TEST_PR( "ldr r7, [r",6, 24,", r",5, 48,"]")
TEST_PR( "ldrh r0, [r",1, 24,", r",2, 48,"]")
TEST_PR( "ldrh r7, [r",6, 24,", r",5, 50,"]")
TEST_PR( "ldrb r0, [r",1, 24,", r",2, 48,"]")
TEST_PR( "ldrb r7, [r",6, 24,", r",5, 50,"]")
TEST_PR( "ldrsh r0, [r",1, 24,", r",2, 48,"]")
TEST_PR( "ldrsh r7, [r",6, 24,", r",5, 50,"]")
TEST_RP("str r",0, VAL1,", [r",1, 24,", #120]")
TEST_RP("str r",7, VAL2,", [r",6, 24,", #120]")
TEST_P( "ldr r0, [r",1, 24,", #120]")
TEST_P( "ldr r7, [r",6, 24,", #120]")
TEST_RP("strb r",0, VAL1,", [r",1, 24,", #30]")
TEST_RP("strb r",7, VAL2,", [r",6, 24,", #30]")
TEST_P( "ldrb r0, [r",1, 24,", #30]")
TEST_P( "ldrb r7, [r",6, 24,", #30]")
TEST_RP("strh r",0, VAL1,", [r",1, 24,", #60]")
TEST_RP("strh r",7, VAL2,", [r",6, 24,", #60]")
TEST_P( "ldrh r0, [r",1, 24,", #60]")
TEST_P( "ldrh r7, [r",6, 24,", #60]")
TEST_R( "str r",0, VAL1,", [sp, #0]")
TEST_R( "str r",7, VAL2,", [sp, #160]")
TEST( "ldr r0, [sp, #0]")
TEST( "ldr r7, [sp, #160]")
TEST_RP("str r",0, VAL1,", [r",0, 24,"]")
TEST_P( "ldr r0, [r",0, 24,"]")
TEST_GROUP("Generate PC-/SP-relative address")
TEST("add r0, pc, #4")
TEST("add r7, pc, #1020")
TEST("add r0, sp, #4")
TEST("add r7, sp, #1020")
TEST_GROUP("Miscellaneous 16-bit instructions")
TEST_UNSUPPORTED( "cpsie i")
TEST_UNSUPPORTED( "cpsid i")
TEST_UNSUPPORTED( "setend le")
TEST_UNSUPPORTED( "setend be")
TEST("add sp, #"__stringify(TEST_MEMORY_SIZE)) /* Assumes TEST_MEMORY_SIZE < 0x400 */
TEST("sub sp, #0x7f*4")
/* cbz/cbnz are architecturally not permitted inside an IT block */
DONT_TEST_IN_ITBLOCK(
TEST_BF_R( "cbnz r",0,0, ", 2f")
TEST_BF_R( "cbz r",2,-1,", 2f")
TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20)
TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40)
)
TEST_R("sxth r0, r",7, HH1,"")
TEST_R("sxth r7, r",0, HH2,"")
TEST_R("sxtb r0, r",7, HH1,"")
TEST_R("sxtb r7, r",0, HH2,"")
TEST_R("uxth r0, r",7, HH1,"")
TEST_R("uxth r7, r",0, HH2,"")
TEST_R("uxtb r0, r",7, HH1,"")
TEST_R("uxtb r7, r",0, HH2,"")
TEST_R("rev r0, r",7, VAL1,"")
TEST_R("rev r7, r",0, VAL2,"")
TEST_R("rev16 r0, r",7, VAL1,"")
TEST_R("rev16 r7, r",0, VAL2,"")
TEST_UNSUPPORTED(__inst_thumb16(0xba80) "")
TEST_UNSUPPORTED(__inst_thumb16(0xbabf) "")
TEST_R("revsh r0, r",7, VAL1,"")
TEST_R("revsh r7, r",0, VAL2,"")
/*
 * Test case template for a pop that loads the PC: sp (r13) is set up to
 * point 'offset' bytes below test memory so the final pop lands exactly
 * at the end of the reserved stack area.
 */
#define TEST_POPPC(code, offset) \
TESTCASE_START(code) \
TEST_ARG_PTR(13, offset) \
TEST_ARG_END("") \
TEST_BRANCH_F(code) \
TESTCASE_END
TEST("push {r0}")
TEST("push {r7}")
TEST("push {r14}")
TEST("push {r0-r7,r14}")
TEST("push {r0,r2,r4,r6,r14}")
TEST("push {r1,r3,r5,r7}")
TEST("pop {r0}")
TEST("pop {r7}")
TEST("pop {r0,r2,r4,r6}")
TEST_POPPC("pop {pc}",15*4)
TEST_POPPC("pop {r0-r7,pc}",7*4)
TEST_POPPC("pop {r1,r3,r5,r7,pc}",11*4)
TEST_THUMB_TO_ARM_INTERWORK_P("pop {pc} @ ",13,15*4,"")
TEST_THUMB_TO_ARM_INTERWORK_P("pop {r0-r7,pc} @ ",13,7*4,"")
TEST_UNSUPPORTED("bkpt.n 0")
TEST_UNSUPPORTED("bkpt.n 255")
TEST_SUPPORTED("yield")
TEST("sev")
TEST("nop")
TEST("wfi")
TEST_SUPPORTED("wfe")
TEST_UNSUPPORTED(__inst_thumb16(0xbf50) "") /* Unassigned hints */
TEST_UNSUPPORTED(__inst_thumb16(0xbff0) "") /* Unassigned hints */
/*
 * Test case template probing an IT instruction ('code' at label 1:)
 * followed by its conditional body ('code2').
 */
#define TEST_IT(code, code2) \
TESTCASE_START(code) \
TEST_ARG_END("") \
"50: nop \n\t" \
"1: "code" \n\t" \
" "code2" \n\t" \
"2: nop \n\t" \
TESTCASE_END
/* An IT instruction may itself not appear inside an IT block */
DONT_TEST_IN_ITBLOCK(
TEST_IT("it eq","moveq r0,#0")
TEST_IT("it vc","movvc r0,#0")
TEST_IT("it le","movle r0,#0")
TEST_IT("ite eq","moveq r0,#0\n\t movne r1,#1")
TEST_IT("itet vc","movvc r0,#0\n\t movvs r1,#1\n\t movvc r2,#2")
TEST_IT("itete le","movle r0,#0\n\t movgt r1,#1\n\t movle r2,#2\n\t movgt r3,#3")
TEST_IT("itttt le","movle r0,#0\n\t movle r1,#1\n\t movle r2,#2\n\t movle r3,#3")
TEST_IT("iteee le","movle r0,#0\n\t movgt r1,#1\n\t movgt r2,#2\n\t movgt r3,#3")
)
TEST_GROUP("Load and store multiple")
TEST_P("ldmia r",4, 16*4,"!, {r0,r7}")
TEST_P("ldmia r",7, 16*4,"!, {r0-r6}")
TEST_P("stmia r",4, 16*4,"!, {r0,r7}")
TEST_P("stmia r",0, 16*4,"!, {r0-r7}")
TEST_GROUP("Conditional branch and Supervisor Call instructions")
/* Condition code field is at bit position 8 in these encodings */
CONDITION_INSTRUCTIONS(8,
TEST_BF("beq 2f")
TEST_BB("bne 2b")
TEST_BF("bgt 2f")
TEST_BB("blt 2b")
)
TEST_UNSUPPORTED(__inst_thumb16(0xde00) "")
TEST_UNSUPPORTED(__inst_thumb16(0xdeff) "")
TEST_UNSUPPORTED("svc #0x00")
TEST_UNSUPPORTED("svc #0xff")
TEST_GROUP("Unconditional branch")
TEST_BF( "b 2f")
TEST_BB( "b 2b")
TEST_BF_X("b 2f", SPACE_0x400)
TEST_BB_X("b 2b", SPACE_0x400)
TEST_GROUP("Testing instructions in IT blocks")
TEST_ITBLOCK("subs.n r0, r0")
verbose("\n");
}
void kprobe_thumb32_test_cases(void)
{
kprobe_test_flags = 0;
TEST_GROUP("Load/store multiple")
TEST_UNSUPPORTED("rfedb sp")
TEST_UNSUPPORTED("rfeia sp")
TEST_UNSUPPORTED("rfedb sp!")
TEST_UNSUPPORTED("rfeia sp!")
TEST_P( "stmia r",0, 16*4,", {r0,r8}")
TEST_P( "stmia r",4, 16*4,", {r0-r12,r14}")
TEST_P( "stmia r",7, 16*4,"!, {r8-r12,r14}")
TEST_P( "stmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
TEST_P( "ldmia r",0, 16*4,", {r0,r8}")
TEST_P( "ldmia r",4, 0, ", {r0-r12,r14}")
TEST_BF_P("ldmia r",5, 8*4, "!, {r6-r12,r15}")
TEST_P( "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
TEST_BF_P("ldmia r",14,14*4,"!, {r4,pc}")
TEST_P( "stmdb r",0, 16*4,", {r0,r8}")
TEST_P( "stmdb r",4, 16*4,", {r0-r12,r14}")
TEST_P( "stmdb r",5, 16*4,"!, {r8-r12,r14}")
TEST_P( "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
TEST_P( "ldmdb r",0, 16*4,", {r0,r8}")
TEST_P( "ldmdb r",4, 16*4,", {r0-r12,r14}")
TEST_BF_P("ldmdb r",5, 16*4,"!, {r6-r12,r15}")
TEST_P( "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
TEST_BF_P("ldmdb r",14,16*4,"!, {r4,pc}")
TEST_P( "stmdb r",13,16*4,"!, {r3-r12,lr}")
TEST_P( "stmdb r",13,16*4,"!, {r3-r12}")
TEST_P( "stmdb r",2, 16*4,", {r3-r12,lr}")
TEST_P( "stmdb r",13,16*4,"!, {r2-r12,lr}")
TEST_P( "stmdb r",0, 16*4,", {r0-r12}")
TEST_P( "stmdb r",0, 16*4,", {r0-r12,lr}")
TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}")
TEST_P( "ldmia r",13,5*4, "!, {r3-r12}")
TEST_BF_P("ldmia r",2, 5*4, "!, {r3-r12,pc}")
TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}")
TEST_P( "ldmia r",0, 16*4,", {r0-r12}")
TEST_P( "ldmia r",0, 16*4,", {r0-r12,lr}")
TEST_THUMB_TO_ARM_INTERWORK_P("ldmia r",0,14*4,", {r12,pc}")
TEST_THUMB_TO_ARM_INTERWORK_P("ldmia r",13,2*4,", {r0-r12,pc}")
TEST_UNSUPPORTED(__inst_thumb32(0xe88f0101) " @ stmia pc, {r0,r8}")
TEST_UNSUPPORTED(__inst_thumb32(0xe92f5f00) " @ stmdb pc!, {r8-r12,r14}")
TEST_UNSUPPORTED(__inst_thumb32(0xe8bdc000) " @ ldmia r13!, {r14,pc}")
TEST_UNSUPPORTED(__inst_thumb32(0xe93ec000) " @ ldmdb r14!, {r14,pc}")
TEST_UNSUPPORTED(__inst_thumb32(0xe8a73f00) " @ stmia r7!, {r8-r12,sp}")
TEST_UNSUPPORTED(__inst_thumb32(0xe8a79f00) " @ stmia r7!, {r8-r12,pc}")
TEST_UNSUPPORTED(__inst_thumb32(0xe93e2010) " @ ldmdb r14!, {r4,sp}")
TEST_GROUP("Load/store double or exclusive, table branch")
TEST_P( "ldrd r0, r1, [r",1, 24,", #-16]")
TEST( "ldrd r12, r14, [sp, #16]")
TEST_P( "ldrd r1, r0, [r",7, 24,", #-16]!")
TEST( "ldrd r14, r12, [sp, #16]!")
TEST_P( "ldrd r1, r0, [r",7, 24,"], #16")
TEST( "ldrd r7, r8, [sp], #-16")
TEST_X( "ldrd r12, r14, 3f",
".align 3 \n\t"
"3: .word "__stringify(VAL1)" \n\t"
" .word "__stringify(VAL2))
TEST_UNSUPPORTED(__inst_thumb32(0xe9ffec04) " @ ldrd r14, r12, [pc, #16]!")
TEST_UNSUPPORTED(__inst_thumb32(0xe8ffec04) " @ ldrd r14, r12, [pc], #16")
TEST_UNSUPPORTED(__inst_thumb32(0xe9d4d800) " @ ldrd sp, r8, [r4]")
TEST_UNSUPPORTED(__inst_thumb32(0xe9d4f800) " @ ldrd pc, r8, [r4]")
TEST_UNSUPPORTED(__inst_thumb32(0xe9d47d00) " @ ldrd r7, sp, [r4]")
TEST_UNSUPPORTED(__inst_thumb32(0xe9d47f00) " @ ldrd r7, pc, [r4]")
TEST_RRP("strd r",0, VAL1,", r",1, VAL2,", [r",1, 24,", #-16]")
TEST_RR( "strd r",12,VAL2,", r",14,VAL1,", [sp, #16]")
TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,", #-16]!")
TEST_RR( "strd r",14,VAL2,", r",12,VAL1,", [sp, #16]!")
TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,"], #16")
TEST_RR( "strd r",7, VAL2,", r",8, VAL1,", [sp], #-16")
TEST_RRP("strd r",6, VAL1,", r",7, VAL2,", [r",13, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!")
TEST_UNSUPPORTED("strd r6, r7, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!")
TEST_RRP("strd r",4, VAL1,", r",5, VAL2,", [r",14, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!")
TEST_UNSUPPORTED(__inst_thumb32(0xe9efec04) " @ strd r14, r12, [pc, #16]!")
TEST_UNSUPPORTED(__inst_thumb32(0xe8efec04) " @ strd r14, r12, [pc], #16")
TEST_RX("tbb [pc, r",0, (9f-(1f+4)),"]",
"9: \n\t"
".byte (2f-1b-4)>>1 \n\t"
".byte (3f-1b-4)>>1 \n\t"
"3: mvn r0, r0 \n\t"
"2: nop \n\t")
TEST_RX("tbb [pc, r",4, (9f-(1f+4)+1),"]",
"9: \n\t"
".byte (2f-1b-4)>>1 \n\t"
".byte (3f-1b-4)>>1 \n\t"
"3: mvn r0, r0 \n\t"
"2: nop \n\t")
TEST_RRX("tbb [r",1,9f,", r",2,0,"]",
"9: \n\t"
".byte (2f-1b-4)>>1 \n\t"
".byte (3f-1b-4)>>1 \n\t"
"3: mvn r0, r0 \n\t"
"2: nop \n\t")
TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,"]",
"9: \n\t"
".short (2f-1b-4)>>1 \n\t"
".short (3f-1b-4)>>1 \n\t"
"3: mvn r0, r0 \n\t"
"2: nop \n\t")
TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,"]",
"9: \n\t"
".short (2f-1b-4)>>1 \n\t"
".short (3f-1b-4)>>1 \n\t"
"3: mvn r0, r0 \n\t"
"2: nop \n\t")
TEST_RRX("tbh [r",1,9f, ", r",14,1,"]",
"9: \n\t"
".short (2f-1b-4)>>1 \n\t"
".short (3f-1b-4)>>1 \n\t"
"3: mvn r0, r0 \n\t"
"2: nop \n\t")
TEST_UNSUPPORTED(__inst_thumb32(0xe8d1f01f) " @ tbh [r1, pc]")
TEST_UNSUPPORTED(__inst_thumb32(0xe8d1f01d) " @ tbh [r1, sp]")
TEST_UNSUPPORTED(__inst_thumb32(0xe8ddf012) " @ tbh [sp, r2]")
TEST_UNSUPPORTED("strexb r0, r1, [r2]")
TEST_UNSUPPORTED("strexh r0, r1, [r2]")
TEST_UNSUPPORTED("strexd r0, r1, [r2]")
TEST_UNSUPPORTED("ldrexb r0, [r1]")
TEST_UNSUPPORTED("ldrexh r0, [r1]")
TEST_UNSUPPORTED("ldrexd r0, [r1]")
TEST_GROUP("Data-processing (shifted register) and (modified immediate)")
#define _DATA_PROCESSING32_DNM(op,s,val) \
TEST_RR(op s".w r0, r",1, VAL1,", r",2, val, "") \
TEST_RR(op s" r1, r",1, VAL1,", r",2, val, ", lsl #3") \
TEST_RR(op s" r2, r",3, VAL1,", r",2, val, ", lsr #4") \
TEST_RR(op s" r3, r",3, VAL1,", r",2, val, ", asr #5") \
TEST_RR(op s" r4, r",5, VAL1,", r",2, N(val),", asr #6") \
TEST_RR(op s" r5, r",5, VAL1,", r",2, val, ", ror #7") \
TEST_RR(op s" r8, r",9, VAL1,", r",10,val, ", rrx") \
TEST_R( op s" r0, r",11,VAL1,", #0x00010001") \
TEST_R( op s" r11, r",0, VAL1,", #0xf5000000") \
TEST_R( op s" r7, r",8, VAL2,", #0x000af000")
#define DATA_PROCESSING32_DNM(op,val) \
_DATA_PROCESSING32_DNM(op,"",val) \
_DATA_PROCESSING32_DNM(op,"s",val)
#define DATA_PROCESSING32_NM(op,val) \
TEST_RR(op".w r",1, VAL1,", r",2, val, "") \
TEST_RR(op" r",1, VAL1,", r",2, val, ", lsl #3") \
TEST_RR(op" r",3, VAL1,", r",2, val, ", lsr #4") \
TEST_RR(op" r",3, VAL1,", r",2, val, ", asr #5") \
TEST_RR(op" r",5, VAL1,", r",2, N(val),", asr #6") \
TEST_RR(op" r",5, VAL1,", r",2, val, ", ror #7") \
TEST_RR(op" r",9, VAL1,", r",10,val, ", rrx") \
TEST_R( op" r",11,VAL1,", #0x00010001") \
TEST_R( op" r",0, VAL1,", #0xf5000000") \
TEST_R( op" r",8, VAL2,", #0x000af000")
#define _DATA_PROCESSING32_DM(op,s,val) \
TEST_R( op s".w r0, r",14, val, "") \
TEST_R( op s" r1, r",12, val, ", lsl #3") \
TEST_R( op s" r2, r",11, val, ", lsr #4") \
TEST_R( op s" r3, r",10, val, ", asr #5") \
TEST_R( op s" r4, r",9, N(val),", asr #6") \
TEST_R( op s" r5, r",8, val, ", ror #7") \
TEST_R( op s" r8, r",7,val, ", rrx") \
TEST( op s" r0, #0x00010001") \
TEST( op s" r11, #0xf5000000") \
TEST( op s" r7, #0x000af000") \
TEST( op s" r4, #0x00005a00")
#define DATA_PROCESSING32_DM(op,val) \
_DATA_PROCESSING32_DM(op,"",val) \
_DATA_PROCESSING32_DM(op,"s",val)
DATA_PROCESSING32_DNM("and",0xf00f00ff)
DATA_PROCESSING32_NM("tst",0xf00f00ff)
DATA_PROCESSING32_DNM("bic",0xf00f00ff)
DATA_PROCESSING32_DNM("orr",0xf00f00ff)
DATA_PROCESSING32_DM("mov",VAL2)
DATA_PROCESSING32_DNM("orn",0xf00f00ff)
DATA_PROCESSING32_DM("mvn",VAL2)
DATA_PROCESSING32_DNM("eor",0xf00f00ff)
DATA_PROCESSING32_NM("teq",0xf00f00ff)
DATA_PROCESSING32_DNM("add",VAL2)
DATA_PROCESSING32_NM("cmn",VAL2)
DATA_PROCESSING32_DNM("adc",VAL2)
DATA_PROCESSING32_DNM("sbc",VAL2)
DATA_PROCESSING32_DNM("sub",VAL2)
DATA_PROCESSING32_NM("cmp",VAL2)
DATA_PROCESSING32_DNM("rsb",VAL2)
TEST_RR("pkhbt r0, r",0, HH1,", r",1, HH2,"")
TEST_RR("pkhbt r14,r",12, HH1,", r",10,HH2,", lsl #2")
TEST_RR("pkhtb r0, r",0, HH1,", r",1, HH2,"")
TEST_RR("pkhtb r14,r",12, HH1,", r",10,HH2,", asr #2")
TEST_UNSUPPORTED(__inst_thumb32(0xea170f0d) " @ tst.w r7, sp")
TEST_UNSUPPORTED(__inst_thumb32(0xea170f0f) " @ tst.w r7, pc")
TEST_UNSUPPORTED(__inst_thumb32(0xea1d0f07) " @ tst.w sp, r7")
TEST_UNSUPPORTED(__inst_thumb32(0xea1f0f07) " @ tst.w pc, r7")
TEST_UNSUPPORTED(__inst_thumb32(0xf01d1f08) " @ tst sp, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xf01f1f08) " @ tst pc, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xea970f0d) " @ teq.w r7, sp")
TEST_UNSUPPORTED(__inst_thumb32(0xea970f0f) " @ teq.w r7, pc")
TEST_UNSUPPORTED(__inst_thumb32(0xea9d0f07) " @ teq.w sp, r7")
TEST_UNSUPPORTED(__inst_thumb32(0xea9f0f07) " @ teq.w pc, r7")
TEST_UNSUPPORTED(__inst_thumb32(0xf09d1f08) " @ tst sp, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xf09f1f08) " @ tst pc, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xeb170f0d) " @ cmn.w r7, sp")
TEST_UNSUPPORTED(__inst_thumb32(0xeb170f0f) " @ cmn.w r7, pc")
TEST_P("cmn.w sp, r",7,0,"")
TEST_UNSUPPORTED(__inst_thumb32(0xeb1f0f07) " @ cmn.w pc, r7")
TEST( "cmn sp, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xf11f1f08) " @ cmn pc, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xebb70f0d) " @ cmp.w r7, sp")
TEST_UNSUPPORTED(__inst_thumb32(0xebb70f0f) " @ cmp.w r7, pc")
TEST_P("cmp.w sp, r",7,0,"")
TEST_UNSUPPORTED(__inst_thumb32(0xebbf0f07) " @ cmp.w pc, r7")
TEST( "cmp sp, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xf1bf1f08) " @ cmp pc, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xea5f070d) " @ movs.w r7, sp")
TEST_UNSUPPORTED(__inst_thumb32(0xea5f070f) " @ movs.w r7, pc")
TEST_UNSUPPORTED(__inst_thumb32(0xea5f0d07) " @ movs.w sp, r7")
TEST_UNSUPPORTED(__inst_thumb32(0xea4f0f07) " @ mov.w pc, r7")
TEST_UNSUPPORTED(__inst_thumb32(0xf04f1d08) " @ mov sp, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xf04f1f08) " @ mov pc, #0x00080008")
TEST_R("add.w r0, sp, r",1, 4,"")
TEST_R("adds r0, sp, r",1, 4,", asl #3")
TEST_R("add r0, sp, r",1, 4,", asl #4")
TEST_R("add r0, sp, r",1, 16,", ror #1")
TEST_R("add.w sp, sp, r",1, 4,"")
TEST_R("add sp, sp, r",1, 4,", asl #3")
TEST_UNSUPPORTED(__inst_thumb32(0xeb0d1d01) " @ add sp, sp, r1, asl #4")
TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d71) " @ add sp, sp, r1, ror #1")
TEST( "add.w r0, sp, #24")
TEST( "add.w sp, sp, #24")
TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0f01) " @ add pc, sp, r1")
TEST_UNSUPPORTED(__inst_thumb32(0xeb0d000f) " @ add r0, sp, pc")
TEST_UNSUPPORTED(__inst_thumb32(0xeb0d000d) " @ add r0, sp, sp")
TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d0f) " @ add sp, sp, pc")
TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d0d) " @ add sp, sp, sp")
TEST_R("sub.w r0, sp, r",1, 4,"")
TEST_R("subs r0, sp, r",1, 4,", asl #3")
TEST_R("sub r0, sp, r",1, 4,", asl #4")
TEST_R("sub r0, sp, r",1, 16,", ror #1")
TEST_R("sub.w sp, sp, r",1, 4,"")
TEST_R("sub sp, sp, r",1, 4,", asl #3")
TEST_UNSUPPORTED(__inst_thumb32(0xebad1d01) " @ sub sp, sp, r1, asl #4")
TEST_UNSUPPORTED(__inst_thumb32(0xebad0d71) " @ sub sp, sp, r1, ror #1")
TEST_UNSUPPORTED(__inst_thumb32(0xebad0f01) " @ sub pc, sp, r1")
TEST( "sub.w r0, sp, #24")
TEST( "sub.w sp, sp, #24")
TEST_UNSUPPORTED(__inst_thumb32(0xea02010f) " @ and r1, r2, pc")
TEST_UNSUPPORTED(__inst_thumb32(0xea0f0103) " @ and r1, pc, r3")
TEST_UNSUPPORTED(__inst_thumb32(0xea020f03) " @ and pc, r2, r3")
TEST_UNSUPPORTED(__inst_thumb32(0xea02010d) " @ and r1, r2, sp")
TEST_UNSUPPORTED(__inst_thumb32(0xea0d0103) " @ and r1, sp, r3")
TEST_UNSUPPORTED(__inst_thumb32(0xea020d03) " @ and sp, r2, r3")
TEST_UNSUPPORTED(__inst_thumb32(0xf00d1108) " @ and r1, sp, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xf00f1108) " @ and r1, pc, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xf0021d08) " @ and sp, r8, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xf0021f08) " @ and pc, r8, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xeb02010f) " @ add r1, r2, pc")
TEST_UNSUPPORTED(__inst_thumb32(0xeb0f0103) " @ add r1, pc, r3")
TEST_UNSUPPORTED(__inst_thumb32(0xeb020f03) " @ add pc, r2, r3")
TEST_UNSUPPORTED(__inst_thumb32(0xeb02010d) " @ add r1, r2, sp")
TEST_SUPPORTED( __inst_thumb32(0xeb0d0103) " @ add r1, sp, r3")
TEST_UNSUPPORTED(__inst_thumb32(0xeb020d03) " @ add sp, r2, r3")
TEST_SUPPORTED( __inst_thumb32(0xf10d1108) " @ add r1, sp, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xf10d1f08) " @ add pc, sp, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xf10f1108) " @ add r1, pc, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xf1021d08) " @ add sp, r8, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xf1021f08) " @ add pc, r8, #0x00080008")
TEST_UNSUPPORTED(__inst_thumb32(0xeaa00000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xeaf00000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xeb200000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xeb800000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xebe00000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xf0a00000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xf0c00000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xf0f00000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xf1200000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xf1800000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xf1e00000) "")
TEST_GROUP("Coprocessor instructions")
TEST_UNSUPPORTED(__inst_thumb32(0xec000000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xeff00000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xfc000000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xfff00000) "")
TEST_GROUP("Data-processing (plain binary immediate)")
TEST_R("addw r0, r",1, VAL1,", #0x123")
TEST( "addw r14, sp, #0xf5a")
TEST( "addw sp, sp, #0x20")
TEST( "addw r7, pc, #0x888")
TEST_UNSUPPORTED(__inst_thumb32(0xf20f1f20) " @ addw pc, pc, #0x120")
TEST_UNSUPPORTED(__inst_thumb32(0xf20d1f20) " @ addw pc, sp, #0x120")
TEST_UNSUPPORTED(__inst_thumb32(0xf20f1d20) " @ addw sp, pc, #0x120")
TEST_UNSUPPORTED(__inst_thumb32(0xf2001d20) " @ addw sp, r0, #0x120")
TEST_R("subw r0, r",1, VAL1,", #0x123")
TEST( "subw r14, sp, #0xf5a")
TEST( "subw sp, sp, #0x20")
TEST( "subw r7, pc, #0x888")
TEST_UNSUPPORTED(__inst_thumb32(0xf2af1f20) " @ subw pc, pc, #0x120")
TEST_UNSUPPORTED(__inst_thumb32(0xf2ad1f20) " @ subw pc, sp, #0x120")
TEST_UNSUPPORTED(__inst_thumb32(0xf2af1d20) " @ subw sp, pc, #0x120")
TEST_UNSUPPORTED(__inst_thumb32(0xf2a01d20) " @ subw sp, r0, #0x120")
TEST("movw r0, #0")
TEST("movw r0, #0xffff")
TEST("movw lr, #0xffff")
TEST_UNSUPPORTED(__inst_thumb32(0xf2400d00) " @ movw sp, #0")
TEST_UNSUPPORTED(__inst_thumb32(0xf2400f00) " @ movw pc, #0")
TEST_R("movt r",0, VAL1,", #0")
TEST_R("movt r",0, VAL2,", #0xffff")
TEST_R("movt r",14,VAL1,", #0xffff")
TEST_UNSUPPORTED(__inst_thumb32(0xf2c00d00) " @ movt sp, #0")
TEST_UNSUPPORTED(__inst_thumb32(0xf2c00f00) " @ movt pc, #0")
TEST_R( "ssat r0, #24, r",0, VAL1,"")
TEST_R( "ssat r14, #24, r",12, VAL2,"")
TEST_R( "ssat r0, #24, r",0, VAL1,", lsl #8")
TEST_R( "ssat r14, #24, r",12, VAL2,", asr #8")
TEST_UNSUPPORTED(__inst_thumb32(0xf30c0d17) " @ ssat sp, #24, r12")
TEST_UNSUPPORTED(__inst_thumb32(0xf30c0f17) " @ ssat pc, #24, r12")
TEST_UNSUPPORTED(__inst_thumb32(0xf30d0c17) " @ ssat r12, #24, sp")
TEST_UNSUPPORTED(__inst_thumb32(0xf30f0c17) " @ ssat r12, #24, pc")
TEST_R( "usat r0, #24, r",0, VAL1,"")
TEST_R( "usat r14, #24, r",12, VAL2,"")
TEST_R( "usat r0, #24, r",0, VAL1,", lsl #8")
TEST_R( "usat r14, #24, r",12, VAL2,", asr #8")
TEST_UNSUPPORTED(__inst_thumb32(0xf38c0d17) " @ usat sp, #24, r12")
TEST_UNSUPPORTED(__inst_thumb32(0xf38c0f17) " @ usat pc, #24, r12")
TEST_UNSUPPORTED(__inst_thumb32(0xf38d0c17) " @ usat r12, #24, sp")
TEST_UNSUPPORTED(__inst_thumb32(0xf38f0c17) " @ usat r12, #24, pc")
TEST_R( "ssat16 r0, #12, r",0, HH1,"")
TEST_R( "ssat16 r14, #12, r",12, HH2,"")
TEST_UNSUPPORTED(__inst_thumb32(0xf32c0d0b) " @ ssat16 sp, #12, r12")
TEST_UNSUPPORTED(__inst_thumb32(0xf32c0f0b) " @ ssat16 pc, #12, r12")
TEST_UNSUPPORTED(__inst_thumb32(0xf32d0c0b) " @ ssat16 r12, #12, sp")
TEST_UNSUPPORTED(__inst_thumb32(0xf32f0c0b) " @ ssat16 r12, #12, pc")
TEST_R( "usat16 r0, #12, r",0, HH1,"")
TEST_R( "usat16 r14, #12, r",12, HH2,"")
TEST_UNSUPPORTED(__inst_thumb32(0xf3ac0d0b) " @ usat16 sp, #12, r12")
TEST_UNSUPPORTED(__inst_thumb32(0xf3ac0f0b) " @ usat16 pc, #12, r12")
TEST_UNSUPPORTED(__inst_thumb32(0xf3ad0c0b) " @ usat16 r12, #12, sp")
TEST_UNSUPPORTED(__inst_thumb32(0xf3af0c0b) " @ usat16 r12, #12, pc")
TEST_R( "sbfx r0, r",0 , VAL1,", #0, #31")
TEST_R( "sbfx r14, r",12, VAL2,", #8, #16")
TEST_R( "sbfx r4, r",10, VAL1,", #16, #15")
TEST_UNSUPPORTED(__inst_thumb32(0xf34c2d0f) " @ sbfx sp, r12, #8, #16")
TEST_UNSUPPORTED(__inst_thumb32(0xf34c2f0f) " @ sbfx pc, r12, #8, #16")
TEST_UNSUPPORTED(__inst_thumb32(0xf34d2c0f) " @ sbfx r12, sp, #8, #16")
TEST_UNSUPPORTED(__inst_thumb32(0xf34f2c0f) " @ sbfx r12, pc, #8, #16")
TEST_R( "ubfx r0, r",0 , VAL1,", #0, #31")
TEST_R( "ubfx r14, r",12, VAL2,", #8, #16")
TEST_R( "ubfx r4, r",10, VAL1,", #16, #15")
TEST_UNSUPPORTED(__inst_thumb32(0xf3cc2d0f) " @ ubfx sp, r12, #8, #16")
TEST_UNSUPPORTED(__inst_thumb32(0xf3cc2f0f) " @ ubfx pc, r12, #8, #16")
TEST_UNSUPPORTED(__inst_thumb32(0xf3cd2c0f) " @ ubfx r12, sp, #8, #16")
TEST_UNSUPPORTED(__inst_thumb32(0xf3cf2c0f) " @ ubfx r12, pc, #8, #16")
TEST_R( "bfc r",0, VAL1,", #4, #20")
TEST_R( "bfc r",14,VAL2,", #4, #20")
TEST_R( "bfc r",7, VAL1,", #0, #31")
TEST_R( "bfc r",8, VAL2,", #0, #31")
TEST_UNSUPPORTED(__inst_thumb32(0xf36f0d1e) " @ bfc sp, #0, #31")
TEST_UNSUPPORTED(__inst_thumb32(0xf36f0f1e) " @ bfc pc, #0, #31")
TEST_RR( "bfi r",0, VAL1,", r",0 , VAL2,", #0, #31")
TEST_RR( "bfi r",12,VAL1,", r",14 , VAL2,", #4, #20")
TEST_UNSUPPORTED(__inst_thumb32(0xf36e1d17) " @ bfi sp, r14, #4, #20")
TEST_UNSUPPORTED(__inst_thumb32(0xf36e1f17) " @ bfi pc, r14, #4, #20")
TEST_UNSUPPORTED(__inst_thumb32(0xf36d1e17) " @ bfi r14, sp, #4, #20")
TEST_GROUP("Branches and miscellaneous control")
CONDITION_INSTRUCTIONS(22,
TEST_BF("beq.w 2f")
TEST_BB("bne.w 2b")
TEST_BF("bgt.w 2f")
TEST_BB("blt.w 2b")
TEST_BF_X("bpl.w 2f", SPACE_0x1000)
)
TEST_UNSUPPORTED("msr cpsr, r0")
TEST_UNSUPPORTED("msr cpsr_f, r1")
TEST_UNSUPPORTED("msr spsr, r2")
TEST_UNSUPPORTED("cpsie.w i")
TEST_UNSUPPORTED("cpsid.w i")
TEST_UNSUPPORTED("cps 0x13")
TEST_SUPPORTED("yield.w")
TEST("sev.w")
TEST("nop.w")
TEST("wfi.w")
TEST_SUPPORTED("wfe.w")
TEST_UNSUPPORTED("dbg.w #0")
TEST_UNSUPPORTED("clrex")
TEST_UNSUPPORTED("dsb")
TEST_UNSUPPORTED("dmb")
TEST_UNSUPPORTED("isb")
TEST_UNSUPPORTED("bxj r0")
TEST_UNSUPPORTED("subs pc, lr, #4")
TEST_RMASKED("mrs r",0,~PSR_IGNORE_BITS,", cpsr")
TEST_RMASKED("mrs r",14,~PSR_IGNORE_BITS,", cpsr")
TEST_UNSUPPORTED(__inst_thumb32(0xf3ef8d00) " @ mrs sp, spsr")
TEST_UNSUPPORTED(__inst_thumb32(0xf3ef8f00) " @ mrs pc, spsr")
TEST_UNSUPPORTED("mrs r0, spsr")
TEST_UNSUPPORTED("mrs lr, spsr")
TEST_UNSUPPORTED(__inst_thumb32(0xf7f08000) " @ smc #0")
TEST_UNSUPPORTED(__inst_thumb32(0xf7f0a000) " @ undefeined")
TEST_BF( "b.w 2f")
TEST_BB( "b.w 2b")
TEST_BF_X("b.w 2f", SPACE_0x1000)
TEST_BF( "bl.w 2f")
TEST_BB( "bl.w 2b")
TEST_BB_X("bl.w 2b", SPACE_0x1000)
TEST_X( "blx __dummy_arm_subroutine",
".arm \n\t"
".align \n\t"
".type __dummy_arm_subroutine, %%function \n\t"
"__dummy_arm_subroutine: \n\t"
"mov r0, pc \n\t"
"bx lr \n\t"
".thumb \n\t"
)
TEST( "blx __dummy_arm_subroutine")
TEST_GROUP("Store single data item")
#define SINGLE_STORE(size) \
TEST_RP( "str"size" r",0, VAL1,", [r",11,-1024,", #1024]") \
TEST_RP( "str"size" r",14,VAL2,", [r",1, -1024,", #1080]") \
TEST_RP( "str"size" r",0, VAL1,", [r",11,256, ", #-120]") \
TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]") \
TEST_RP( "str"size" r",0, VAL1,", [r",11,24, "], #120") \
TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, "], #128") \
TEST_RP( "str"size" r",0, VAL1,", [r",11,24, "], #-120") \
TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, "], #-128") \
TEST_RP( "str"size" r",0, VAL1,", [r",11,24, ", #120]!") \
TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, ", #128]!") \
TEST_RP( "str"size" r",0, VAL1,", [r",11,256, ", #-120]!") \
TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]!") \
TEST_RPR("str"size".w r",0, VAL1,", [r",1, 0,", r",2, 4,"]") \
TEST_RPR("str"size" r",14,VAL2,", [r",10,0,", r",11,4,", lsl #1]") \
TEST_UNSUPPORTED("str"size" r0, [r13, r1]") \
TEST_R( "str"size".w r",7, VAL1,", [sp, #24]") \
TEST_RP( "str"size".w r",0, VAL2,", [r",0,0, "]") \
TEST_RP( "str"size" r",6, VAL1,", [r",13, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") \
TEST_UNSUPPORTED("str"size" r6, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!") \
TEST_RP( "str"size" r",4, VAL2,", [r",12, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!") \
TEST_UNSUPPORTED("str"size"t r0, [r1, #4]")
SINGLE_STORE("b")
SINGLE_STORE("h")
SINGLE_STORE("")
TEST_UNSUPPORTED(__inst_thumb32(0xf801000d) " @ strb r0, [r1, r13]")
TEST_UNSUPPORTED(__inst_thumb32(0xf821000d) " @ strh r0, [r1, r13]")
TEST_UNSUPPORTED(__inst_thumb32(0xf841000d) " @ str r0, [r1, r13]")
TEST("str sp, [sp]")
TEST_UNSUPPORTED(__inst_thumb32(0xf8cfe000) " @ str r14, [pc]")
TEST_UNSUPPORTED(__inst_thumb32(0xf8cef000) " @ str pc, [r14]")
TEST_GROUP("Advanced SIMD element or structure load/store instructions")
TEST_UNSUPPORTED(__inst_thumb32(0xf9000000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xf92fffff) "")
TEST_UNSUPPORTED(__inst_thumb32(0xf9800000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xf9efffff) "")
TEST_GROUP("Load single data item and memory hints")
#define SINGLE_LOAD(size) \
TEST_P( "ldr"size" r0, [r",11,-1024, ", #1024]") \
TEST_P( "ldr"size" r14, [r",1, -1024,", #1080]") \
TEST_P( "ldr"size" r0, [r",11,256, ", #-120]") \
TEST_P( "ldr"size" r14, [r",1, 256, ", #-128]") \
TEST_P( "ldr"size" r0, [r",11,24, "], #120") \
TEST_P( "ldr"size" r14, [r",1, 24, "], #128") \
TEST_P( "ldr"size" r0, [r",11,24, "], #-120") \
TEST_P( "ldr"size" r14, [r",1,24, "], #-128") \
TEST_P( "ldr"size" r0, [r",11,24, ", #120]!") \
TEST_P( "ldr"size" r14, [r",1, 24, ", #128]!") \
TEST_P( "ldr"size" r0, [r",11,256, ", #-120]!") \
TEST_P( "ldr"size" r14, [r",1, 256, ", #-128]!") \
TEST_PR("ldr"size".w r0, [r",1, 0,", r",2, 4,"]") \
TEST_PR("ldr"size" r14, [r",10,0,", r",11,4,", lsl #1]") \
TEST_X( "ldr"size".w r0, 3f", \
".align 3 \n\t" \
"3: .word "__stringify(VAL1)) \
TEST_X( "ldr"size".w r14, 3f", \
".align 3 \n\t" \
"3: .word "__stringify(VAL2)) \
TEST( "ldr"size".w r7, 3b") \
TEST( "ldr"size".w r7, [sp, #24]") \
TEST_P( "ldr"size".w r0, [r",0,0, "]") \
TEST_UNSUPPORTED("ldr"size"t r0, [r1, #4]")
SINGLE_LOAD("b")
SINGLE_LOAD("sb")
SINGLE_LOAD("h")
SINGLE_LOAD("sh")
SINGLE_LOAD("")
TEST_BF_P("ldr pc, [r",14, 15*4,"]")
TEST_P( "ldr sp, [r",14, 13*4,"]")
TEST_BF_R("ldr pc, [sp, r",14, 15*4,"]")
TEST_R( "ldr sp, [sp, r",14, 13*4,"]")
TEST_THUMB_TO_ARM_INTERWORK_P("ldr pc, [r",0,0,", #15*4]")
TEST_SUPPORTED("ldr sp, 99f")
TEST_SUPPORTED("ldr pc, 99f")
TEST_UNSUPPORTED(__inst_thumb32(0xf854700d) " @ ldr r7, [r4, sp]")
TEST_UNSUPPORTED(__inst_thumb32(0xf854700f) " @ ldr r7, [r4, pc]")
TEST_UNSUPPORTED(__inst_thumb32(0xf814700d) " @ ldrb r7, [r4, sp]")
TEST_UNSUPPORTED(__inst_thumb32(0xf814700f) " @ ldrb r7, [r4, pc]")
TEST_UNSUPPORTED(__inst_thumb32(0xf89fd004) " @ ldrb sp, 99f")
TEST_UNSUPPORTED(__inst_thumb32(0xf814d008) " @ ldrb sp, [r4, r8]")
TEST_UNSUPPORTED(__inst_thumb32(0xf894d000) " @ ldrb sp, [r4]")
TEST_UNSUPPORTED(__inst_thumb32(0xf8600000) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xf9ffffff) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xf9500000) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xf95fffff) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xf8000800) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xf97ffaff) "") /* Unallocated space */
TEST( "pli [pc, #4]")
TEST( "pli [pc, #-4]")
TEST( "pld [pc, #4]")
TEST( "pld [pc, #-4]")
TEST_P( "pld [r",0,-1024,", #1024]")
TEST( __inst_thumb32(0xf8b0f400) " @ pldw [r0, #1024]")
TEST_P( "pli [r",4, 0b,", #1024]")
TEST_P( "pld [r",7, 120,", #-120]")
TEST( __inst_thumb32(0xf837fc78) " @ pldw [r7, #-120]")
TEST_P( "pli [r",11,120,", #-120]")
TEST( "pld [sp, #0]")
TEST_PR("pld [r",7, 24, ", r",0, 16,"]")
TEST_PR("pld [r",8, 24, ", r",12,16,", lsl #3]")
TEST_SUPPORTED(__inst_thumb32(0xf837f000) " @ pldw [r7, r0]")
TEST_SUPPORTED(__inst_thumb32(0xf838f03c) " @ pldw [r8, r12, lsl #3]");
TEST_RR("pli [r",12,0b,", r",0, 16,"]")
TEST_RR("pli [r",0, 0b,", r",12,16,", lsl #3]")
TEST_R( "pld [sp, r",1, 16,"]")
TEST_UNSUPPORTED(__inst_thumb32(0xf817f00d) " @pld [r7, sp]")
TEST_UNSUPPORTED(__inst_thumb32(0xf817f00f) " @pld [r7, pc]")
TEST_GROUP("Data-processing (register)")
#define SHIFTS32(op) \
TEST_RR(op" r0, r",1, VAL1,", r",2, 3, "") \
TEST_RR(op" r14, r",12,VAL2,", r",11,10,"")
SHIFTS32("lsl")
SHIFTS32("lsls")
SHIFTS32("lsr")
SHIFTS32("lsrs")
SHIFTS32("asr")
SHIFTS32("asrs")
SHIFTS32("ror")
SHIFTS32("rors")
TEST_UNSUPPORTED(__inst_thumb32(0xfa01ff02) " @ lsl pc, r1, r2")
TEST_UNSUPPORTED(__inst_thumb32(0xfa01fd02) " @ lsl sp, r1, r2")
TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff002) " @ lsl r0, pc, r2")
TEST_UNSUPPORTED(__inst_thumb32(0xfa0df002) " @ lsl r0, sp, r2")
TEST_UNSUPPORTED(__inst_thumb32(0xfa01f00f) " @ lsl r0, r1, pc")
TEST_UNSUPPORTED(__inst_thumb32(0xfa01f00d) " @ lsl r0, r1, sp")
TEST_RR( "sxtah r0, r",0, HH1,", r",1, HH2,"")
TEST_RR( "sxtah r14,r",12, HH2,", r",10,HH1,", ror #8")
TEST_R( "sxth r8, r",7, HH1,"")
TEST_UNSUPPORTED(__inst_thumb32(0xfa0fff87) " @ sxth pc, r7");
TEST_UNSUPPORTED(__inst_thumb32(0xfa0ffd87) " @ sxth sp, r7");
TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff88f) " @ sxth r8, pc");
TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff88d) " @ sxth r8, sp");
TEST_RR( "uxtah r0, r",0, HH1,", r",1, HH2,"")
TEST_RR( "uxtah r14,r",12, HH2,", r",10,HH1,", ror #8")
TEST_R( "uxth r8, r",7, HH1,"")
TEST_RR( "sxtab16 r0, r",0, HH1,", r",1, HH2,"")
TEST_RR( "sxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8")
TEST_R( "sxtb16 r8, r",7, HH1,"")
TEST_RR( "uxtab16 r0, r",0, HH1,", r",1, HH2,"")
TEST_RR( "uxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8")
TEST_R( "uxtb16 r8, r",7, HH1,"")
TEST_RR( "sxtab r0, r",0, HH1,", r",1, HH2,"")
TEST_RR( "sxtab r14,r",12, HH2,", r",10,HH1,", ror #8")
TEST_R( "sxtb r8, r",7, HH1,"")
TEST_RR( "uxtab r0, r",0, HH1,", r",1, HH2,"")
TEST_RR( "uxtab r14,r",12, HH2,", r",10,HH1,", ror #8")
TEST_R( "uxtb r8, r",7, HH1,"")
TEST_UNSUPPORTED(__inst_thumb32(0xfa6000f0) "")
TEST_UNSUPPORTED(__inst_thumb32(0xfa7fffff) "")
#define PARALLEL_ADD_SUB(op) \
TEST_RR( op"add16 r0, r",0, HH1,", r",1, HH2,"") \
TEST_RR( op"add16 r14, r",12,HH2,", r",10,HH1,"") \
TEST_RR( op"asx r0, r",0, HH1,", r",1, HH2,"") \
TEST_RR( op"asx r14, r",12,HH2,", r",10,HH1,"") \
TEST_RR( op"sax r0, r",0, HH1,", r",1, HH2,"") \
TEST_RR( op"sax r14, r",12,HH2,", r",10,HH1,"") \
TEST_RR( op"sub16 r0, r",0, HH1,", r",1, HH2,"") \
TEST_RR( op"sub16 r14, r",12,HH2,", r",10,HH1,"") \
TEST_RR( op"add8 r0, r",0, HH1,", r",1, HH2,"") \
TEST_RR( op"add8 r14, r",12,HH2,", r",10,HH1,"") \
TEST_RR( op"sub8 r0, r",0, HH1,", r",1, HH2,"") \
TEST_RR( op"sub8 r14, r",12,HH2,", r",10,HH1,"")
TEST_GROUP("Parallel addition and subtraction, signed")
PARALLEL_ADD_SUB("s")
PARALLEL_ADD_SUB("q")
PARALLEL_ADD_SUB("sh")
TEST_GROUP("Parallel addition and subtraction, unsigned")
PARALLEL_ADD_SUB("u")
PARALLEL_ADD_SUB("uq")
PARALLEL_ADD_SUB("uh")
TEST_GROUP("Miscellaneous operations")
TEST_RR("qadd r0, r",1, VAL1,", r",2, VAL2,"")
TEST_RR("qadd lr, r",9, VAL2,", r",8, VAL1,"")
TEST_RR("qsub r0, r",1, VAL1,", r",2, VAL2,"")
TEST_RR("qsub lr, r",9, VAL2,", r",8, VAL1,"")
TEST_RR("qdadd r0, r",1, VAL1,", r",2, VAL2,"")
TEST_RR("qdadd lr, r",9, VAL2,", r",8, VAL1,"")
TEST_RR("qdsub r0, r",1, VAL1,", r",2, VAL2,"")
TEST_RR("qdsub lr, r",9, VAL2,", r",8, VAL1,"")
TEST_R("rev.w r0, r",0, VAL1,"")
TEST_R("rev r14, r",12, VAL2,"")
TEST_R("rev16.w r0, r",0, VAL1,"")
TEST_R("rev16 r14, r",12, VAL2,"")
TEST_R("rbit r0, r",0, VAL1,"")
TEST_R("rbit r14, r",12, VAL2,"")
TEST_R("revsh.w r0, r",0, VAL1,"")
TEST_R("revsh r14, r",12, VAL2,"")
TEST_UNSUPPORTED(__inst_thumb32(0xfa9cff8c) " @ rev pc, r12");
TEST_UNSUPPORTED(__inst_thumb32(0xfa9cfd8c) " @ rev sp, r12");
TEST_UNSUPPORTED(__inst_thumb32(0xfa9ffe8f) " @ rev r14, pc");
TEST_UNSUPPORTED(__inst_thumb32(0xfa9dfe8d) " @ rev r14, sp");
TEST_RR("sel r0, r",0, VAL1,", r",1, VAL2,"")
TEST_RR("sel r14, r",12,VAL1,", r",10, VAL2,"")
TEST_R("clz r0, r",0, 0x0,"")
TEST_R("clz r7, r",14,0x1,"")
TEST_R("clz lr, r",7, 0xffffffff,"")
TEST_UNSUPPORTED(__inst_thumb32(0xfa80f030) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xfaffff7f) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xfab0f000) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xfaffff7f) "") /* Unallocated space */
TEST_GROUP("Multiply, multiply accumulate, and absolute difference operations")
TEST_RR( "mul r0, r",1, VAL1,", r",2, VAL2,"")
TEST_RR( "mul r7, r",8, VAL2,", r",9, VAL2,"")
TEST_UNSUPPORTED(__inst_thumb32(0xfb08ff09) " @ mul pc, r8, r9")
TEST_UNSUPPORTED(__inst_thumb32(0xfb08fd09) " @ mul sp, r8, r9")
TEST_UNSUPPORTED(__inst_thumb32(0xfb0ff709) " @ mul r7, pc, r9")
TEST_UNSUPPORTED(__inst_thumb32(0xfb0df709) " @ mul r7, sp, r9")
TEST_UNSUPPORTED(__inst_thumb32(0xfb08f70f) " @ mul r7, r8, pc")
TEST_UNSUPPORTED(__inst_thumb32(0xfb08f70d) " @ mul r7, r8, sp")
TEST_RRR( "mla r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
TEST_RRR( "mla r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
TEST_UNSUPPORTED(__inst_thumb32(0xfb08af09) " @ mla pc, r8, r9, r10");
TEST_UNSUPPORTED(__inst_thumb32(0xfb08ad09) " @ mla sp, r8, r9, r10");
TEST_UNSUPPORTED(__inst_thumb32(0xfb0fa709) " @ mla r7, pc, r9, r10");
TEST_UNSUPPORTED(__inst_thumb32(0xfb0da709) " @ mla r7, sp, r9, r10");
TEST_UNSUPPORTED(__inst_thumb32(0xfb08a70f) " @ mla r7, r8, pc, r10");
TEST_UNSUPPORTED(__inst_thumb32(0xfb08a70d) " @ mla r7, r8, sp, r10");
TEST_UNSUPPORTED(__inst_thumb32(0xfb08d709) " @ mla r7, r8, r9, sp");
TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
TEST_RRR( "mls r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
TEST_RRR( "smlabb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
TEST_RRR( "smlabb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
TEST_RRR( "smlatb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
TEST_RRR( "smlatb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
TEST_RRR( "smlabt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
TEST_RRR( "smlabt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
TEST_RRR( "smlatt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
TEST_RRR( "smlatt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
TEST_RR( "smulbb r0, r",1, VAL1,", r",2, VAL2,"")
TEST_RR( "smulbb r7, r",8, VAL3,", r",9, VAL1,"")
TEST_RR( "smultb r0, r",1, VAL1,", r",2, VAL2,"")
TEST_RR( "smultb r7, r",8, VAL3,", r",9, VAL1,"")
TEST_RR( "smulbt r0, r",1, VAL1,", r",2, VAL2,"")
TEST_RR( "smulbt r7, r",8, VAL3,", r",9, VAL1,"")
TEST_RR( "smultt r0, r",1, VAL1,", r",2, VAL2,"")
TEST_RR( "smultt r7, r",8, VAL3,", r",9, VAL1,"")
TEST_RRR( "smlad r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
TEST_RRR( "smlad r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
TEST_RRR( "smladx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
TEST_RRR( "smladx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
TEST_RR( "smuad r0, r",0, HH1,", r",1, HH2,"")
TEST_RR( "smuad r14, r",12,HH2,", r",10,HH1,"")
TEST_RR( "smuadx r0, r",0, HH1,", r",1, HH2,"")
TEST_RR( "smuadx r14, r",12,HH2,", r",10,HH1,"")
TEST_RRR( "smlawb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
TEST_RRR( "smlawb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
TEST_RRR( "smlawt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
TEST_RRR( "smlawt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
TEST_RR( "smulwb r0, r",1, VAL1,", r",2, VAL2,"")
TEST_RR( "smulwb r7, r",8, VAL3,", r",9, VAL1,"")
TEST_RR( "smulwt r0, r",1, VAL1,", r",2, VAL2,"")
TEST_RR( "smulwt r7, r",8, VAL3,", r",9, VAL1,"")
TEST_RRR( "smlsd r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
TEST_RRR( "smlsd r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
TEST_RRR( "smlsdx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
TEST_RRR( "smlsdx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
TEST_RR( "smusd r0, r",0, HH1,", r",1, HH2,"")
TEST_RR( "smusd r14, r",12,HH2,", r",10,HH1,"")
TEST_RR( "smusdx r0, r",0, HH1,", r",1, HH2,"")
TEST_RR( "smusdx r14, r",12,HH2,", r",10,HH1,"")
TEST_RRR( "smmla r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
TEST_RRR( "smmla r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
TEST_RRR( "smmlar r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
TEST_RRR( "smmlar r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
TEST_RR( "smmul r0, r",0, VAL1,", r",1, VAL2,"")
TEST_RR( "smmul r14, r",12,VAL2,", r",10,VAL1,"")
TEST_RR( "smmulr r0, r",0, VAL1,", r",1, VAL2,"")
TEST_RR( "smmulr r14, r",12,VAL2,", r",10,VAL1,"")
TEST_RRR( "smmls r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
TEST_RRR( "smmls r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
TEST_RRR( "smmlsr r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
TEST_RRR( "smmlsr r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
TEST_RRR( "usada8 r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL3,"")
TEST_RRR( "usada8 r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"")
TEST_RR( "usad8 r0, r",0, VAL1,", r",1, VAL2,"")
TEST_RR( "usad8 r14, r",12,VAL2,", r",10,VAL1,"")
TEST_UNSUPPORTED(__inst_thumb32(0xfb00f010) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xfb0fff1f) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xfb70f010) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xfb7fff1f) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xfb700010) "") /* Unallocated space */
TEST_UNSUPPORTED(__inst_thumb32(0xfb7fff1f) "") /* Unallocated space */
TEST_GROUP("Long multiply, long multiply accumulate, and divide")
TEST_RR( "smull r0, r1, r",2, VAL1,", r",3, VAL2,"")
TEST_RR( "smull r7, r8, r",9, VAL2,", r",10, VAL1,"")
TEST_UNSUPPORTED(__inst_thumb32(0xfb89f80a) " @ smull pc, r8, r9, r10");
TEST_UNSUPPORTED(__inst_thumb32(0xfb89d80a) " @ smull sp, r8, r9, r10");
TEST_UNSUPPORTED(__inst_thumb32(0xfb897f0a) " @ smull r7, pc, r9, r10");
TEST_UNSUPPORTED(__inst_thumb32(0xfb897d0a) " @ smull r7, sp, r9, r10");
TEST_UNSUPPORTED(__inst_thumb32(0xfb8f780a) " @ smull r7, r8, pc, r10");
TEST_UNSUPPORTED(__inst_thumb32(0xfb8d780a) " @ smull r7, r8, sp, r10");
TEST_UNSUPPORTED(__inst_thumb32(0xfb89780f) " @ smull r7, r8, r9, pc");
TEST_UNSUPPORTED(__inst_thumb32(0xfb89780d) " @ smull r7, r8, r9, sp");
TEST_RR( "umull r0, r1, r",2, VAL1,", r",3, VAL2,"")
TEST_RR( "umull r7, r8, r",9, VAL2,", r",10, VAL1,"")
TEST_RRRR( "smlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
TEST_RRRR( "smlal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
TEST_RRRR( "smlalbb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
TEST_RRRR( "smlalbb r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
TEST_RRRR( "smlalbt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
TEST_RRRR( "smlalbt r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
TEST_RRRR( "smlaltb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
TEST_RRRR( "smlaltb r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
TEST_RRRR( "smlaltt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
TEST_RRRR( "smlaltt r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
TEST_RRRR( "smlald r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
TEST_RRRR( "smlald r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
TEST_RRRR( "smlaldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
TEST_RRRR( "smlaldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
TEST_RRRR( "smlsld r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
TEST_RRRR( "smlsld r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
TEST_RRRR( "smlsldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
TEST_RRRR( "smlsldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
TEST_RRRR( "umlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
TEST_RRRR( "umlal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
TEST_RRRR( "umaal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
TEST_RRRR( "umaal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
TEST_GROUP("Coprocessor instructions")
TEST_UNSUPPORTED(__inst_thumb32(0xfc000000) "")
TEST_UNSUPPORTED(__inst_thumb32(0xffffffff) "")
TEST_GROUP("Testing instructions in IT blocks")
TEST_ITBLOCK("sub.w r0, r0")
verbose("\n");
}
| gpl-2.0 |
ytjiang/linux | drivers/media/i2c/mt9p031.c | 481 | 32671 | /*
* Driver for MT9P031 CMOS Image Sensor from Aptina
*
* Copyright (C) 2011, Laurent Pinchart <laurent.pinchart@ideasonboard.com>
* Copyright (C) 2011, Javier Martin <javier.martin@vista-silicon.com>
* Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* Based on the MT9V032 driver and Bastian Hecht's code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/mt9p031.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include "aptina-pll.h"
#define MT9P031_PIXEL_ARRAY_WIDTH 2752
#define MT9P031_PIXEL_ARRAY_HEIGHT 2004
#define MT9P031_CHIP_VERSION 0x00
#define MT9P031_CHIP_VERSION_VALUE 0x1801
#define MT9P031_ROW_START 0x01
#define MT9P031_ROW_START_MIN 0
#define MT9P031_ROW_START_MAX 2004
#define MT9P031_ROW_START_DEF 54
#define MT9P031_COLUMN_START 0x02
#define MT9P031_COLUMN_START_MIN 0
#define MT9P031_COLUMN_START_MAX 2750
#define MT9P031_COLUMN_START_DEF 16
#define MT9P031_WINDOW_HEIGHT 0x03
#define MT9P031_WINDOW_HEIGHT_MIN 2
#define MT9P031_WINDOW_HEIGHT_MAX 2006
#define MT9P031_WINDOW_HEIGHT_DEF 1944
#define MT9P031_WINDOW_WIDTH 0x04
#define MT9P031_WINDOW_WIDTH_MIN 2
#define MT9P031_WINDOW_WIDTH_MAX 2752
#define MT9P031_WINDOW_WIDTH_DEF 2592
#define MT9P031_HORIZONTAL_BLANK 0x05
#define MT9P031_HORIZONTAL_BLANK_MIN 0
#define MT9P031_HORIZONTAL_BLANK_MAX 4095
#define MT9P031_VERTICAL_BLANK 0x06
#define MT9P031_VERTICAL_BLANK_MIN 1
#define MT9P031_VERTICAL_BLANK_MAX 4096
#define MT9P031_VERTICAL_BLANK_DEF 26
#define MT9P031_OUTPUT_CONTROL 0x07
#define MT9P031_OUTPUT_CONTROL_CEN 2
#define MT9P031_OUTPUT_CONTROL_SYN 1
#define MT9P031_OUTPUT_CONTROL_DEF 0x1f82
#define MT9P031_SHUTTER_WIDTH_UPPER 0x08
#define MT9P031_SHUTTER_WIDTH_LOWER 0x09
#define MT9P031_SHUTTER_WIDTH_MIN 1
#define MT9P031_SHUTTER_WIDTH_MAX 1048575
#define MT9P031_SHUTTER_WIDTH_DEF 1943
#define MT9P031_PLL_CONTROL 0x10
#define MT9P031_PLL_CONTROL_PWROFF 0x0050
#define MT9P031_PLL_CONTROL_PWRON 0x0051
#define MT9P031_PLL_CONTROL_USEPLL 0x0052
#define MT9P031_PLL_CONFIG_1 0x11
#define MT9P031_PLL_CONFIG_2 0x12
#define MT9P031_PIXEL_CLOCK_CONTROL 0x0a
#define MT9P031_PIXEL_CLOCK_INVERT (1 << 15)
#define MT9P031_PIXEL_CLOCK_SHIFT(n) ((n) << 8)
#define MT9P031_PIXEL_CLOCK_DIVIDE(n) ((n) << 0)
#define MT9P031_FRAME_RESTART 0x0b
#define MT9P031_SHUTTER_DELAY 0x0c
#define MT9P031_RST 0x0d
#define MT9P031_RST_ENABLE 1
#define MT9P031_RST_DISABLE 0
#define MT9P031_READ_MODE_1 0x1e
#define MT9P031_READ_MODE_2 0x20
#define MT9P031_READ_MODE_2_ROW_MIR (1 << 15)
#define MT9P031_READ_MODE_2_COL_MIR (1 << 14)
#define MT9P031_READ_MODE_2_ROW_BLC (1 << 6)
#define MT9P031_ROW_ADDRESS_MODE 0x22
#define MT9P031_COLUMN_ADDRESS_MODE 0x23
#define MT9P031_GLOBAL_GAIN 0x35
#define MT9P031_GLOBAL_GAIN_MIN 8
#define MT9P031_GLOBAL_GAIN_MAX 1024
#define MT9P031_GLOBAL_GAIN_DEF 8
#define MT9P031_GLOBAL_GAIN_MULT (1 << 6)
#define MT9P031_ROW_BLACK_TARGET 0x49
#define MT9P031_ROW_BLACK_DEF_OFFSET 0x4b
#define MT9P031_GREEN1_OFFSET 0x60
#define MT9P031_GREEN2_OFFSET 0x61
#define MT9P031_BLACK_LEVEL_CALIBRATION 0x62
#define MT9P031_BLC_MANUAL_BLC (1 << 0)
#define MT9P031_RED_OFFSET 0x63
#define MT9P031_BLUE_OFFSET 0x64
#define MT9P031_TEST_PATTERN 0xa0
#define MT9P031_TEST_PATTERN_SHIFT 3
#define MT9P031_TEST_PATTERN_ENABLE (1 << 0)
#define MT9P031_TEST_PATTERN_DISABLE (0 << 0)
#define MT9P031_TEST_PATTERN_GREEN 0xa1
#define MT9P031_TEST_PATTERN_RED 0xa2
#define MT9P031_TEST_PATTERN_BLUE 0xa3
/* Sensor variants handled by this driver; selects the media bus format
 * reported to userspace (Bayer vs. greyscale).
 */
enum mt9p031_model {
	MT9P031_MODEL_COLOR,
	MT9P031_MODEL_MONOCHROME,
};
/* Per-sensor driver state. One instance per I2C client. */
struct mt9p031 {
	struct v4l2_subdev subdev;	/* Embedded V4L2 subdevice */
	struct media_pad pad;		/* Single source pad */
	struct v4l2_rect crop;  /* Sensor window */
	struct v4l2_mbus_framefmt format;	/* Active output format */
	struct mt9p031_platform_data *pdata;	/* Board-supplied configuration */
	struct mutex power_lock; /* lock to protect power_count */
	int power_count;		/* Use count for shared power up/down */
	struct clk *clk;		/* External (master) clock */
	struct regulator_bulk_data regulators[3];	/* Sensor supplies */
	enum mt9p031_model model;	/* Color or monochrome variant */
	struct aptina_pll pll;		/* PLL parameters from aptina_pll_calculate() */
	unsigned int clk_div;		/* Pixel clock divider when the PLL is unused */
	bool use_pll;			/* True when the on-chip PLL drives the pixel clock */
	int reset;			/* RESET_BAR GPIO number (may be invalid) */
	struct v4l2_ctrl_handler ctrls;
	struct v4l2_ctrl *blc_auto;	/* Kept to restore BLC after test pattern */
	struct v4l2_ctrl *blc_offset;	/* Kept to restore BLC after test pattern */
	/* Registers cache */
	u16 output_control;
	u16 mode2;
};
/* Map an embedded v4l2_subdev back to its enclosing mt9p031 state. */
static struct mt9p031 *to_mt9p031(struct v4l2_subdev *sd)
{
	return container_of(sd, struct mt9p031, subdev);
}
/* Read a 16-bit big-endian register; returns the value or a negative errno. */
static int mt9p031_read(struct i2c_client *client, u8 reg)
{
	return i2c_smbus_read_word_swapped(client, reg);
}
/* Write a 16-bit big-endian register; returns 0 or a negative errno. */
static int mt9p031_write(struct i2c_client *client, u8 reg, u16 data)
{
	return i2c_smbus_write_word_swapped(client, reg, data);
}
/* Read-modify-write the OUTPUT_CONTROL register through its cached copy:
 * bits in @clear are cleared, then bits in @set are set. The cache is only
 * refreshed after the I2C write succeeds, so it always mirrors the hardware.
 * Returns 0 on success or a negative errno from the I2C write.
 */
static int mt9p031_set_output_control(struct mt9p031 *mt9p031, u16 clear,
				      u16 set)
{
	struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
	u16 new_control;
	int ret;

	new_control = mt9p031->output_control;
	new_control &= ~clear;
	new_control |= set;

	ret = mt9p031_write(client, MT9P031_OUTPUT_CONTROL, new_control);
	if (ret < 0)
		return ret;

	mt9p031->output_control = new_control;
	return 0;
}
/* Read-modify-write the READ_MODE_2 register through its cached copy:
 * bits in @clear are cleared, then bits in @set are set. The cache is only
 * updated once the hardware write has succeeded.
 * Returns 0 on success or a negative errno from the I2C write.
 */
static int mt9p031_set_mode2(struct mt9p031 *mt9p031, u16 clear, u16 set)
{
	struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
	u16 new_mode;
	int ret;

	new_mode = mt9p031->mode2;
	new_mode &= ~clear;
	new_mode |= set;

	ret = mt9p031_write(client, MT9P031_READ_MODE_2, new_mode);
	if (ret < 0)
		return ret;

	mt9p031->mode2 = new_mode;
	return 0;
}
/* Soft-reset the sensor: pulse the RST register, reprogram the pixel clock
 * divider computed by mt9p031_clk_setup(), and disable chip output until
 * streaming is explicitly started. Returns 0 or a negative errno.
 */
static int mt9p031_reset(struct mt9p031 *mt9p031)
{
	struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
	int ret;
	/* Disable chip output, synchronous option update */
	ret = mt9p031_write(client, MT9P031_RST, MT9P031_RST_ENABLE);
	if (ret < 0)
		return ret;
	ret = mt9p031_write(client, MT9P031_RST, MT9P031_RST_DISABLE);
	if (ret < 0)
		return ret;
	/* The reset cleared the divider; restore the value computed at setup
	 * time (0 when the PLL is used).
	 */
	ret = mt9p031_write(client, MT9P031_PIXEL_CLOCK_CONTROL,
			    MT9P031_PIXEL_CLOCK_DIVIDE(mt9p031->clk_div));
	if (ret < 0)
		return ret;
	/* Keep the output disabled until mt9p031_s_stream() enables it. */
	return mt9p031_set_output_control(mt9p031, MT9P031_OUTPUT_CONTROL_CEN,
					  0);
}
static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
{
static const struct aptina_pll_limits limits = {
.ext_clock_min = 6000000,
.ext_clock_max = 27000000,
.int_clock_min = 2000000,
.int_clock_max = 13500000,
.out_clock_min = 180000000,
.out_clock_max = 360000000,
.pix_clock_max = 96000000,
.n_min = 1,
.n_max = 64,
.m_min = 16,
.m_max = 255,
.p1_min = 1,
.p1_max = 128,
};
struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
struct mt9p031_platform_data *pdata = mt9p031->pdata;
int ret;
mt9p031->clk = devm_clk_get(&client->dev, NULL);
if (IS_ERR(mt9p031->clk))
return PTR_ERR(mt9p031->clk);
ret = clk_set_rate(mt9p031->clk, pdata->ext_freq);
if (ret < 0)
return ret;
/* If the external clock frequency is out of bounds for the PLL use the
* pixel clock divider only and disable the PLL.
*/
if (pdata->ext_freq > limits.ext_clock_max) {
unsigned int div;
div = DIV_ROUND_UP(pdata->ext_freq, pdata->target_freq);
div = roundup_pow_of_two(div) / 2;
mt9p031->clk_div = max_t(unsigned int, div, 64);
mt9p031->use_pll = false;
return 0;
}
mt9p031->pll.ext_clock = pdata->ext_freq;
mt9p031->pll.pix_clock = pdata->target_freq;
mt9p031->use_pll = true;
return aptina_pll_calculate(&client->dev, &limits, &mt9p031->pll);
}
/* Power on and engage the on-chip PLL using the m/n/p1 parameters computed
 * by mt9p031_clk_setup(). No-op when the PLL is not used. The USEPLL bit is
 * only set after a short delay — presumably to let the PLL lock; confirm
 * the exact timing against the datasheet. Returns 0 or a negative errno.
 */
static int mt9p031_pll_enable(struct mt9p031 *mt9p031)
{
	struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
	int ret;
	if (!mt9p031->use_pll)
		return 0;
	ret = mt9p031_write(client, MT9P031_PLL_CONTROL,
			    MT9P031_PLL_CONTROL_PWRON);
	if (ret < 0)
		return ret;
	/* CONFIG_1 packs m in the high byte and (n - 1) in the low byte. */
	ret = mt9p031_write(client, MT9P031_PLL_CONFIG_1,
			    (mt9p031->pll.m << 8) | (mt9p031->pll.n - 1));
	if (ret < 0)
		return ret;
	ret = mt9p031_write(client, MT9P031_PLL_CONFIG_2, mt9p031->pll.p1 - 1);
	if (ret < 0)
		return ret;
	usleep_range(1000, 2000);
	ret = mt9p031_write(client, MT9P031_PLL_CONTROL,
			    MT9P031_PLL_CONTROL_PWRON |
			    MT9P031_PLL_CONTROL_USEPLL);
	return ret;
}
/* Power the on-chip PLL down. A no-op returning 0 when the PLL is unused.
 * Returns 0 on success or a negative errno from the I2C write.
 */
static inline int mt9p031_pll_disable(struct mt9p031 *mt9p031)
{
	struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);

	if (mt9p031->use_pll)
		return mt9p031_write(client, MT9P031_PLL_CONTROL,
				     MT9P031_PLL_CONTROL_PWROFF);

	return 0;
}
/* Power-up sequence: assert reset (RESET_BAR low), enable the regulators,
 * start the external clock, then release reset. The order is deliberate —
 * the sensor must come out of reset with supplies and clock stable.
 * Returns 0 or a negative errno; on clock failure the regulators are
 * disabled again.
 */
static int mt9p031_power_on(struct mt9p031 *mt9p031)
{
	int ret;
	/* Ensure RESET_BAR is low */
	if (gpio_is_valid(mt9p031->reset)) {
		gpio_set_value(mt9p031->reset, 0);
		usleep_range(1000, 2000);
	}
	/* Bring up the supplies */
	ret = regulator_bulk_enable(ARRAY_SIZE(mt9p031->regulators),
				    mt9p031->regulators);
	if (ret < 0)
		return ret;
	/* Enable clock */
	if (mt9p031->clk) {
		ret = clk_prepare_enable(mt9p031->clk);
		if (ret) {
			/* Undo the regulator enable before bailing out. */
			regulator_bulk_disable(ARRAY_SIZE(mt9p031->regulators),
					       mt9p031->regulators);
			return ret;
		}
	}
	/* Now RESET_BAR must be high */
	if (gpio_is_valid(mt9p031->reset)) {
		gpio_set_value(mt9p031->reset, 1);
		usleep_range(1000, 2000);
	}
	return 0;
}
/* Power-down sequence: assert reset first, then drop the supplies and stop
 * the external clock (reverse of mt9p031_power_on()).
 */
static void mt9p031_power_off(struct mt9p031 *mt9p031)
{
	if (gpio_is_valid(mt9p031->reset)) {
		gpio_set_value(mt9p031->reset, 0);
		usleep_range(1000, 2000);
	}
	regulator_bulk_disable(ARRAY_SIZE(mt9p031->regulators),
			       mt9p031->regulators);
	if (mt9p031->clk)
		clk_disable_unprepare(mt9p031->clk);
}
/* Transition the sensor to the requested power state. Powering on also
 * soft-resets the chip and re-applies all cached control values so the
 * hardware matches the software state. Caller holds power_lock.
 * Returns 0 on success or a negative errno.
 */
static int __mt9p031_set_power(struct mt9p031 *mt9p031, bool on)
{
	struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
	int ret;

	if (on) {
		ret = mt9p031_power_on(mt9p031);
		if (ret < 0)
			return ret;

		ret = mt9p031_reset(mt9p031);
		if (ret < 0) {
			dev_err(&client->dev, "Failed to reset the camera\n");
			return ret;
		}

		/* Re-program every control into the freshly reset chip. */
		return v4l2_ctrl_handler_setup(&mt9p031->ctrls);
	}

	mt9p031_power_off(mt9p031);
	return 0;
}
/* -----------------------------------------------------------------------------
* V4L2 subdev video operations
*/
/* Program the crop window, skipping/binning factors and blanking registers
 * from the current crop rectangle and output format.
 * Returns 0 on success or a negative errno from the first failing write.
 */
static int mt9p031_set_params(struct mt9p031 *mt9p031)
{
	struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
	struct v4l2_mbus_framefmt *format = &mt9p031->format;
	const struct v4l2_rect *crop = &mt9p031->crop;
	unsigned int hblank;
	unsigned int vblank;
	unsigned int xskip;
	unsigned int yskip;
	unsigned int xbin;
	unsigned int ybin;
	int ret;

	/* Windows position and size.
	 *
	 * TODO: Make sure the start coordinates and window size match the
	 * skipping, binning and mirroring (see description of registers 2 and 4
	 * in table 13, and Binning section on page 41).
	 */
	ret = mt9p031_write(client, MT9P031_COLUMN_START, crop->left);
	if (ret < 0)
		return ret;
	ret = mt9p031_write(client, MT9P031_ROW_START, crop->top);
	if (ret < 0)
		return ret;
	ret = mt9p031_write(client, MT9P031_WINDOW_WIDTH, crop->width - 1);
	if (ret < 0)
		return ret;
	ret = mt9p031_write(client, MT9P031_WINDOW_HEIGHT, crop->height - 1);
	if (ret < 0)
		return ret;

	/* Row and column binning and skipping. Use the maximum binning value
	 * compatible with the skipping settings.
	 */
	xskip = DIV_ROUND_CLOSEST(crop->width, format->width);
	yskip = DIV_ROUND_CLOSEST(crop->height, format->height);
	/* The largest power of two dividing the skip factor. */
	xbin = 1 << (ffs(xskip) - 1);
	ybin = 1 << (ffs(yskip) - 1);

	ret = mt9p031_write(client, MT9P031_COLUMN_ADDRESS_MODE,
			    ((xbin - 1) << 4) | (xskip - 1));
	if (ret < 0)
		return ret;
	ret = mt9p031_write(client, MT9P031_ROW_ADDRESS_MODE,
			    ((ybin - 1) << 4) | (yskip - 1));
	if (ret < 0)
		return ret;

	/* Blanking - use minimum value for horizontal blanking and default
	 * value for vertical blanking.
	 */
	hblank = 346 * ybin + 64 + (80 >> min_t(unsigned int, xbin, 3));
	vblank = MT9P031_VERTICAL_BLANK_DEF;

	ret = mt9p031_write(client, MT9P031_HORIZONTAL_BLANK, hblank - 1);
	if (ret < 0)
		return ret;

	/* Return the final write's status directly instead of the redundant
	 * "if (ret < 0) return ret; return ret;" of the original.
	 */
	return mt9p031_write(client, MT9P031_VERTICAL_BLANK, vblank - 1);
}
/* Start or stop streaming. Stopping disables the chip output and powers the
 * PLL down; starting programs the window/blanking parameters, enables the
 * output and engages the PLL. Returns 0 or a negative errno.
 */
static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable)
{
	struct mt9p031 *mt9p031 = to_mt9p031(subdev);
	int ret;
	if (!enable) {
		/* Stop sensor readout */
		ret = mt9p031_set_output_control(mt9p031,
						 MT9P031_OUTPUT_CONTROL_CEN, 0);
		if (ret < 0)
			return ret;
		return mt9p031_pll_disable(mt9p031);
	}
	ret = mt9p031_set_params(mt9p031);
	if (ret < 0)
		return ret;
	/* Switch to master "normal" mode */
	ret = mt9p031_set_output_control(mt9p031, 0,
					 MT9P031_OUTPUT_CONTROL_CEN);
	if (ret < 0)
		return ret;
	return mt9p031_pll_enable(mt9p031);
}
/* Enumerate media bus codes: a single code on the single pad, matching the
 * currently configured format. Returns 0 or -EINVAL for out-of-range
 * pad/index values.
 */
static int mt9p031_enum_mbus_code(struct v4l2_subdev *subdev,
				  struct v4l2_subdev_fh *fh,
				  struct v4l2_subdev_mbus_code_enum *code)
{
	struct mt9p031 *mt9p031 = to_mt9p031(subdev);

	if (code->pad != 0 || code->index != 0)
		return -EINVAL;

	code->code = mt9p031->format.code;
	return 0;
}
/* Enumerate the 8 discrete frame sizes: the default window divided by
 * (index + 1), with the width divisor capped at 7 — presumably matching the
 * supported skipping factors; confirm against the datasheet. Only the
 * currently configured media bus code is accepted.
 */
static int mt9p031_enum_frame_size(struct v4l2_subdev *subdev,
				   struct v4l2_subdev_fh *fh,
				   struct v4l2_subdev_frame_size_enum *fse)
{
	struct mt9p031 *mt9p031 = to_mt9p031(subdev);
	if (fse->index >= 8 || fse->code != mt9p031->format.code)
		return -EINVAL;
	fse->min_width = MT9P031_WINDOW_WIDTH_DEF
		       / min_t(unsigned int, 7, fse->index + 1);
	fse->max_width = fse->min_width;
	fse->min_height = MT9P031_WINDOW_HEIGHT_DEF / (fse->index + 1);
	fse->max_height = fse->min_height;
	return 0;
}
/* Return the format to operate on: the per-filehandle TRY format or the
 * device's ACTIVE format, depending on @which. NULL for unknown selectors.
 */
static struct v4l2_mbus_framefmt *
__mt9p031_get_pad_format(struct mt9p031 *mt9p031, struct v4l2_subdev_fh *fh,
			 unsigned int pad, u32 which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(fh, pad);
	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
		return &mt9p031->format;
	return NULL;
}
/* Return the crop rectangle to operate on: the per-filehandle TRY rectangle
 * or the device's ACTIVE rectangle, depending on @which. NULL for unknown
 * selectors.
 */
static struct v4l2_rect *
__mt9p031_get_pad_crop(struct mt9p031 *mt9p031, struct v4l2_subdev_fh *fh,
		     unsigned int pad, u32 which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_crop(fh, pad);
	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
		return &mt9p031->crop;
	return NULL;
}
static int mt9p031_get_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *fmt)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
fmt->format = *__mt9p031_get_pad_format(mt9p031, fh, fmt->pad,
fmt->which);
return 0;
}
/* Set the output format on the pad. The requested size is clamped against
 * the current crop rectangle (width scaled down by at most 7, height by at
 * most 8 — matching the enum_frame_size limits) and snapped to the nearest
 * integer downscaling ratio. The crop rectangle itself is not modified.
 */
static int mt9p031_set_format(struct v4l2_subdev *subdev,
			      struct v4l2_subdev_fh *fh,
			      struct v4l2_subdev_format *format)
{
	struct mt9p031 *mt9p031 = to_mt9p031(subdev);
	struct v4l2_mbus_framefmt *__format;
	struct v4l2_rect *__crop;
	unsigned int width;
	unsigned int height;
	unsigned int hratio;
	unsigned int vratio;
	__crop = __mt9p031_get_pad_crop(mt9p031, fh, format->pad,
					format->which);
	/* Clamp the width and height to avoid dividing by zero. */
	width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
			max_t(unsigned int, __crop->width / 7,
			      MT9P031_WINDOW_WIDTH_MIN),
			__crop->width);
	height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
			 max_t(unsigned int, __crop->height / 8,
			       MT9P031_WINDOW_HEIGHT_MIN),
			 __crop->height);
	/* Round to the closest achievable integer downscaling ratio. */
	hratio = DIV_ROUND_CLOSEST(__crop->width, width);
	vratio = DIV_ROUND_CLOSEST(__crop->height, height);
	__format = __mt9p031_get_pad_format(mt9p031, fh, format->pad,
					    format->which);
	__format->width = __crop->width / hratio;
	__format->height = __crop->height / vratio;
	/* Report the size actually applied back to the caller. */
	format->format = *__format;
	return 0;
}
static int mt9p031_get_crop(struct v4l2_subdev *subdev,
struct v4l2_subdev_fh *fh,
struct v4l2_subdev_crop *crop)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
crop->rect = *__mt9p031_get_pad_crop(mt9p031, fh, crop->pad,
crop->which);
return 0;
}
/* Set the crop rectangle. Coordinates are aligned to multiples of 2 to keep
 * the GRBG Bayer phase, clamped to the register limits, and bounded by the
 * pixel array. If the crop size changed, the output format is reset to the
 * full crop size (scaling is reconfigured via set_format).
 */
static int mt9p031_set_crop(struct v4l2_subdev *subdev,
			    struct v4l2_subdev_fh *fh,
			    struct v4l2_subdev_crop *crop)
{
	struct mt9p031 *mt9p031 = to_mt9p031(subdev);
	struct v4l2_mbus_framefmt *__format;
	struct v4l2_rect *__crop;
	struct v4l2_rect rect;
	/* Clamp the crop rectangle boundaries and align them to a multiple of 2
	 * pixels to ensure a GRBG Bayer pattern.
	 */
	rect.left = clamp(ALIGN(crop->rect.left, 2), MT9P031_COLUMN_START_MIN,
			  MT9P031_COLUMN_START_MAX);
	rect.top = clamp(ALIGN(crop->rect.top, 2), MT9P031_ROW_START_MIN,
			 MT9P031_ROW_START_MAX);
	rect.width = clamp_t(unsigned int, ALIGN(crop->rect.width, 2),
			     MT9P031_WINDOW_WIDTH_MIN,
			     MT9P031_WINDOW_WIDTH_MAX);
	rect.height = clamp_t(unsigned int, ALIGN(crop->rect.height, 2),
			      MT9P031_WINDOW_HEIGHT_MIN,
			      MT9P031_WINDOW_HEIGHT_MAX);
	/* Keep the window inside the pixel array. */
	rect.width = min_t(unsigned int, rect.width,
			   MT9P031_PIXEL_ARRAY_WIDTH - rect.left);
	rect.height = min_t(unsigned int, rect.height,
			    MT9P031_PIXEL_ARRAY_HEIGHT - rect.top);
	__crop = __mt9p031_get_pad_crop(mt9p031, fh, crop->pad, crop->which);
	if (rect.width != __crop->width || rect.height != __crop->height) {
		/* Reset the output image size if the crop rectangle size has
		 * been modified.
		 */
		__format = __mt9p031_get_pad_format(mt9p031, fh, crop->pad,
						    crop->which);
		__format->width = rect.width;
		__format->height = rect.height;
	}
	*__crop = rect;
	/* Report the rectangle actually applied back to the caller. */
	crop->rect = rect;
	return 0;
}
/* -----------------------------------------------------------------------------
* V4L2 subdev control operations
*/
#define V4L2_CID_BLC_AUTO (V4L2_CID_USER_BASE | 0x1002)
#define V4L2_CID_BLC_TARGET_LEVEL (V4L2_CID_USER_BASE | 0x1003)
#define V4L2_CID_BLC_ANALOG_OFFSET (V4L2_CID_USER_BASE | 0x1004)
#define V4L2_CID_BLC_DIGITAL_OFFSET (V4L2_CID_USER_BASE | 0x1005)
/* Re-apply the Black Level Calibration settings from the cached control
 * values. Called when the test pattern is disabled, since enabling it
 * forcibly turned the digital BLC off. Returns 0 or a negative errno.
 */
static int mt9p031_restore_blc(struct mt9p031 *mt9p031)
{
	struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
	int ret;
	/* Re-enable automatic row BLC if the control says so. */
	if (mt9p031->blc_auto->cur.val != 0) {
		ret = mt9p031_set_mode2(mt9p031, 0,
					MT9P031_READ_MODE_2_ROW_BLC);
		if (ret < 0)
			return ret;
	}
	/* Restore a non-zero manual BLC target level. */
	if (mt9p031->blc_offset->cur.val != 0) {
		ret = mt9p031_write(client, MT9P031_ROW_BLACK_TARGET,
				    mt9p031->blc_offset->cur.val);
		if (ret < 0)
			return ret;
	}
	return 0;
}
/* Apply a V4L2 control value to the hardware. Inactive controls (BLC while
 * a test pattern is enabled) are silently accepted. Returns 0 or a negative
 * errno from the failing register write.
 */
static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct mt9p031 *mt9p031 =
			container_of(ctrl->handler, struct mt9p031, ctrls);
	struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
	u16 data;
	int ret;
	if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
		return 0;
	switch (ctrl->id) {
	case V4L2_CID_EXPOSURE:
		/* The 20-bit shutter width is split across two registers. */
		ret = mt9p031_write(client, MT9P031_SHUTTER_WIDTH_UPPER,
				    (ctrl->val >> 16) & 0xffff);
		if (ret < 0)
			return ret;
		return mt9p031_write(client, MT9P031_SHUTTER_WIDTH_LOWER,
				     ctrl->val & 0xffff);
	case V4L2_CID_GAIN:
		/* Gain is controlled by 2 analog stages and a digital stage.
		 * Valid values for the 3 stages are
		 *
		 * Stage Min Max Step
		 * ------------------------------------------
		 * First analog stage x1 x2 1
		 * Second analog stage x1 x4 0.125
		 * Digital stage x1 x16 0.125
		 *
		 * To minimize noise, the gain stages should be used in the
		 * second analog stage, first analog stage, digital stage order.
		 * Gain from a previous stage should be pushed to its maximum
		 * value before the next stage is used.
		 */
		if (ctrl->val <= 32) {
			/* Analog gain only. */
			data = ctrl->val;
		} else if (ctrl->val <= 64) {
			/* Engage the x2 analog multiplier; value must be even. */
			ctrl->val &= ~1;
			data = (1 << 6) | (ctrl->val >> 1);
		} else {
			/* Max out analog gain, remainder goes to digital gain. */
			ctrl->val &= ~7;
			data = ((ctrl->val - 64) << 5) | (1 << 6) | 32;
		}
		return mt9p031_write(client, MT9P031_GLOBAL_GAIN, data);
	case V4L2_CID_HFLIP:
		if (ctrl->val)
			return mt9p031_set_mode2(mt9p031,
					0, MT9P031_READ_MODE_2_COL_MIR);
		else
			return mt9p031_set_mode2(mt9p031,
					MT9P031_READ_MODE_2_COL_MIR, 0);
	case V4L2_CID_VFLIP:
		if (ctrl->val)
			return mt9p031_set_mode2(mt9p031,
					0, MT9P031_READ_MODE_2_ROW_MIR);
		else
			return mt9p031_set_mode2(mt9p031,
					MT9P031_READ_MODE_2_ROW_MIR, 0);
	case V4L2_CID_TEST_PATTERN:
		/* The digital side of the Black Level Calibration function must
		 * be disabled when generating a test pattern to avoid artifacts
		 * in the image. Activate (deactivate) the BLC-related controls
		 * when the test pattern is enabled (disabled).
		 */
		v4l2_ctrl_activate(mt9p031->blc_auto, ctrl->val == 0);
		v4l2_ctrl_activate(mt9p031->blc_offset, ctrl->val == 0);
		if (!ctrl->val) {
			/* Restore the BLC settings. */
			ret = mt9p031_restore_blc(mt9p031);
			if (ret < 0)
				return ret;
			return mt9p031_write(client, MT9P031_TEST_PATTERN,
					     MT9P031_TEST_PATTERN_DISABLE);
		}
		/* Fixed test pattern color values. */
		ret = mt9p031_write(client, MT9P031_TEST_PATTERN_GREEN, 0x05a0);
		if (ret < 0)
			return ret;
		ret = mt9p031_write(client, MT9P031_TEST_PATTERN_RED, 0x0a50);
		if (ret < 0)
			return ret;
		ret = mt9p031_write(client, MT9P031_TEST_PATTERN_BLUE, 0x0aa0);
		if (ret < 0)
			return ret;
		/* Disable digital BLC when generating a test pattern. */
		ret = mt9p031_set_mode2(mt9p031, MT9P031_READ_MODE_2_ROW_BLC,
					0);
		if (ret < 0)
			return ret;
		ret = mt9p031_write(client, MT9P031_ROW_BLACK_DEF_OFFSET, 0);
		if (ret < 0)
			return ret;
		/* Menu index 0 is "Disabled", so the pattern select is val-1. */
		return mt9p031_write(client, MT9P031_TEST_PATTERN,
				((ctrl->val - 1) << MT9P031_TEST_PATTERN_SHIFT)
				| MT9P031_TEST_PATTERN_ENABLE);
	case V4L2_CID_BLC_AUTO:
		/* Toggle automatic row BLC and manual-BLC mode together. */
		ret = mt9p031_set_mode2(mt9p031,
				ctrl->val ? 0 : MT9P031_READ_MODE_2_ROW_BLC,
				ctrl->val ? MT9P031_READ_MODE_2_ROW_BLC : 0);
		if (ret < 0)
			return ret;
		return mt9p031_write(client, MT9P031_BLACK_LEVEL_CALIBRATION,
				     ctrl->val ? 0 : MT9P031_BLC_MANUAL_BLC);
	case V4L2_CID_BLC_TARGET_LEVEL:
		return mt9p031_write(client, MT9P031_ROW_BLACK_TARGET,
				     ctrl->val);
	case V4L2_CID_BLC_ANALOG_OFFSET:
		/* Same 9-bit offset applied to all four color channels. */
		data = ctrl->val & ((1 << 9) - 1);
		ret = mt9p031_write(client, MT9P031_GREEN1_OFFSET, data);
		if (ret < 0)
			return ret;
		ret = mt9p031_write(client, MT9P031_GREEN2_OFFSET, data);
		if (ret < 0)
			return ret;
		ret = mt9p031_write(client, MT9P031_RED_OFFSET, data);
		if (ret < 0)
			return ret;
		return mt9p031_write(client, MT9P031_BLUE_OFFSET, data);
	case V4L2_CID_BLC_DIGITAL_OFFSET:
		/* 12-bit digital offset. */
		return mt9p031_write(client, MT9P031_ROW_BLACK_DEF_OFFSET,
				     ctrl->val & ((1 << 12) - 1));
	}
	return 0;
}
static struct v4l2_ctrl_ops mt9p031_ctrl_ops = {
.s_ctrl = mt9p031_s_ctrl,
};
/* Menu entries for V4L2_CID_TEST_PATTERN; index 0 disables the pattern and
 * index N selects hardware pattern N-1 (see mt9p031_s_ctrl()).
 */
static const char * const mt9p031_test_pattern_menu[] = {
	"Disabled",
	"Color Field",
	"Horizontal Gradient",
	"Vertical Gradient",
	"Diagonal Gradient",
	"Classic Test Pattern",
	"Walking 1s",
	"Monochrome Horizontal Bars",
	"Monochrome Vertical Bars",
	"Vertical Color Bars",
};
/* Driver-private Black Level Calibration controls, registered in addition
 * to the standard exposure/gain/flip controls.
 */
static const struct v4l2_ctrl_config mt9p031_ctrls[] = {
	{
		.ops = &mt9p031_ctrl_ops,
		.id = V4L2_CID_BLC_AUTO,
		.type = V4L2_CTRL_TYPE_BOOLEAN,
		.name = "BLC, Auto",
		.min = 0,
		.max = 1,
		.step = 1,
		.def = 1,
		.flags = 0,
	}, {
		.ops = &mt9p031_ctrl_ops,
		.id = V4L2_CID_BLC_TARGET_LEVEL,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "BLC Target Level",
		.min = 0,
		.max = 4095,
		.step = 1,
		.def = 168,
		.flags = 0,
	}, {
		.ops = &mt9p031_ctrl_ops,
		.id = V4L2_CID_BLC_ANALOG_OFFSET,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "BLC Analog Offset",
		.min = -255,
		.max = 255,
		.step = 1,
		.def = 32,
		.flags = 0,
	}, {
		.ops = &mt9p031_ctrl_ops,
		.id = V4L2_CID_BLC_DIGITAL_OFFSET,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "BLC Digital Offset",
		.min = -2048,
		.max = 2047,
		.step = 1,
		.def = 40,
		.flags = 0,
	}
};
/* -----------------------------------------------------------------------------
* V4L2 subdev core operations
*/
/*
 * mt9p031_set_power - reference-counted power control
 * @subdev: the sensor subdevice
 * @on: non-zero to take a power reference, zero to drop one
 *
 * The hardware is only toggled on the first 0 -> 1 transition of the use
 * count (power up) and on the last 1 -> 0 transition (power down);
 * intermediate calls merely adjust the count.
 */
static int mt9p031_set_power(struct v4l2_subdev *subdev, int on)
{
	struct mt9p031 *sensor = to_mt9p031(subdev);
	int err = 0;

	mutex_lock(&sensor->power_lock);

	/* count == 0 while turning on, or count == 1 while turning off,
	 * means the hardware power state actually changes. */
	if (sensor->power_count == !on) {
		err = __mt9p031_set_power(sensor, !!on);
		if (err < 0)
			goto unlock;
	}

	sensor->power_count += on ? 1 : -1;
	WARN_ON(sensor->power_count < 0);

unlock:
	mutex_unlock(&sensor->power_lock);
	return err;
}
/* -----------------------------------------------------------------------------
* V4L2 subdev internal operations
*/
/*
 * Detect the sensor by reading its chip-version register.
 * Invoked by the V4L2 core after the subdev is registered.
 */
static int mt9p031_registered(struct v4l2_subdev *subdev)
{
	struct i2c_client *client = v4l2_get_subdevdata(subdev);
	struct mt9p031 *mt9p031 = to_mt9p031(subdev);
	s32 data;
	int ret;

	/* The chip must be powered to answer on the I2C bus. */
	ret = mt9p031_power_on(mt9p031);
	if (ret < 0) {
		dev_err(&client->dev, "MT9P031 power up failed\n");
		return ret;
	}

	/* Read out the chip version register */
	data = mt9p031_read(client, MT9P031_CHIP_VERSION);
	mt9p031_power_off(mt9p031);

	/* NOTE(review): a negative I2C read error also fails this
	 * comparison and is reported as a wrong version — presumably
	 * intentional, but verify against mt9p031_read()'s contract. */
	if (data != MT9P031_CHIP_VERSION_VALUE) {
		dev_err(&client->dev, "MT9P031 not detected, wrong version "
			"0x%04x\n", data);
		return -ENODEV;
	}

	dev_info(&client->dev, "MT9P031 detected at address 0x%02x\n",
		 client->addr);
	return 0;
}
/*
 * Subdev node open: seed the per-file-handle "try" crop and format with
 * the sensor's full default window, then take a power reference.
 */
static int mt9p031_open(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
{
	struct mt9p031 *sensor = to_mt9p031(subdev);
	struct v4l2_mbus_framefmt *try_fmt;
	struct v4l2_rect *try_crop;

	/* Default try crop: the whole active pixel array window. */
	try_crop = v4l2_subdev_get_try_crop(fh, 0);
	try_crop->left = MT9P031_COLUMN_START_DEF;
	try_crop->top = MT9P031_ROW_START_DEF;
	try_crop->width = MT9P031_WINDOW_WIDTH_DEF;
	try_crop->height = MT9P031_WINDOW_HEIGHT_DEF;

	/* Default try format: grey on mono parts, Bayer otherwise. */
	try_fmt = v4l2_subdev_get_try_format(fh, 0);
	try_fmt->code = sensor->model == MT9P031_MODEL_MONOCHROME ?
			V4L2_MBUS_FMT_Y12_1X12 : V4L2_MBUS_FMT_SGRBG12_1X12;
	try_fmt->width = MT9P031_WINDOW_WIDTH_DEF;
	try_fmt->height = MT9P031_WINDOW_HEIGHT_DEF;
	try_fmt->field = V4L2_FIELD_NONE;
	try_fmt->colorspace = V4L2_COLORSPACE_SRGB;

	return mt9p031_set_power(subdev, 1);
}
/* Subdev node close: drop the power reference taken in mt9p031_open(). */
static int mt9p031_close(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh)
{
	return mt9p031_set_power(subdev, 0);
}
static struct v4l2_subdev_core_ops mt9p031_subdev_core_ops = {
.s_power = mt9p031_set_power,
};
static struct v4l2_subdev_video_ops mt9p031_subdev_video_ops = {
.s_stream = mt9p031_s_stream,
};
static struct v4l2_subdev_pad_ops mt9p031_subdev_pad_ops = {
.enum_mbus_code = mt9p031_enum_mbus_code,
.enum_frame_size = mt9p031_enum_frame_size,
.get_fmt = mt9p031_get_format,
.set_fmt = mt9p031_set_format,
.get_crop = mt9p031_get_crop,
.set_crop = mt9p031_set_crop,
};
static struct v4l2_subdev_ops mt9p031_subdev_ops = {
.core = &mt9p031_subdev_core_ops,
.video = &mt9p031_subdev_video_ops,
.pad = &mt9p031_subdev_pad_ops,
};
static const struct v4l2_subdev_internal_ops mt9p031_subdev_internal_ops = {
.registered = mt9p031_registered,
.open = mt9p031_open,
.close = mt9p031_close,
};
/* -----------------------------------------------------------------------------
* Driver initialization and probing
*/
/*
 * Obtain platform data either from the device tree (endpoint node of the
 * media graph) or, as a fallback, from legacy board-file platform_data.
 * Returns NULL when no usable configuration is found.
 */
static struct mt9p031_platform_data *
mt9p031_get_pdata(struct i2c_client *client)
{
	struct mt9p031_platform_data *pdata = NULL;
	struct device_node *endpoint;

	/* No OF support or no DT node: use board-file data verbatim. */
	if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
		return client->dev.platform_data;

	endpoint = of_graph_get_next_endpoint(client->dev.of_node, NULL);
	if (!endpoint)
		return NULL;

	pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
	if (pdata) {
		pdata->reset = of_get_named_gpio(client->dev.of_node,
						 "reset-gpios", 0);
		of_property_read_u32(endpoint, "input-clock-frequency",
				     &pdata->ext_freq);
		of_property_read_u32(endpoint, "pixel-clock-frequency",
				     &pdata->target_freq);
	}

	of_node_put(endpoint);
	return pdata;
}
/*
 * Probe: allocate driver state, register controls, initialize the media
 * entity and default format, request the reset GPIO and set up clocks.
 * The i2c core matched us via mt9p031_id; @did->driver_data carries the
 * color/monochrome model selection.
 */
static int mt9p031_probe(struct i2c_client *client,
		const struct i2c_device_id *did)
{
	struct mt9p031_platform_data *pdata = mt9p031_get_pdata(client);
	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
	struct mt9p031 *mt9p031;
	unsigned int i;
	int ret;

	if (pdata == NULL) {
		dev_err(&client->dev, "No platform data\n");
		return -EINVAL;
	}

	/* The register map is 16-bit wide; word transfers are mandatory. */
	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
		dev_warn(&client->dev,
			"I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
		return -EIO;
	}

	mt9p031 = devm_kzalloc(&client->dev, sizeof(*mt9p031), GFP_KERNEL);
	if (mt9p031 == NULL)
		return -ENOMEM;

	mt9p031->pdata = pdata;
	mt9p031->output_control = MT9P031_OUTPUT_CONTROL_DEF;
	mt9p031->mode2 = MT9P031_READ_MODE_2_ROW_BLC;
	mt9p031->model = did->driver_data;
	/* -1 marks "no reset GPIO"; overwritten below if one is valid. */
	mt9p031->reset = -1;

	mt9p031->regulators[0].supply = "vdd";
	mt9p031->regulators[1].supply = "vdd_io";
	mt9p031->regulators[2].supply = "vaa";

	ret = devm_regulator_bulk_get(&client->dev, 3, mt9p031->regulators);
	if (ret < 0) {
		dev_err(&client->dev, "Unable to get regulators\n");
		return ret;
	}

	/* 6 standard controls below + the custom BLC controls array. */
	v4l2_ctrl_handler_init(&mt9p031->ctrls, ARRAY_SIZE(mt9p031_ctrls) + 6);
	v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
			  V4L2_CID_EXPOSURE, MT9P031_SHUTTER_WIDTH_MIN,
			  MT9P031_SHUTTER_WIDTH_MAX, 1,
			  MT9P031_SHUTTER_WIDTH_DEF);
	v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
			  V4L2_CID_GAIN, MT9P031_GLOBAL_GAIN_MIN,
			  MT9P031_GLOBAL_GAIN_MAX, 1, MT9P031_GLOBAL_GAIN_DEF);
	v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
			  V4L2_CID_HFLIP, 0, 1, 1, 0);
	v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
			  V4L2_CID_VFLIP, 0, 1, 1, 0);
	/* PIXEL_RATE is fixed: min == max == default == target_freq. */
	v4l2_ctrl_new_std(&mt9p031->ctrls, &mt9p031_ctrl_ops,
			  V4L2_CID_PIXEL_RATE, pdata->target_freq,
			  pdata->target_freq, 1, pdata->target_freq);
	v4l2_ctrl_new_std_menu_items(&mt9p031->ctrls, &mt9p031_ctrl_ops,
			  V4L2_CID_TEST_PATTERN,
			  ARRAY_SIZE(mt9p031_test_pattern_menu) - 1, 0,
			  0, mt9p031_test_pattern_menu);

	for (i = 0; i < ARRAY_SIZE(mt9p031_ctrls); ++i)
		v4l2_ctrl_new_custom(&mt9p031->ctrls, &mt9p031_ctrls[i], NULL);

	mt9p031->subdev.ctrl_handler = &mt9p031->ctrls;

	/* The handler latches the first error from any of the adds above. */
	if (mt9p031->ctrls.error) {
		printk(KERN_INFO "%s: control initialization error %d\n",
		       __func__, mt9p031->ctrls.error);
		ret = mt9p031->ctrls.error;
		goto done;
	}

	/* Cache the BLC controls; s_ctrl cross-updates them. */
	mt9p031->blc_auto = v4l2_ctrl_find(&mt9p031->ctrls, V4L2_CID_BLC_AUTO);
	mt9p031->blc_offset = v4l2_ctrl_find(&mt9p031->ctrls,
					     V4L2_CID_BLC_DIGITAL_OFFSET);

	mutex_init(&mt9p031->power_lock);
	v4l2_i2c_subdev_init(&mt9p031->subdev, client, &mt9p031_subdev_ops);
	mt9p031->subdev.internal_ops = &mt9p031_subdev_internal_ops;

	mt9p031->pad.flags = MEDIA_PAD_FL_SOURCE;
	ret = media_entity_init(&mt9p031->subdev.entity, 1, &mt9p031->pad, 0);
	if (ret < 0)
		goto done;

	mt9p031->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;

	/* Default active crop/format: the full sensor window. */
	mt9p031->crop.width = MT9P031_WINDOW_WIDTH_DEF;
	mt9p031->crop.height = MT9P031_WINDOW_HEIGHT_DEF;
	mt9p031->crop.left = MT9P031_COLUMN_START_DEF;
	mt9p031->crop.top = MT9P031_ROW_START_DEF;

	if (mt9p031->model == MT9P031_MODEL_MONOCHROME)
		mt9p031->format.code = V4L2_MBUS_FMT_Y12_1X12;
	else
		mt9p031->format.code = V4L2_MBUS_FMT_SGRBG12_1X12;

	mt9p031->format.width = MT9P031_WINDOW_WIDTH_DEF;
	mt9p031->format.height = MT9P031_WINDOW_HEIGHT_DEF;
	mt9p031->format.field = V4L2_FIELD_NONE;
	mt9p031->format.colorspace = V4L2_COLORSPACE_SRGB;

	if (gpio_is_valid(pdata->reset)) {
		/* Hold the sensor in reset (active low) until power-up. */
		ret = devm_gpio_request_one(&client->dev, pdata->reset,
					    GPIOF_OUT_INIT_LOW, "mt9p031_rst");
		if (ret < 0)
			goto done;

		mt9p031->reset = pdata->reset;
	}

	ret = mt9p031_clk_setup(mt9p031);

done:
	/* NOTE(review): power_lock is not mutex_destroy()ed on failure —
	 * harmless on most configs, but verify against lock debugging. */
	if (ret < 0) {
		v4l2_ctrl_handler_free(&mt9p031->ctrls);
		media_entity_cleanup(&mt9p031->subdev.entity);
	}

	return ret;
}
/* Remove: undo probe in reverse order — controls, subdev, media entity. */
static int mt9p031_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct mt9p031 *sensor = to_mt9p031(sd);

	v4l2_ctrl_handler_free(&sensor->ctrls);
	v4l2_device_unregister_subdev(sd);
	media_entity_cleanup(&sd->entity);

	return 0;
}
/* I2C device IDs; driver_data selects the color vs. monochrome model. */
static const struct i2c_device_id mt9p031_id[] = {
	{ "mt9p031", MT9P031_MODEL_COLOR },
	{ "mt9p031m", MT9P031_MODEL_MONOCHROME },
	{ }
};
MODULE_DEVICE_TABLE(i2c, mt9p031_id);
#if IS_ENABLED(CONFIG_OF)
/* Device-tree compatibles; model selection still comes from the i2c id. */
static const struct of_device_id mt9p031_of_match[] = {
	{ .compatible = "aptina,mt9p031", },
	{ .compatible = "aptina,mt9p031m", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mt9p031_of_match);
#endif
/* I2C driver glue; of_match_ptr() compiles the DT table out without OF. */
static struct i2c_driver mt9p031_i2c_driver = {
	.driver = {
		.of_match_table = of_match_ptr(mt9p031_of_match),
		.name = "mt9p031",
	},
	.probe          = mt9p031_probe,
	.remove         = mt9p031_remove,
	.id_table       = mt9p031_id,
};
module_i2c_driver(mt9p031_i2c_driver);
MODULE_DESCRIPTION("Aptina MT9P031 Camera driver");
MODULE_AUTHOR("Bastian Hecht <hechtb@gmail.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
ZeroInfinityXDA/OQC-m9 | drivers/platform/msm/msm_bus/msm_bus_fabric.c | 481 | 27584 | /* Copyright (c) 2010-2014, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/radix-tree.h>
#include "msm_bus_core.h"
/* Actions passed to msm_bus_fabric_clk_commit()/msm_bus_fabric_clk_set(). */
enum {
	DISABLE,
	ENABLE,
};
/**
* msm_bus_fabric_add_node() - Add a node to the fabric structure
* @fabric: Fabric device to which the node should be added
* @info: The node to be added
*/
static int msm_bus_fabric_add_node(struct msm_bus_fabric *fabric,
	struct msm_bus_inode_info *info)
{
	int status, ctx;

	MSM_BUS_DBG("msm_bus_fabric_add_node: ID %d Gw: %d\n",
		info->node_info->priv_id, info->node_info->gateway);
	status = radix_tree_preload(GFP_ATOMIC);
	if (status)
		goto out;
	status = radix_tree_insert(&fabric->fab_tree, info->node_info->priv_id,
			info);
	radix_tree_preload_end();
	/*
	 * Fix: bail out before tagging. radix_tree_tag_set() must only be
	 * called for an index that is actually present in the tree; the
	 * insert status was previously ignored until the end.
	 */
	if (status)
		goto out;
	if (IS_SLAVE(info->node_info->priv_id))
		radix_tree_tag_set(&fabric->fab_tree, info->node_info->priv_id,
			SLAVE_NODE);

	/* Tag nodes that own a slave clock; others start clean/disabled. */
	for (ctx = 0; ctx < NUM_CTX; ctx++) {
		if (info->node_info->slaveclk[ctx]) {
			radix_tree_tag_set(&fabric->fab_tree,
				info->node_info->priv_id, CLK_NODE);
			break;
		}
		info->nodeclk[ctx].enable = false;
		info->nodeclk[ctx].dirty = false;
	}

	/* Throughput-limited masters are tagged and counted per fabric. */
	if (info->node_info->nr_lim) {
		int iid = msm_bus_board_get_iid(info->node_info->id);
		struct msm_bus_fabric_device *fabdev =
			msm_bus_get_fabric_device(GET_FABID(iid));
		BUG_ON(!fabdev);
		radix_tree_tag_set(&fabric->fab_tree,
			info->node_info->priv_id, MASTER_NODE);
		fabdev->num_nr_lim++;
		MSM_BUS_ERR("%s: Adding %d There are %d nodes", __func__,
			info->node_info->id, fabdev->num_nr_lim);
	}
out:
	return status;
}
/**
* msm_bus_add_fab() - Add a fabric (gateway) to the current fabric
* @fabric: Fabric device to which the gateway info should be added
* @info: Gateway node to be added to the fabric
*/
static int msm_bus_fabric_add_fab(struct msm_bus_fabric *fabric,
	struct msm_bus_inode_info *info)
{
	struct msm_bus_fabnodeinfo *gw;

	MSM_BUS_DBG("msm_bus_fabric_add_fab: ID %d Gw: %d\n",
		info->node_info->priv_id, info->node_info->gateway);

	gw = kzalloc(sizeof(*gw), GFP_KERNEL);
	if (!gw) {
		MSM_FAB_ERR("msm_bus_fabric_add_fab: "
			"No Node Info\n");
		MSM_FAB_ERR("axi: Cannot register fabric!\n");
		return -ENOMEM;
	}

	/* A freshly-added gateway has no path nodes allocated yet. */
	gw->info = info;
	gw->info->num_pnodes = -1;
	list_add_tail(&gw->list, &fabric->gateways);
	return 0;
}
/**
* register_fabric_info() - Create the internal fabric structure and
* build the topology tree from platform specific data
* @pdev: Platform device for getting base addresses
* @fabric: Fabric to which the gateways, nodes should be added
*
* This function is called from probe. Iterates over the platform data,
* and builds the topology
*/
static int register_fabric_info(struct platform_device *pdev,
struct msm_bus_fabric *fabric)
{
int i = 0, ret = 0, err = 0;
MSM_BUS_DBG("id:%d pdata-id: %d len: %d\n", fabric->fabdev.id,
fabric->pdata->id, fabric->pdata->len);
fabric->hw_data = fabric->fabdev.hw_algo.allocate_hw_data(pdev,
fabric->pdata);
if (ZERO_OR_NULL_PTR(fabric->hw_data) && fabric->pdata->ahb == 0) {
MSM_BUS_ERR("Couldn't allocate hw_data for fab: %d\n",
fabric->fabdev.id);
goto error;
}
for (i = 0; i < fabric->pdata->len; i++) {
struct msm_bus_inode_info *info;
int ctx, j;
info = kzalloc(sizeof(struct msm_bus_inode_info), GFP_KERNEL);
if (info == NULL) {
MSM_BUS_ERR("Error allocating info\n");
return -ENOMEM;
}
info->node_info = fabric->pdata->info + i;
info->commit_index = -1;
info->num_pnodes = -1;
for (ctx = 0; ctx < NUM_CTX; ctx++) {
if (info->node_info->slaveclk[ctx]) {
info->nodeclk[ctx].clk = clk_get_sys("msm_bus",
info->node_info->slaveclk[ctx]);
if (IS_ERR(info->nodeclk[ctx].clk)) {
MSM_BUS_ERR("Couldn't get clk %s\n",
info->node_info->slaveclk[ctx]);
err = -EINVAL;
}
info->nodeclk[ctx].enable = false;
info->nodeclk[ctx].dirty = false;
}
if (info->node_info->memclk[ctx]) {
info->memclk[ctx].clk = clk_get_sys("msm_bus",
info->node_info->memclk[ctx]);
if (IS_ERR(info->memclk[ctx].clk)) {
MSM_BUS_ERR("Couldn't get clk %s\n",
info->node_info->memclk[ctx]);
err = -EINVAL;
}
info->memclk[ctx].enable = false;
info->memclk[ctx].dirty = false;
}
}
if (info->node_info->iface_clk_node) {
info->iface_clk.clk = clk_get_sys(info->node_info->
iface_clk_node, "iface_clk");
if (IS_ERR(info->iface_clk.clk)) {
MSM_BUS_ERR("ERR: Couldn't get clk %s\n",
info->node_info->iface_clk_node);
}
}
ret = info->node_info->gateway ?
msm_bus_fabric_add_fab(fabric, info) :
msm_bus_fabric_add_node(fabric, info);
if (ret) {
MSM_BUS_ERR("Unable to add node info, ret: %d\n", ret);
kfree(info);
goto error;
}
if (fabric->fabdev.hw_algo.node_init == NULL)
continue;
if (info->iface_clk.clk) {
MSM_BUS_DBG("Enabled iface clock for node init: %d\n",
info->node_info->priv_id);
clk_prepare_enable(info->iface_clk.clk);
}
for (j = 0; j < NUM_CTX; j++)
clk_prepare_enable(fabric->info.nodeclk[j].clk);
fabric->fabdev.hw_algo.node_init(fabric->hw_data, info);
if (ret) {
MSM_BUS_ERR("Unable to init node info, ret: %d\n", ret);
kfree(info);
}
for (j = 0; j < NUM_CTX; j++)
clk_disable_unprepare(fabric->info.nodeclk[j].clk);
if (info->iface_clk.clk) {
MSM_BUS_DBG("Disable iface_clk after node init: %d\n",
info->node_info->priv_id);
clk_disable_unprepare(info->iface_clk.clk);
}
}
MSM_BUS_DBG("Fabric: %d nmasters: %d nslaves: %d\n"
" ntieredslaves: %d, rpm_enabled: %d\n",
fabric->fabdev.id, fabric->pdata->nmasters,
fabric->pdata->nslaves, fabric->pdata->ntieredslaves,
fabric->pdata->rpm_enabled);
MSM_BUS_DBG("msm_bus_register_fabric_info i: %d\n", i);
fabric->num_nodes = fabric->pdata->len;
error:
fabric->num_nodes = i;
msm_bus_dbg_commit_data(fabric->fabdev.name, NULL, 0, 0, 0,
MSM_BUS_DBG_REGISTER);
return ret | err;
}
/**
* msm_bus_fabric_update_clks() - Set the clocks for fabrics and slaves
* @fabric: Fabric for which the clocks need to be updated
* @slave: The node for which the clocks need to be updated
* @index: The index for which the current clocks are set
* @curr_clk_hz:Current clock value
* @req_clk_hz: Requested clock value
* @bwsum: Bandwidth Sum
* @clk_flag: Flag determining whether fabric clock or the slave clock has to
* be set. If clk_flag is set, fabric clock is set, else slave clock is set.
*/
static int msm_bus_fabric_update_clks(struct msm_bus_fabric_device *fabdev,
	struct msm_bus_inode_info *slave, int index,
	uint64_t curr_clk_hz, uint64_t req_clk_hz,
	uint64_t bwsum_hz, int clk_flag, int ctx,
	unsigned int cl_active_flag)
{
	int i, status = 0;
	uint64_t max_pclk = 0, rate;
	uint64_t *pclk = NULL;
	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
	struct nodeclk *nodeclk;
	/**
	 * Integration for clock rates is not required if context is not
	 * same as client's active-only flag
	 */
	if (ctx != cl_active_flag)
		goto skip_set_clks;
	/* Maximum for this gateway */
	for (i = 0; i <= slave->num_pnodes; i++) {
		/* Skip the requesting path when its rate is being lowered,
		 * so the old (higher) value does not win the max(). */
		if (i == index && (req_clk_hz < curr_clk_hz))
			continue;
		slave->pnode[i].sel_clk = &slave->pnode[i].clk[ctx];
		max_pclk = max(max_pclk, *slave->pnode[i].sel_clk);
	}
	/* The link rate is the max of path clocks, bw sum and the request. */
	*slave->link_info.sel_clk =
		max(max_pclk, max(bwsum_hz, req_clk_hz));
	/* Is this gateway or slave? */
	if (clk_flag && (!fabric->ahb)) {
		struct msm_bus_fabnodeinfo *fabgw = NULL;
		struct msm_bus_inode_info *info = NULL;
		/* Maximum of all gateways set at fabric */
		list_for_each_entry(fabgw, &fabric->gateways, list) {
			info = fabgw->info;
			if (!info)
				continue;
			info->link_info.sel_clk = &info->link_info.clk[ctx];
			max_pclk = max(max_pclk, *info->link_info.sel_clk);
		}
		MSM_BUS_DBG("max_pclk from gateways: %llu\n", max_pclk);
		/* Maximum of all slave clocks. */
		for (i = 0; i < fabric->pdata->len; i++) {
			if (fabric->pdata->info[i].gateway ||
				(fabric->pdata->info[i].id < SLAVE_ID_KEY))
				continue;
			info = radix_tree_lookup(&fabric->fab_tree,
				fabric->pdata->info[i].priv_id);
			if (!info)
				continue;
			info->link_info.sel_clk = &info->link_info.clk[ctx];
			max_pclk = max(max_pclk, *info->link_info.sel_clk);
		}
		MSM_BUS_DBG("max_pclk from slaves & gws: %llu\n", max_pclk);
		fabric->info.link_info.sel_clk =
			&fabric->info.link_info.clk[ctx];
		pclk = fabric->info.link_info.sel_clk;
	} else {
		slave->link_info.sel_clk = &slave->link_info.clk[ctx];
		pclk = slave->link_info.sel_clk;
	}
	*pclk = max(max_pclk, max(bwsum_hz, req_clk_hz));
	/* Without RPM the computed rates are recorded but never applied. */
	if (!fabric->pdata->rpm_enabled)
		goto skip_set_clks;
	if (clk_flag) {
		/* Fabric clock: mark dirty so hw commit flushes the rate. */
		nodeclk = &fabric->info.nodeclk[ctx];
		if (nodeclk->clk) {
			MSM_BUS_DBG("clks: id: %d set-clk: %llu bws_hz:%llu\n",
				fabric->fabdev.id, *pclk, bwsum_hz);
			if (nodeclk->rate != *pclk) {
				nodeclk->dirty = true;
				nodeclk->rate = *pclk;
			}
			fabric->clk_dirty = true;
		}
	} else {
		/* Slave node clock (and optional memory clock). */
		nodeclk = &slave->nodeclk[ctx];
		if (nodeclk->clk) {
			rate = *pclk;
			MSM_BUS_DBG("clks: id: %d set-clk: %llu bws_hz: %llu\n",
				slave->node_info->priv_id, rate,
				bwsum_hz);
			if (nodeclk->rate != rate) {
				nodeclk->dirty = true;
				nodeclk->rate = rate;
			}
		}
		/* status is always 0 here; the check is vestigial. */
		if (!status && slave->memclk[ctx].clk) {
			rate = *slave->link_info.sel_clk;
			if (slave->memclk[ctx].rate != rate) {
				slave->memclk[ctx].rate = rate;
				slave->memclk[ctx].dirty = true;
			}
			/* NOTE(review): redundant — rate was already stored
			 * above when it differed; harmless otherwise. */
			slave->memclk[ctx].rate = rate;
			fabric->clk_dirty = true;
		}
	}
skip_set_clks:
	return status;
}
/*
 * Recompute arbitration (bandwidth) settings for a hop/master pair and
 * mark the fabric arb-dirty so the next hw commit pushes them out.
 * For register-programmed (non-RPM) nodes the QoS registers are only
 * accessible with the fabric and interface clocks running, so they are
 * force-enabled around the update.
 */
void msm_bus_fabric_update_bw(struct msm_bus_fabric_device *fabdev,
	struct msm_bus_inode_info *hop, struct msm_bus_inode_info *info,
	int64_t add_bw, int *master_tiers, int ctx)
{
	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
	void *sel_cdata;
	long rounded_rate, cur_rate;
	sel_cdata = fabric->cdata[ctx];
	/* If it's an ahb fabric, don't calculate arb values */
	if (fabric->ahb) {
		MSM_BUS_DBG("AHB fabric, skipping bw calculation\n");
		return;
	}
	if (!add_bw) {
		MSM_BUS_DBG("No bandwidth delta. Skipping commit\n");
		return;
	}
	if ((info->node_info->hw_sel != MSM_BUS_RPM) &&
		fabdev->hw_algo.update_bw_reg(info->node_info->mode)) {
		/* Enable clocks before accessing QoS registers */
		if (fabric->info.nodeclk[DUAL_CTX].clk) {
			/* rate == 0 means no vote yet; pick a valid rounded
			 * rate so the clock can actually be enabled. */
			if (fabric->info.nodeclk[DUAL_CTX].rate == 0) {
				cur_rate = clk_get_rate(
					fabric->info.nodeclk[DUAL_CTX].clk);
				rounded_rate = clk_round_rate(
					fabric->info.nodeclk[DUAL_CTX].clk,
					cur_rate ? cur_rate : 1);
				if (clk_set_rate(
					fabric->info.nodeclk[DUAL_CTX].clk,
					rounded_rate))
					MSM_BUS_ERR("clk en:Node:%d rate:%ld",
					fabric->fabdev.id, rounded_rate);
				clk_prepare_enable(
					fabric->info.nodeclk[DUAL_CTX].clk);
			}
		}
		if (info->iface_clk.clk)
			clk_prepare_enable(info->iface_clk.clk);
		if (hop->iface_clk.clk)
			clk_prepare_enable(hop->iface_clk.clk);
	}
	fabdev->hw_algo.update_bw(hop, info, fabric->pdata, sel_cdata,
		master_tiers, add_bw);
	if ((info->node_info->hw_sel != MSM_BUS_RPM) &&
		fabdev->hw_algo.update_bw_reg(info->node_info->mode)) {
		/* Disable clocks after accessing QoS registers */
		if (fabric->info.nodeclk[DUAL_CTX].clk &&
				fabric->info.nodeclk[DUAL_CTX].rate == 0)
			clk_disable_unprepare(
				fabric->info.nodeclk[DUAL_CTX].clk);
		if (info->iface_clk.clk) {
			MSM_BUS_DBG("Commented:Will disable clk for info:%d\n",
				info->node_info->priv_id);
			clk_disable_unprepare(info->iface_clk.clk);
		}
		if (hop->iface_clk.clk) {
			MSM_BUS_DBG("Commented Will disable clk for hop:%d\n",
				hop->node_info->priv_id);
			clk_disable_unprepare(hop->iface_clk.clk);
		}
	}
	/* Arb data changed; flushed by msm_bus_fabric_hw_commit(). */
	fabric->arb_dirty = true;
}
/*
 * msm_bus_fabric_clk_set() - Flush pending (dirty) clock state for a node
 * @enable: ENABLE to prepare+enable newly-voted clocks, DISABLE to release
 *          clocks whose requested rate has dropped to zero
 * @info: Node whose nodeclk[] and memclk[] entries should be applied
 *
 * Returns the status of the last clk_set_rate() call (0 if none was made).
 */
static int msm_bus_fabric_clk_set(int enable, struct msm_bus_inode_info *info)
{
	int i, status = 0;
	long rounded_rate;

	for (i = 0; i < NUM_CTX; i++) {
		if (info->nodeclk[i].dirty) {
			if (info->nodeclk[i].rate != 0) {
				rounded_rate = clk_round_rate(info->
					nodeclk[i].clk, info->nodeclk[i].rate);
				status = clk_set_rate(info->nodeclk[i].clk,
					rounded_rate);
				MSM_BUS_DBG("AXI: node: %d set_rate: %ld\n",
					info->node_info->id, rounded_rate);
			}
			if (enable && !(info->nodeclk[i].enable)) {
				clk_prepare_enable(info->nodeclk[i].clk);
				info->nodeclk[i].dirty = false;
				info->nodeclk[i].enable = true;
			} else if ((info->nodeclk[i].rate == 0) && (!enable)
				&& (info->nodeclk[i].enable)) {
				clk_disable_unprepare(info->nodeclk[i].clk);
				info->nodeclk[i].dirty = false;
				info->nodeclk[i].enable = false;
			}
		}

		if (info->memclk[i].dirty) {
			/* Fix: gate on the memory clock's own rate. The
			 * original tested nodeclk[i].rate here (copy/paste
			 * error), so a dirty memclk could be skipped or set
			 * based on the wrong vote. */
			if (info->memclk[i].rate != 0) {
				rounded_rate = clk_round_rate(info->
					memclk[i].clk, info->memclk[i].rate);
				status = clk_set_rate(info->memclk[i].clk,
					rounded_rate);
				MSM_BUS_DBG("AXI: node: %d set_rate: %ld\n",
					info->node_info->id, rounded_rate);
			}
			if (enable && !(info->memclk[i].enable)) {
				clk_prepare_enable(info->memclk[i].clk);
				info->memclk[i].dirty = false;
				info->memclk[i].enable = true;
			} else if (info->memclk[i].rate == 0 && (!enable) &&
				(info->memclk[i].enable)) {
				clk_disable_unprepare(info->memclk[i].clk);
				info->memclk[i].dirty = false;
				info->memclk[i].enable = false;
			}
		}
	}
	return status;
}
/**
* msm_bus_fabric_clk_commit() - Call clock enable and update clock
* values.
*/
static int msm_bus_fabric_clk_commit(int enable, struct msm_bus_fabric *fabric)
{
	unsigned int i, nfound = 0, status = 0;
	/* VLA sized by nslaves; holds the CLK_NODE-tagged tree entries. */
	struct msm_bus_inode_info *info[fabric->pdata->nslaves];
	/* Apply the fabric's own clock first if anything changed. */
	if (fabric->clk_dirty == true)
		status = msm_bus_fabric_clk_set(enable, &fabric->info);
	if (status)
		MSM_BUS_WARN("Error setting clocks on fabric: %d\n",
			fabric->fabdev.id);
	/* Then every node tagged as owning a clock, starting at this
	 * fabric's id. (&info and info decay to the same address here.) */
	nfound = radix_tree_gang_lookup_tag(&fabric->fab_tree, (void **)&info,
		fabric->fabdev.id, fabric->pdata->nslaves, CLK_NODE);
	if (nfound == 0) {
		MSM_BUS_DBG("No clock nodes found for fabric: %d\n",
			fabric->fabdev.id);
		goto out;
	}
	for (i = 0; i < nfound; i++) {
		status = msm_bus_fabric_clk_set(enable, info[i]);
		if (status)
			MSM_BUS_WARN("Error setting clocks for node: %d\n",
				info[i]->node_info->id);
	}
out:
	/* Returns the status of the last clk_set call only. */
	return status;
}
/*
 * Program a master's throughput limiter via the hw algorithm, bracketed
 * by the clock enables the QoS registers require. Mirrors the structure
 * of msm_bus_fabric_config_master() below.
 */
static void msm_bus_fabric_config_limiter(
	struct msm_bus_fabric_device *fabdev,
	struct msm_bus_inode_info *info)
{
	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
	long rounded_rate, cur_rate;
	if (fabdev->hw_algo.config_limiter == NULL)
		return;
	/* Enable clocks before accessing QoS registers */
	if (fabric->info.nodeclk[DUAL_CTX].clk) {
		/* rate == 0: no vote yet, so pick a valid rounded rate
		 * before force-enabling the fabric clock. */
		if (fabric->info.nodeclk[DUAL_CTX].rate == 0) {
			cur_rate = clk_get_rate(
				fabric->info.nodeclk[DUAL_CTX].clk);
			rounded_rate = clk_round_rate(
				fabric->info.nodeclk[DUAL_CTX].clk,
				cur_rate ? cur_rate : 1);
			if (clk_set_rate(fabric->info.nodeclk[DUAL_CTX].clk,
					rounded_rate))
				MSM_BUS_ERR("Error: clk: en: Node: %d rate: %ld",
					fabric->fabdev.id, rounded_rate);
			clk_prepare_enable(fabric->info.nodeclk[DUAL_CTX].clk);
		}
	}
	if (info->iface_clk.clk)
		clk_prepare_enable(info->iface_clk.clk);
	fabdev->hw_algo.config_limiter(fabric->pdata, info);
	/* Disable clocks after accessing QoS registers */
	if (fabric->info.nodeclk[DUAL_CTX].clk &&
			fabric->info.nodeclk[DUAL_CTX].rate == 0)
		clk_disable_unprepare(fabric->info.nodeclk[DUAL_CTX].clk);
	if (info->iface_clk.clk) {
		MSM_BUS_DBG("Commented: Will disable clock for info: %d\n",
			info->node_info->priv_id);
		clk_disable_unprepare(info->iface_clk.clk);
	}
}
/*
 * Program a master's QoS configuration (requested clock and bandwidth)
 * via the hw algorithm, with the same clock bracketing as
 * msm_bus_fabric_config_limiter() above.
 */
static void msm_bus_fabric_config_master(
	struct msm_bus_fabric_device *fabdev,
	struct msm_bus_inode_info *info, uint64_t req_clk, uint64_t req_bw)
{
	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
	long rounded_rate, cur_rate;
	if (fabdev->hw_algo.config_master == NULL)
		return;
	/* Enable clocks before accessing QoS registers */
	if (fabric->info.nodeclk[DUAL_CTX].clk) {
		if (fabric->info.nodeclk[DUAL_CTX].rate == 0) {
			cur_rate = clk_get_rate(
				fabric->info.nodeclk[DUAL_CTX].clk);
			rounded_rate = clk_round_rate(
				fabric->info.nodeclk[DUAL_CTX].clk,
				cur_rate ? cur_rate : 1);
			if (clk_set_rate(fabric->info.nodeclk[DUAL_CTX].clk,
					rounded_rate))
				MSM_BUS_ERR("Error: clk: en: Node: %d rate: %ld",
					fabric->fabdev.id, rounded_rate);
			clk_prepare_enable(fabric->info.nodeclk[DUAL_CTX].clk);
		}
	}
	if (info->iface_clk.clk)
		clk_prepare_enable(info->iface_clk.clk);
	fabdev->hw_algo.config_master(fabric->pdata, info, req_clk, req_bw);
	/* Disable clocks after accessing QoS registers */
	if (fabric->info.nodeclk[DUAL_CTX].clk &&
			fabric->info.nodeclk[DUAL_CTX].rate == 0)
		clk_disable_unprepare(fabric->info.nodeclk[DUAL_CTX].clk);
	if (info->iface_clk.clk) {
		MSM_BUS_DBG("Commented: Will disable clock for info: %d\n",
			info->node_info->priv_id);
		clk_disable_unprepare(info->iface_clk.clk);
	}
}
/**
* msm_bus_fabric_hw_commit() - Commit the arbitration data to Hardware.
* @fabric: Fabric for which the data should be committed
* */
static int msm_bus_fabric_hw_commit(struct msm_bus_fabric_device *fabdev)
{
	int status = 0;
	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
	/*
	 * For a non-zero bandwidth request, clocks should be enabled before
	 * sending the arbitration data to RPM, but should be disabled only
	 * after commiting the data.
	 */
	status = msm_bus_fabric_clk_commit(ENABLE, fabric);
	if (status)
		MSM_BUS_DBG("Error setting clocks on fabric: %d\n",
			fabric->fabdev.id);
	/* Arbitration data only needs flushing when something changed. */
	if (!fabric->arb_dirty) {
		MSM_BUS_DBG("Not committing as fabric not arb_dirty\n");
		goto skip_arb;
	}
	status = fabdev->hw_algo.commit(fabric->pdata, fabric->hw_data,
		(void **)fabric->cdata);
	if (status)
		MSM_BUS_DBG("Error committing arb data for fabric: %d\n",
			fabric->fabdev.id);
	fabric->arb_dirty = false;
skip_arb:
	/*
	 * If the bandwidth request is 0 for a fabric, the clocks
	 * should be disabled after arbitration data is committed.
	 */
	status = msm_bus_fabric_clk_commit(DISABLE, fabric);
	if (status)
		MSM_BUS_WARN("Error disabling clocks on fabric: %d\n",
			fabric->fabdev.id);
	/* Note: the DISABLE pass's status overwrites any earlier error. */
	fabric->clk_dirty = false;
	return status;
}
/**
* msm_bus_fabric_port_halt() - Used to halt a master port
* @fabric: Fabric on which the current master node is present
* @portid: Port id of the master
*/
int msm_bus_fabric_port_halt(struct msm_bus_fabric_device *fabdev, int iid)
{
	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
	struct msm_bus_inode_info *node;
	uint8_t mport;

	/* Resolve the master node from its interface id. */
	node = fabdev->algo->find_node(fabdev, iid);
	if (!node) {
		MSM_BUS_ERR("Error: Info not found for id: %u", iid);
		return -EINVAL;
	}

	/* Halt the master's first port using the fabric's halt register id. */
	mport = node->node_info->masterp[0];
	return fabdev->hw_algo.port_halt(fabric->pdata->haltid, mport);
}
/**
* msm_bus_fabric_port_unhalt() - Used to unhalt a master port
* @fabric: Fabric on which the current master node is present
* @portid: Port id of the master
*/
int msm_bus_fabric_port_unhalt(struct msm_bus_fabric_device *fabdev, int iid)
{
	struct msm_bus_inode_info *info = NULL;
	uint8_t mport;
	uint32_t haltid = 0;
	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);

	/* Resolve the master node from its interface id. */
	info = fabdev->algo->find_node(fabdev, iid);
	if (!info) {
		MSM_BUS_ERR("Error: Info not found for id: %u", iid);
		return -EINVAL;
	}

	/* Unhalt the master's first port via the fabric's halt register. */
	haltid = fabric->pdata->haltid;
	mport = info->node_info->masterp[0];
	return fabdev->hw_algo.port_unhalt(haltid, mport);
}
/**
* msm_bus_fabric_find_gw_node() - This function finds the gateway node
* attached on a given fabric
* @id: ID of the gateway node
* @fabric: Fabric to find the gateway node on
* Function returns: Pointer to the gateway node
*/
static struct msm_bus_inode_info *msm_bus_fabric_find_gw_node(struct
	msm_bus_fabric_device * fabdev, int id)
{
	struct msm_bus_fabnodeinfo *gw;
	struct msm_bus_fabric *fabric;

	if (!fabdev) {
		MSM_BUS_ERR("No fabric device found!\n");
		return NULL;
	}

	fabric = to_msm_bus_fabric(fabdev);
	if (!fabric || IS_ERR(fabric)) {
		MSM_BUS_ERR("No fabric type found!\n");
		return NULL;
	}

	/* Linear scan of the gateway list for a matching private id. */
	list_for_each_entry(gw, &fabric->gateways, list) {
		if (gw->info->node_info->priv_id == id)
			return gw->info;
	}

	return NULL;
}
static struct msm_bus_inode_info *msm_bus_fabric_find_node(struct
	msm_bus_fabric_device * fabdev, int id)
{
	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
	struct msm_bus_inode_info *node;

	/* Nodes are indexed by private id in the fabric's radix tree. */
	node = radix_tree_lookup(&fabric->fab_tree, id);
	if (node == NULL)
		MSM_BUS_ERR("Null info found for id %d\n", id);
	return node;
}
static struct list_head *msm_bus_fabric_get_gw_list(struct msm_bus_fabric_device
	*fabdev)
{
	struct msm_bus_fabric *fab = to_msm_bus_fabric(fabdev);

	/* Hand back the gateway list head if the fabric pointer is sane. */
	if (!fab || IS_ERR(fab)) {
		MSM_BUS_ERR("No fabric found from fabdev\n");
		return NULL;
	}

	return &fab->gateways;
}
/* Fabric algorithm callbacks plugged into the generic msm_bus framework. */
static struct msm_bus_fab_algorithm msm_bus_algo = {
	.update_clks = msm_bus_fabric_update_clks,
	.update_bw = msm_bus_fabric_update_bw,
	.port_halt = msm_bus_fabric_port_halt,
	.port_unhalt = msm_bus_fabric_port_unhalt,
	.commit = msm_bus_fabric_hw_commit,
	.find_node = msm_bus_fabric_find_node,
	.find_gw_node = msm_bus_fabric_find_gw_node,
	.get_gw_list = msm_bus_fabric_get_gw_list,
	.config_master = msm_bus_fabric_config_master,
	.config_limiter = msm_bus_fabric_config_limiter,
};
/*
 * Select and run the hardware-specific init for this fabric type.
 * NOC and BIMC fabrics have dedicated init paths; everything else goes
 * through the RPM backend, which is the only one that can fail.
 */
static int msm_bus_fabric_hw_init(struct msm_bus_fabric_registration *pdata,
	struct msm_bus_hw_algorithm *hw_algo)
{
	int ret;

	if (pdata->hw_sel == MSM_BUS_NOC) {
		msm_bus_noc_hw_init(pdata, hw_algo);
		return 0;
	}
	if (pdata->hw_sel == MSM_BUS_BIMC) {
		msm_bus_bimc_hw_init(pdata, hw_algo);
		return 0;
	}

	ret = msm_bus_rpm_hw_init(pdata, hw_algo);
	if (ret) {
		MSM_BUS_ERR("RPM initialization failed\n");
		ret = -EINVAL;
	}
	return ret;
}
static int msm_bus_fabric_probe(struct platform_device *pdev)
{
int ctx, ret = 0;
struct msm_bus_fabric *fabric;
struct msm_bus_fabric_registration *pdata;
fabric = kzalloc(sizeof(struct msm_bus_fabric), GFP_KERNEL);
if (!fabric) {
MSM_BUS_ERR("Fabric alloc failed\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&fabric->gateways);
INIT_RADIX_TREE(&fabric->fab_tree, GFP_ATOMIC);
fabric->num_nodes = 0;
fabric->fabdev.visited = false;
fabric->info.node_info = kzalloc(sizeof(struct msm_bus_node_info),
GFP_KERNEL);
if (ZERO_OR_NULL_PTR(fabric->info.node_info)) {
MSM_BUS_ERR("Fabric node info alloc failed\n");
kfree(fabric);
return -ENOMEM;
}
fabric->info.num_pnodes = -1;
fabric->info.link_info.clk[DUAL_CTX] = 0;
fabric->info.link_info.bw[DUAL_CTX] = 0;
fabric->info.link_info.clk[ACTIVE_CTX] = 0;
fabric->info.link_info.bw[ACTIVE_CTX] = 0;
/* If possible, get pdata from device-tree */
if (pdev->dev.of_node) {
pdata = msm_bus_of_get_fab_data(pdev);
if (IS_ERR(pdata) || ZERO_OR_NULL_PTR(pdata)) {
pr_err("Null platform data\n");
kfree(fabric->info.node_info);
kfree(fabric);
return PTR_ERR(pdata);
}
msm_bus_board_init(pdata);
fabric->fabdev.id = pdata->id;
msm_bus_of_get_nfab(pdev, pdata);
} else {
pdata = (struct msm_bus_fabric_registration *)pdev->
dev.platform_data;
fabric->fabdev.id = pdev->id;
}
fabric->fabdev.name = pdata->name;
fabric->fabdev.nr_lim_thresh = pdata->nr_lim_thresh;
fabric->fabdev.eff_fact = pdata->eff_fact;
fabric->fabdev.algo = &msm_bus_algo;
fabric->info.node_info->priv_id = fabric->fabdev.id;
fabric->info.node_info->id = fabric->fabdev.id;
ret = msm_bus_fabric_hw_init(pdata, &fabric->fabdev.hw_algo);
if (ret) {
MSM_BUS_ERR("Error initializing hardware for fabric: %d\n",
fabric->fabdev.id);
goto err;
}
fabric->ahb = pdata->ahb;
fabric->pdata = pdata;
fabric->pdata->board_algo->assign_iids(fabric->pdata,
fabric->fabdev.id);
fabric->fabdev.board_algo = fabric->pdata->board_algo;
/*
* clk and bw for fabric->info will contain the max bw and clk
* it will allow. This info will come from the boards file.
*/
ret = msm_bus_fabric_device_register(&fabric->fabdev);
if (ret) {
MSM_BUS_ERR("Error registering fabric %d ret %d\n",
fabric->fabdev.id, ret);
goto err;
}
for (ctx = 0; ctx < NUM_CTX; ctx++) {
if (pdata->fabclk[ctx]) {
fabric->info.nodeclk[ctx].clk = clk_get(
&fabric->fabdev.dev, pdata->fabclk[ctx]);
if (IS_ERR(fabric->info.nodeclk[ctx].clk)) {
MSM_BUS_ERR("Couldn't get clock %s\n",
pdata->fabclk[ctx]);
ret = -EINVAL;
goto err;
}
fabric->info.nodeclk[ctx].enable = false;
fabric->info.nodeclk[ctx].dirty = false;
}
}
/* Find num. of slaves, masters, populate gateways, radix tree */
ret = register_fabric_info(pdev, fabric);
if (ret) {
MSM_BUS_ERR("Could not register fabric %d info, ret: %d\n",
fabric->fabdev.id, ret);
goto err;
}
if (!fabric->ahb) {
/* Allocate memory for commit data */
for (ctx = 0; ctx < NUM_CTX; ctx++) {
ret = fabric->fabdev.hw_algo.allocate_commit_data(
fabric->pdata, &fabric->cdata[ctx], ctx);
if (ret) {
MSM_BUS_ERR("Failed to alloc commit data for "
"fab: %d, ret = %d\n",
fabric->fabdev.id, ret);
goto err;
}
}
}
if (msmbus_coresight_init(pdev))
pr_warn("Coresight support absent for bus: %d\n", pdata->id);
return ret;
err:
kfree(fabric->info.node_info);
kfree(fabric);
return ret;
}
/*
 * msm_bus_fabric_remove() - undo msm_bus_fabric_probe() for one fabric.
 *
 * Unregisters the fabric device, drops its debug/coresight hooks, removes
 * every master and slave node from the fabric's radix tree, frees the
 * per-context commit data (non-AHB fabrics only) and finally frees the
 * fabric itself.  Always returns 0.
 */
static int msm_bus_fabric_remove(struct platform_device *pdev)
{
	struct msm_bus_fabric_device *fabdev = NULL;
	struct msm_bus_fabric *fabric;
	int i;
	int ret = 0;

	fabdev = platform_get_drvdata(pdev);
	msmbus_coresight_remove(pdev);
	msm_bus_fabric_device_unregister(fabdev);
	fabric = to_msm_bus_fabric(fabdev);
	msm_bus_dbg_commit_data(fabric->fabdev.name, NULL, 0, 0, 0,
		MSM_BUS_DBG_UNREGISTER);
	/* Masters were inserted at consecutive ids starting at the fabric id. */
	for (i = 0; i < fabric->pdata->nmasters; i++)
		radix_tree_delete(&fabric->fab_tree, fabric->fabdev.id + i);
	/*
	 * NOTE(review): the upper bound compares against nslaves alone while
	 * the start index is offset by (fabric id + SLAVE_ID_KEY); if slave
	 * ids are assigned from that offset, this loop may terminate before
	 * deleting any slave entries.  Verify against board_algo->assign_iids.
	 */
	for (i = (fabric->fabdev.id + SLAVE_ID_KEY); i <
		fabric->pdata->nslaves; i++)
		radix_tree_delete(&fabric->fab_tree, i);
	/* Commit data is only allocated for non-AHB fabrics in probe. */
	if (!fabric->ahb) {
		fabdev->hw_algo.free_commit_data(fabric->cdata[DUAL_CTX]);
		fabdev->hw_algo.free_commit_data(fabric->cdata[ACTIVE_CTX]);
	}
	kfree(fabric->info.node_info);
	kfree(fabric->hw_data);
	kfree(fabric);
	return ret;
}
/*
 * Devicetree match table.  Declared const: the driver core only reads it
 * (struct device_driver::of_match_table is a pointer-to-const), and making
 * it const moves the table into read-only data, per kernel convention.
 */
static const struct of_device_id fabric_match[] = {
	{.compatible = "msm-bus-fabric"},
	{}
};
/* Platform driver glue binding "msm_bus_fabric" devices to probe/remove. */
static struct platform_driver msm_bus_fabric_driver = {
	.probe = msm_bus_fabric_probe,
	.remove = msm_bus_fabric_remove,
	.driver = {
		.name = "msm_bus_fabric",
		.owner = THIS_MODULE,
		.of_match_table = fabric_match,
	},
};
/*
 * msm_bus_fabric_init_driver() - one-shot registration of the fabric driver.
 *
 * May be reached both via subsys_initcall and via explicit callers, so a
 * static flag makes repeat invocations a harmless no-op returning 0.
 * On the first call, installs the legacy arbitration ops and registers
 * the platform driver, propagating platform_driver_register()'s result.
 */
int __init msm_bus_fabric_init_driver(void)
{
	static bool initialized;

	if (!initialized) {
		initialized = true;
		MSM_BUS_ERR("msm_bus_fabric_init_driver\n");
		msm_bus_arb_setops_legacy(&arb_ops);
		return platform_driver_register(&msm_bus_fabric_driver);
	}
	return 0;
}
EXPORT_SYMBOL(msm_bus_fabric_init_driver);
subsys_initcall(msm_bus_fabric_init_driver);
| gpl-2.0 |
marcOcram/Acer-Liquid-MT-Kernel | arch/powerpc/sysdev/simple_gpio.c | 737 | 3362 | /*
* Simple Memory-Mapped GPIOs
*
* Copyright (c) MontaVista Software, Inc. 2008.
*
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <asm/prom.h>
#include "simple_gpio.h"
/*
 * Per-bank state for an 8-bit memory-mapped GPIO controller.
 * Embeds the generic of_mm_gpio_chip so container_of() can recover it.
 */
struct u8_gpio_chip {
	struct of_mm_gpio_chip mm_gc;
	spinlock_t lock;	/* protects read-modify-write of 'data' */
	/* shadowed data register to clear/set bits safely */
	u8 data;
};
/* Recover the wrapping u8_gpio_chip from its embedded of_mm_gpio_chip. */
static struct u8_gpio_chip *to_u8_gpio_chip(struct of_mm_gpio_chip *mm_gc)
{
	return container_of(mm_gc, struct u8_gpio_chip, mm_gc);
}
/*
 * Map a GPIO number (0..7) to its register bit.  Bit numbering is
 * MSB-first: pin 0 is the most significant bit of the 8-bit register.
 */
static u8 u8_pin2mask(unsigned int pin)
{
	return 0x80 >> pin;
}
static int u8_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
return in_8(mm_gc->regs) & u8_pin2mask(gpio);
}
static void u8_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc);
unsigned long flags;
spin_lock_irqsave(&u8_gc->lock, flags);
if (val)
u8_gc->data |= u8_pin2mask(gpio);
else
u8_gc->data &= ~u8_pin2mask(gpio);
out_8(mm_gc->regs, u8_gc->data);
spin_unlock_irqrestore(&u8_gc->lock, flags);
}
/*
 * Direction-input callback.  This simple controller has no direction
 * register, so there is nothing to program; report success.
 */
static int u8_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
	return 0;
}
/*
 * Direction-output callback.  No direction register exists, so just
 * latch the requested initial value and report success.
 */
static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
	u8_gpio_set(gc, gpio, val);
	return 0;
}
/*
 * Called by of_mm_gpiochip_add() once the registers are mapped: seed the
 * shadow copy from the live register so later read-modify-writes start
 * from the hardware's actual state.
 */
static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
{
	struct u8_gpio_chip *u8_gc = to_u8_gpio_chip(mm_gc);

	u8_gc->data = in_8(mm_gc->regs);
}
/*
 * Allocate and register one 8-bit GPIO bank for the given device node.
 * Wires the generic gpio_chip callbacks to the u8_* accessors and hands
 * the result to of_mm_gpiochip_add(), which maps the registers and calls
 * our save_regs hook.  Returns 0 on success or a negative errno; the
 * chip memory is released on failure.
 */
static int __init u8_simple_gpiochip_add(struct device_node *np)
{
	struct u8_gpio_chip *chip;
	struct of_mm_gpio_chip *mm_gc;
	struct of_gpio_chip *of_gc;
	struct gpio_chip *gc;
	int ret;

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	spin_lock_init(&chip->lock);

	mm_gc = &chip->mm_gc;
	of_gc = &mm_gc->of_gc;
	gc = &of_gc->gc;

	mm_gc->save_regs = u8_gpio_save_regs;
	of_gc->gpio_cells = 2;		/* phandle args: <pin flags> */
	gc->ngpio = 8;
	gc->direction_input = u8_gpio_dir_in;
	gc->direction_output = u8_gpio_dir_out;
	gc->get = u8_gpio_get;
	gc->set = u8_gpio_set;

	ret = of_mm_gpiochip_add(np, mm_gc);
	if (ret)
		kfree(chip);
	return ret;
}
/*
 * simple_gpiochip_init() - register a GPIO bank for every DT node that
 * matches @compatible.
 *
 * The register-window size selects the bank width; only 1-byte banks are
 * implemented.  Failures are logged per node and the scan continues with
 * the next node (note the goto targets the err: label *inside* the loop,
 * which falls through to the next iteration).
 */
void __init simple_gpiochip_init(const char *compatible)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, compatible) {
		int ret;
		struct resource r;

		ret = of_address_to_resource(np, 0, &r);
		if (ret)
			goto err;

		switch (resource_size(&r)) {
		case 1:
			ret = u8_simple_gpiochip_add(np);
			if (ret)
				goto err;
			break;
		default:
			/*
			 * Whenever you need support for GPIO bank width > 1,
			 * please just turn u8_ code into huge macros, and
			 * construct needed uX_ code with it.
			 */
			ret = -ENOSYS;
			goto err;
		}
		continue;
err:
		pr_err("%s: registration failed, status %d\n",
		       np->full_name, ret);
	}
}
| gpl-2.0 |
SM-G920P/SM-N920 | drivers/media/tdmb/fc8080/fc8080_ppi.c | 737 | 8976 | /*****************************************************************************
Copyright(c) 2013 FCI Inc. All Rights Reserved
File name : fc8080_ppi.c
Description : EBI2LCD interface header file
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
History :
----------------------------------------------------------------------
*******************************************************************************/
#include "linux/io.h"
#include <linux/mutex.h>
#include "fci_types.h"
#include "fc8080_regs.h"
#include "fc8080_ppi.h"
#include "fci_oal.h"
#include "tdmb.h"
#define BBM_BASE_ADDR (0)
#define PPI_BMODE 0x00
#define PPI_WMODE 0x04
#define PPI_LMODE 0x08
#define PPI_RD_THRESH 0x30
#define PPI_RD_REG 0x20
#define PPI_READ 0x40
#define PPI_WRITE 0x00
#define PPI_AINC 0x80
static DEFINE_MUTEX(lock);
u32 base_address;
#define FC8080_PPI_REG_OUT(x) writeb(x, (void __iomem *)base_address)
#define FC8080_PPI_REG_IN readb((void __iomem *)base_address)
/*
 * fc8080_ppi_init() - record the PPI register base address.
 *
 * The 32-bit base is delivered split across two 16-bit parameters:
 * param2 carries the upper half, param1 the lower half.
 * Always returns BBM_OK.
 */
s32 fc8080_ppi_init(HANDLE handle, u16 param1, u16 param2)
{
	base_address = ((u32) param2 << 16) | param1;

	DPRINTK("%s : 0x%p\n", __func__, (void __iomem *)base_address);

	return BBM_OK;
}
/*
 * fc8080_ppi_byteread() - read one byte from chip register @addr.
 *
 * Protocol (all transfers are 4 bits wide, high nibble first):
 * address (4 nibbles), command, length, then the data nibbles.
 * Always returns BBM_OK; the register value lands in *data.
 */
s32 fc8080_ppi_byteread(HANDLE handle, u16 addr, u8 *data)
{
	u16 length = 1;
	u8 command;

	mutex_lock(&lock);
	FC8080_PPI_REG_OUT((addr >> 12) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 8) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 4) & 0x0f);
	FC8080_PPI_REG_OUT(addr & 0x0f);

	command = (u8) (PPI_READ);
	FC8080_PPI_REG_OUT(command >> 4);
	FC8080_PPI_REG_OUT(command);
	FC8080_PPI_REG_OUT(length >> 4);
	FC8080_PPI_REG_OUT(length);

	/*
	 * Mask each incoming nibble to its low 4 bits, as every other read
	 * path (wordread/longread/bulkread/dataread) does.  The first read
	 * was previously used unmasked, so any stray high bits on the bus
	 * would corrupt the assembled byte.
	 */
	*data = (FC8080_PPI_REG_IN & 0x0f) << 4;
	*data |= (FC8080_PPI_REG_IN & 0x0f);
	mutex_unlock(&lock);

	return BBM_OK;
}
/*
 * fc8080_ppi_wordread() - read a 16-bit value from @addr.
 *
 * Uses auto-increment (PPI_AINC) to fetch two consecutive bytes; the
 * low-address byte arrives first, so the nibble shifts run 4,0,12,8.
 * The FC8080_PPI_REG_IN reads have bus side effects — their order is
 * the protocol and must not be rearranged.  Always returns BBM_OK.
 */
s32 fc8080_ppi_wordread(HANDLE handle, u16 addr, u16 *data)
{
	u16 length = 2;
	u8 command;

	mutex_lock(&lock);
	/* Address phase: four nibbles, most significant first. */
	FC8080_PPI_REG_OUT((addr >> 12) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 8) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 4) & 0x0f);
	FC8080_PPI_REG_OUT(addr & 0x0f);

	command = (u8) (PPI_READ | PPI_AINC);
	FC8080_PPI_REG_OUT(command >> 4);
	FC8080_PPI_REG_OUT(command);
	FC8080_PPI_REG_OUT(length >> 4);
	FC8080_PPI_REG_OUT(length);

	/* Data phase: low byte (nibbles 4,0) then high byte (12,8). */
	*data = (FC8080_PPI_REG_IN & 0x0f) << 4;
	*data |= FC8080_PPI_REG_IN & 0x0f;
	*data |= (FC8080_PPI_REG_IN & 0x0f) << 12;
	*data |= (FC8080_PPI_REG_IN & 0x0f) << 8;
	mutex_unlock(&lock);

	return BBM_OK;
}
/*
 * fc8080_ppi_longread() - read a 32-bit value from @addr.
 *
 * Same nibble protocol as fc8080_ppi_wordread(), extended to four
 * auto-incremented bytes assembled little-endian (shift sequence
 * 4,0,12,8,20,16,28,24).  Read order is the protocol; do not reorder.
 * Always returns BBM_OK.
 */
s32 fc8080_ppi_longread(HANDLE handle, u16 addr, u32 *data)
{
	u16 length = 4;
	u8 command;

	mutex_lock(&lock);
	FC8080_PPI_REG_OUT((addr >> 12) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 8) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 4) & 0x0f);
	FC8080_PPI_REG_OUT(addr & 0x0f);

	command = (u8) (PPI_READ | PPI_AINC);
	FC8080_PPI_REG_OUT(command >> 4);
	FC8080_PPI_REG_OUT(command);
	FC8080_PPI_REG_OUT(length >> 4);
	FC8080_PPI_REG_OUT(length);

	*data = (FC8080_PPI_REG_IN & 0x0f) << 4;
	*data |= FC8080_PPI_REG_IN & 0x0f;
	*data |= (FC8080_PPI_REG_IN & 0x0f) << 12;
	*data |= (FC8080_PPI_REG_IN & 0x0f) << 8;
	*data |= (FC8080_PPI_REG_IN & 0x0f) << 20;
	*data |= (FC8080_PPI_REG_IN & 0x0f) << 16;
	*data |= (FC8080_PPI_REG_IN & 0x0f) << 28;
	*data |= (FC8080_PPI_REG_IN & 0x0f) << 24;
	mutex_unlock(&lock);

	return BBM_OK;
}
/*
 * fc8080_ppi_bulkread() - read @length bytes starting at @addr.
 *
 * The length field of a transfer is limited to 255, so the request is
 * split into full 255-byte chunks (x of them) plus a final remainder
 * (y bytes), each chunk issued as its own address/command/length/data
 * sequence with auto-increment.  Always returns BBM_OK.
 */
s32 fc8080_ppi_bulkread(HANDLE handle, u16 addr, u8 *data, u16 length)
{
	s32 i, j;
	u8 command;
	u16 x, y;

	x = length / 255;	/* number of full 255-byte chunks */
	y = length % 255;	/* trailing partial chunk */

	mutex_lock(&lock);
	for (i = 0; i < x; i++, addr += 255) {
		FC8080_PPI_REG_OUT((addr >> 12) & 0x0f);
		FC8080_PPI_REG_OUT((addr >> 8) & 0x0f);
		FC8080_PPI_REG_OUT((addr >> 4) & 0x0f);
		FC8080_PPI_REG_OUT(addr & 0x0f);

		command = (u8) (PPI_READ | PPI_AINC);
		FC8080_PPI_REG_OUT(command >> 4);
		FC8080_PPI_REG_OUT(command);
		FC8080_PPI_REG_OUT((255 >> 4) & 0x0f);
		FC8080_PPI_REG_OUT(255 & 0x0f);

		for (j = 0; j < 255; j++) {
			data[i * 255 + j] =
				(u8) ((FC8080_PPI_REG_IN & 0x0f) << 4);
			data[i * 255 + j] |=
				(u8) (FC8080_PPI_REG_IN & 0x0f);
		}
	}

	if (y) {
		/* addr already advanced past the full chunks above. */
		FC8080_PPI_REG_OUT((addr >> 12) & 0x0f);
		FC8080_PPI_REG_OUT((addr >> 8) & 0x0f);
		FC8080_PPI_REG_OUT((addr >> 4) & 0x0f);
		FC8080_PPI_REG_OUT(addr & 0x0f);

		command = (u8) (PPI_READ | PPI_AINC);
		FC8080_PPI_REG_OUT(command >> 4);
		FC8080_PPI_REG_OUT(command);
		FC8080_PPI_REG_OUT((y >> 4) & 0x0f);
		FC8080_PPI_REG_OUT(y & 0x0f);

		for (j = 0; j < y; j++) {
			data[x * 255 + j] =
				(u8) ((FC8080_PPI_REG_IN & 0x0f) << 4);
			data[x * 255 + j] |=
				(u8) (FC8080_PPI_REG_IN & 0x0f);
		}
	}
	mutex_unlock(&lock);

	return BBM_OK;
}
/*
 * fc8080_ppi_bytewrite() - write one byte to chip register @addr.
 *
 * Nibble protocol: address (4 nibbles), command, length, then the data
 * byte high nibble first.  Always returns BBM_OK.
 */
s32 fc8080_ppi_bytewrite(HANDLE handle, u16 addr, u8 data)
{
	u16 length = 1;
	u8 command;

	mutex_lock(&lock);
	FC8080_PPI_REG_OUT((addr >> 12) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 8) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 4) & 0x0f);
	FC8080_PPI_REG_OUT(addr & 0x0f);

	command = (u8) (PPI_WRITE);
	FC8080_PPI_REG_OUT(command >> 4);
	FC8080_PPI_REG_OUT(command);
	FC8080_PPI_REG_OUT(length >> 4);
	FC8080_PPI_REG_OUT(length);

	FC8080_PPI_REG_OUT(data >> 4);
	FC8080_PPI_REG_OUT(data);
	mutex_unlock(&lock);

	return BBM_OK;
}
/*
 * fc8080_ppi_wordwrite() - write a 16-bit value to @addr.
 *
 * Auto-increment write of two bytes; the low byte goes first (shift
 * sequence 4,0,12,8), mirroring fc8080_ppi_wordread().  Write order is
 * the protocol — do not reorder.  Always returns BBM_OK.
 */
s32 fc8080_ppi_wordwrite(HANDLE handle, u16 addr, u16 data)
{
	u16 length = 2;
	u8 command;

	mutex_lock(&lock);
	FC8080_PPI_REG_OUT((addr >> 12) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 8) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 4) & 0x0f);
	FC8080_PPI_REG_OUT(addr & 0x0f);

	command = (u8) (PPI_WRITE | PPI_AINC);
	FC8080_PPI_REG_OUT(command >> 4);
	FC8080_PPI_REG_OUT(command);
	FC8080_PPI_REG_OUT(length >> 4);
	FC8080_PPI_REG_OUT(length);

	FC8080_PPI_REG_OUT(data >> 4);
	FC8080_PPI_REG_OUT(data);
	FC8080_PPI_REG_OUT(data >> 12);
	FC8080_PPI_REG_OUT(data >> 8);
	mutex_unlock(&lock);

	return BBM_OK;
}
/*
 * fc8080_ppi_longwrite() - write a 32-bit value to @addr.
 *
 * Auto-increment write of four bytes, least significant byte first
 * (shift sequence 4,0,12,8,20,16,28,24), mirroring fc8080_ppi_longread().
 * Always returns BBM_OK.
 */
s32 fc8080_ppi_longwrite(HANDLE handle, u16 addr, u32 data)
{
	u16 length = 4;
	u8 command;

	mutex_lock(&lock);
	FC8080_PPI_REG_OUT((addr >> 12) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 8) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 4) & 0x0f);
	FC8080_PPI_REG_OUT(addr & 0x0f);

	command = (u8) (PPI_WRITE | PPI_AINC);
	FC8080_PPI_REG_OUT(command >> 4);
	FC8080_PPI_REG_OUT(command);
	FC8080_PPI_REG_OUT(length >> 4);
	FC8080_PPI_REG_OUT(length);

	FC8080_PPI_REG_OUT(data >> 4);
	FC8080_PPI_REG_OUT(data);
	FC8080_PPI_REG_OUT(data >> 12);
	FC8080_PPI_REG_OUT(data >> 8);
	FC8080_PPI_REG_OUT(data >> 20);
	FC8080_PPI_REG_OUT(data >> 16);
	FC8080_PPI_REG_OUT(data >> 28);
	FC8080_PPI_REG_OUT(data >> 24);
	mutex_unlock(&lock);

	return BBM_OK;
}
/*
 * fc8080_ppi_bulkwrite() - write @length bytes starting at @addr.
 *
 * Counterpart of fc8080_ppi_bulkread(): the transfer length field maxes
 * out at 255, so the buffer is written as x full 255-byte chunks plus a
 * y-byte remainder, each with its own address/command/length preamble.
 * Always returns BBM_OK.
 */
s32 fc8080_ppi_bulkwrite(HANDLE handle, u16 addr, u8 *data, u16 length)
{
	s32 i, j;
	u8 command;
	u16 x, y;

	x = length / 255;	/* number of full 255-byte chunks */
	y = length % 255;	/* trailing partial chunk */

	mutex_lock(&lock);
	for (i = 0; i < x; i++, addr += 255) {
		FC8080_PPI_REG_OUT((addr >> 12) & 0x0f);
		FC8080_PPI_REG_OUT((addr >> 8) & 0x0f);
		FC8080_PPI_REG_OUT((addr >> 4) & 0x0f);
		FC8080_PPI_REG_OUT(addr & 0x0f);

		command = (u8) (PPI_WRITE | PPI_AINC);
		FC8080_PPI_REG_OUT(command >> 4);
		FC8080_PPI_REG_OUT(command);
		FC8080_PPI_REG_OUT((255 >> 4) & 0x0f);
		FC8080_PPI_REG_OUT(255 & 0x0f);

		for (j = 0; j < 255; j++) {
			FC8080_PPI_REG_OUT((data[i * 255 + j] >> 4) & 0x0f);
			FC8080_PPI_REG_OUT(data[i * 255 + j] & 0x0f);
		}
	}

	if (y) {
		/* addr already advanced past the full chunks above. */
		FC8080_PPI_REG_OUT((addr >> 12) & 0x0f);
		FC8080_PPI_REG_OUT((addr >> 8) & 0x0f);
		FC8080_PPI_REG_OUT((addr >> 4) & 0x0f);
		FC8080_PPI_REG_OUT(addr & 0x0f);

		command = (u8) (PPI_WRITE | PPI_AINC);
		FC8080_PPI_REG_OUT(command >> 4);
		FC8080_PPI_REG_OUT(command);
		FC8080_PPI_REG_OUT((y >> 4) & 0x0f);
		FC8080_PPI_REG_OUT(y & 0x0f);

		for (j = 0; j < y; j++) {
			FC8080_PPI_REG_OUT((data[x * 255 + j] >> 4) & 0x0f);
			FC8080_PPI_REG_OUT(data[x * 255 + j] & 0x0f);
		}
	}
	mutex_unlock(&lock);

	return BBM_OK;
}
/*
 * fc8080_ppi_dataread() - stream @length bytes from the data port at @addr.
 *
 * Uses PPI_RD_THRESH with a zero length field (streaming mode — unlike the
 * chunked bulkread path, no 255-byte split is performed here).  Each byte
 * is assembled from two masked nibble reads.  Always returns BBM_OK.
 */
s32 fc8080_ppi_dataread(HANDLE handle, u16 addr, u8 *data, u32 length)
{
	s32 i;
	u8 command;

	mutex_lock(&lock);
	FC8080_PPI_REG_OUT((addr >> 12) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 8) & 0x0f);
	FC8080_PPI_REG_OUT((addr >> 4) & 0x0f);
	FC8080_PPI_REG_OUT(addr & 0x0f);

	command = (u8) (PPI_READ | PPI_RD_THRESH);
	FC8080_PPI_REG_OUT(command >> 4);
	FC8080_PPI_REG_OUT(command);
	FC8080_PPI_REG_OUT(0);
	FC8080_PPI_REG_OUT(0);

	for (i = 0; i < length; i++) {
		data[i] = (u8) ((FC8080_PPI_REG_IN & 0x0f) << 4);
		data[i] |= (u8) (FC8080_PPI_REG_IN & 0x0f);
	}
	mutex_unlock(&lock);

	return BBM_OK;
}
/* Forget the register base set by fc8080_ppi_init().  Always BBM_OK. */
s32 fc8080_ppi_deinit(HANDLE handle)
{
	base_address = 0;

	return BBM_OK;
}
| gpl-2.0 |
Mazout360/lge-kernel-gb | arch/mips/kernel/ptrace.c | 737 | 14279 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 Ross Biro
* Copyright (C) Linus Torvalds
* Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
* Copyright (C) 1996 David S. Miller
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999 MIPS Technologies, Inc.
* Copyright (C) 2000 Ulf Carlsson
*
* At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
* binaries.
*/
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}
/*
* Read a general register set. We always use the 64-bit format, even
* for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
* Registers are sign extended to fill the available space.
*/
/*
 * Read a general register set.  We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign extended to fill the available space.
 *
 * Layout written to userspace: r0..r31, then lo/hi/epc/badvaddr/
 * status/cause at their EF_* offsets — 38 slots of 8 bytes total.
 */
int ptrace_getregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_WRITE, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], data + i);
	__put_user((long)regs->lo, data + EF_LO - EF_R0);
	__put_user((long)regs->hi, data + EF_HI - EF_R0);
	__put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
	__put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
	__put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
	__put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);

	return 0;
}
/*
* Write a general register set. As for PTRACE_GETREGS, we always use
* the 64-bit format. On a 32-bit kernel only the lower order half
* (according to endianness) will be used.
*/
/*
 * Write a general register set.  As for PTRACE_GETREGS, we always use
 * the 64-bit format.  On a 32-bit kernel only the lower order half
 * (according to endianness) will be used.
 */
int ptrace_setregs(struct task_struct *child, __s64 __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(VERIFY_READ, data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], data + i);
	__get_user(regs->lo, data + EF_LO - EF_R0);
	__get_user(regs->hi, data + EF_HI - EF_R0);
	__get_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);

	/* badvaddr, status, and cause may not be written.  */

	return 0;
}
/*
 * Copy the child's FPU state to userspace: fp0..fp31 as 64-bit values
 * (or all-ones if the child never used the FPU), then fcr31 and the
 * FIR (implementation/revision) register.
 *
 * Reading FIR needs the FPU enabled on the current CPU, hence the
 * preempt_disable() and, on MT cores, the dvpe/evpe bracket around the
 * cfc1 so other VPEs cannot interfere while Status is modified.
 */
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;
	unsigned int tmp;

	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		fpureg_t *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(fregs[i], i + (__u64 __user *) data);
	} else {
		/* FP never touched: report -1 in every register slot. */
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);

	preempt_disable();
	if (cpu_has_fpu) {
		unsigned int flags;

		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			flags = read_c0_status();
			__enable_fpu();
			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
			write_c0_status(flags);
			evpe(vpflags);
		} else {
			flags = read_c0_status();
			__enable_fpu();
			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
			write_c0_status(flags);
		}
	} else {
		tmp = 0;
	}
	preempt_enable();
	__put_user(tmp, data + 65);

	return 0;
}
/*
 * Load the child's FPU state from userspace: fp0..fp31 as 64-bit
 * values plus fcr31.  The read-only FIR slot is deliberately ignored.
 */
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	fpureg_t *fregs;
	int i;

	if (!access_ok(VERIFY_READ, data, 33 * 8))
		return -EIO;

	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(fregs[i], i + (__u64 __user *) data);

	__get_user(child->thread.fpu.fcr31, data + 64);

	/* FIR may not be written.  */

	return 0;
}
/*
 * PTRACE_GET_WATCH_REGS: report the child's hardware watchpoint state.
 * Writes the register style (mips32/mips64 depending on kernel build),
 * the number of usable watch register pairs, then per-register
 * watchlo/watchhi (low 12 bits only) and the hardware mask; unused
 * slots up to 8 are zero-filled.  Returns -EIO if the CPU has no watch
 * registers or the user buffer is inaccessible.
 */
int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(current_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(current_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	/* Zero out the slots beyond the CPU's usable watch registers. */
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}
/*
 * PTRACE_SET_WATCH_REGS: install hardware watchpoints for the child.
 * All requested values are validated first (watchlo must be a user
 * address for the child's address model; watchhi may only carry bits
 * in 0xff8), then installed in one pass.  TIF_LOAD_WATCH is set iff
 * any register enables a watch condition (low 3 bits of watchlo).
 */
int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
		return -EIO;

	/* Check the values. */
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~0xff8)
			return -EINVAL;
	}
	/* Install them. */
	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & 7)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];
		/*
		 * NOTE(review): the original comment here said "Set the G
		 * bit", but the code stores ht[i] unchanged; presumably the
		 * G bit is applied when the registers are loaded — confirm.
		 */
		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}
/*
 * arch_ptrace() - MIPS-specific ptrace request dispatcher.
 *
 * Handles peek/poke of memory and of the USER area (GPRs, FPRs, cp0
 * registers, DSP state), register-set transfers, syscall tracing
 * continuation, kill, TLS pointer and watch register requests.
 * Anything unrecognised is forwarded to the generic ptrace_request().
 * Returns 0 or a value for the tracer on success, negative errno on
 * failure.
 */
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
		case FPR_BASE ... FPR_BASE + 31:
			if (tsk_used_math(child)) {
				fpureg_t *fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers - unless we're using r2k_switch.S.
				 */
				if (addr & 1)
					tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
				else
					tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
#endif
#ifdef CONFIG_64BIT
				tmp = fregs[addr - FPR_BASE];
#endif
			} else {
				tmp = -1;	/* FP not yet used  */
			}
			break;
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR: {	/* implementation / version register */
			unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
			unsigned long irqflags;
			unsigned int mtflags;
#endif /* CONFIG_MIPS_MT_SMTC */

			preempt_disable();
			if (!cpu_has_fpu) {
				preempt_enable();
				break;
			}

#ifdef CONFIG_MIPS_MT_SMTC
			/* Read-modify-write of Status must be atomic */
			local_irq_save(irqflags);
			mtflags = dmt();
#endif /* CONFIG_MIPS_MT_SMTC */

			/* Enable the FPU just long enough to read FIR. */
			if (cpu_has_mipsmt) {
				unsigned int vpflags = dvpe();
				flags = read_c0_status();
				__enable_fpu();
				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
				write_c0_status(flags);
				evpe(vpflags);
			} else {
				flags = read_c0_status();
				__enable_fpu();
				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
				write_c0_status(flags);
			}
#ifdef CONFIG_MIPS_MT_SMTC
			emt(mtflags);
			local_irq_restore(irqflags);
#endif /* CONFIG_MIPS_MT_SMTC */
			preempt_enable();
			break;
		}
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, (unsigned long __user *) data);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			break;
		case FPR_BASE ... FPR_BASE + 31: {
			fpureg_t *fregs = get_fpu_regs(child);

			if (!tsk_used_math(child)) {
				/* FP not yet used  */
				memset(&child->thread.fpu, ~0,
				       sizeof(child->thread.fpu));
				child->thread.fpu.fcr31 = 0;
			}
#ifdef CONFIG_32BIT
			/*
			 * The odd registers are actually the high order bits
			 * of the values stored in the even registers - unless
			 * we're using r2k_switch.S.
			 */
			if (addr & 1) {
				fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
				fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
			} else {
				fregs[addr - FPR_BASE] &= ~0xffffffffLL;
				fregs[addr - FPR_BASE] |= data;
			}
#endif
#ifdef CONFIG_64BIT
			fregs[addr - FPR_BASE] = data;
#endif
			break;
		}
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case FPC_CSR:
			child->thread.fpu.fcr31 = data;
			break;
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, (__s64 __user *) data);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, (__s64 __user *) data);
		break;

	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, (__u32 __user *) data);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, (__u32 __user *) data);
		break;

	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
	case PTRACE_CONT: { /* restart after signal. */
		ret = -EIO;
		if (!valid_signal(data))
			break;
		if (request == PTRACE_SYSCALL) {
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		}
		else {
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		}
		child->exit_code = data;
		wake_up_process(child);
		ret = 0;
		break;
	}

	/*
	 * make the child exit.  Best I can do is send it a sigkill.
	 * perhaps it should be put in the status that it wants to
	 * exit.
	 */
	case PTRACE_KILL:
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
			break;
		child->exit_code = SIGKILL;
		wake_up_process(child);
		break;

	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value,
				(unsigned long __user *) data);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child,
					(struct pt_watch_regs __user *) addr);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child,
					(struct pt_watch_regs __user *) addr);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
 out:
	return ret;
}
/*
 * Build the AUDIT_ARCH value for this kernel: base EM_MIPS plus the
 * 64-bit and little-endian flag bits as configured at build time.
 */
static inline int audit_arch(void)
{
	int arch = EM_MIPS;
#ifdef CONFIG_64BIT
	arch |=  __AUDIT_ARCH_64BIT;
#endif
#if defined(__LITTLE_ENDIAN)
	arch |=  __AUDIT_ARCH_LE;
#endif
	return arch;
}
/*
* Notification of system call entry/exit
* - triggered by current->work.syscall_trace
*/
/*
 * Notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 *
 * @entryexit: 0 on syscall entry, nonzero on exit.  Runs seccomp on
 * entry, audit on both edges, and stops the task for its tracer when
 * TIF_SYSCALL_TRACE is set.
 */
asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
{
	/* do the secure computing check first */
	if (!entryexit)
		secure_computing(regs->regs[0]);

	if (unlikely(current->audit_context) && entryexit)
		audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
		                   regs->regs[2]);

	if (!(current->ptrace & PT_PTRACED))
		goto out;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		goto out;

	/* The 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
	                         0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}

out:
	if (unlikely(current->audit_context) && !entryexit)
		audit_syscall_entry(audit_arch(), regs->regs[0],
				    regs->regs[4], regs->regs[5],
				    regs->regs[6], regs->regs[7]);
}
| gpl-2.0 |
hiikezoe/android_kernel_kyocera_isw11k | arch/arm/mm/cache-xsc3l2.c | 993 | 5779 | /*
* arch/arm/mm/cache-xsc3l2.c - XScale3 L2 cache controller support
*
* Copyright (C) 2007 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <asm/system.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "mm.h"
#define CR_L2 (1 << 26)
#define CACHE_LINE_SIZE 32
#define CACHE_LINE_SHIFT 5
#define CACHE_WAY_PER_SET 8
#define CACHE_WAY_SIZE(l2ctype) (8192 << (((l2ctype) >> 8) & 0xf))
#define CACHE_SET_SIZE(l2ctype) (CACHE_WAY_SIZE(l2ctype) >> CACHE_LINE_SHIFT)
/*
 * Probe for an L2 cache via the cp15 L2 cache type register: any of
 * bits [7:3] set means an L2 is fitted.
 */
static inline int xsc3_l2_present(void)
{
	unsigned long l2ctype;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	return !!(l2ctype & 0xf8);
}
/* Clean (write back) the L2 line containing virtual address @addr. */
static inline void xsc3_l2_clean_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 1" : : "r" (addr));
}
/* Invalidate (discard) the L2 line containing virtual address @addr. */
static inline void xsc3_l2_inv_mva(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 1" : : "r" (addr));
}
/*
 * Invalidate the entire L2 by walking every set/way combination with
 * the set/way clean+invalidate operation (cache geometry is read from
 * the L2 cache type register), then drain with a dsb.
 */
static inline void xsc3_l2_inv_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			/* Encoding: way in bits [31:29], set index from bit 5. */
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c11, 2" : : "r"(set_way));
		}
	}

	dsb();
}
#ifdef CONFIG_HIGHMEM
#define l2_map_save_flags(x) raw_local_save_flags(x)
#define l2_map_restore_flags(x) raw_local_irq_restore(x)
#else
#define l2_map_save_flags(x) ((x) = 0)
#define l2_map_restore_flags(x) ((void)(x))
#endif
/*
 * Translate a physical address to a virtual one the MVA cache ops can
 * use.  With highmem, a dedicated fixmap slot per CPU is (re)pointed at
 * the target page whenever the physical page changes from the previous
 * call; without highmem, it is a direct __phys_to_virt().
 *
 * @prev_va: the value this function returned last iteration (or -1 to
 * force a fresh mapping); lets the caller walk a range re-mapping only
 * on page boundaries.  @flags are the IRQ flags saved by the caller via
 * l2_map_save_flags().
 */
static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
				      unsigned long flags)
{
#ifdef CONFIG_HIGHMEM
	unsigned long va = prev_va & PAGE_MASK;
	unsigned long pa_offset = pa << (32 - PAGE_SHIFT);
	if (unlikely(pa_offset < (prev_va << (32 - PAGE_SHIFT)))) {
		/*
		 * Switching to a new page.  Because cache ops are
		 * using virtual addresses only, we must put a mapping
		 * in place for it.  We also enable interrupts for a
		 * short while and disable them again to protect this
		 * mapping.
		 */
		unsigned long idx;
		raw_local_irq_restore(flags);
		idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
		va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		raw_local_irq_restore(flags | PSR_I_BIT);
		set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
		local_flush_tlb_kernel_page(va);
	}
	return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
	return __phys_to_virt(pa);
#endif
}
/*
 * Invalidate L2 for [start, end).  A full-address-space request
 * (0..-1) takes the fast set/way path.  Partial lines at either edge
 * are cleaned before invalidation so bystander data in the same line
 * is not lost; whole lines in between are simply invalidated.
 */
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr, flags;

	if (start == 0 && end == -1ul) {
		xsc3_l2_inv_all();
		return;
	}

	vaddr = -1;		/* to force the first mapping */
	l2_map_save_flags(flags);

	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
		vaddr = l2_map_va(start, vaddr, flags);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (start < end) {
		vaddr = l2_map_va(start, vaddr, flags);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
	}

	l2_map_restore_flags(flags);

	dsb();
}
/*
 * Clean (write back, keep valid) every L2 line overlapping [start, end).
 * start is rounded down to a line boundary; lines are cleaned one at a
 * time and the writeback drained with a dsb.
 */
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr, flags;

	vaddr = -1;		/* to force the first mapping */
	l2_map_save_flags(flags);

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr, flags);
		xsc3_l2_clean_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_map_restore_flags(flags);

	dsb();
}
/*
* optimize L2 flush all operation by set/way format
*/
/*
 * optimize L2 flush all operation by set/way format
 *
 * Clean+invalidate every set/way of the L2 (geometry from the cache
 * type register), then drain with a dsb.
 */
static inline void xsc3_l2_flush_all(void)
{
	unsigned long l2ctype, set_way;
	int set, way;

	__asm__("mrc p15, 1, %0, c0, c0, 1" : "=r" (l2ctype));

	for (set = 0; set < CACHE_SET_SIZE(l2ctype); set++) {
		for (way = 0; way < CACHE_WAY_PER_SET; way++) {
			set_way = (way << 29) | (set << 5);
			__asm__("mcr p15, 1, %0, c7, c15, 2" : : "r"(set_way));
		}
	}

	dsb();
}
/*
 * Flush (clean then invalidate) every L2 line overlapping [start, end).
 * A full-address-space request (0..-1) takes the set/way fast path.
 */
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
	unsigned long vaddr, flags;

	if (start == 0 && end == -1ul) {
		xsc3_l2_flush_all();
		return;
	}

	vaddr = -1;		/* to force the first mapping */
	l2_map_save_flags(flags);

	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr, flags);
		xsc3_l2_clean_mva(vaddr);
		xsc3_l2_inv_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}

	l2_map_restore_flags(flags);

	dsb();
}
/*
 * Hook the XScale3 L2 maintenance routines into the outer_cache ops,
 * but only on an XScale3 CPU whose L2 is present and enabled.
 */
static int __init xsc3_l2_init(void)
{
	if (!cpu_is_xsc3() || !xsc3_l2_present())
		return 0;

	/* leave the ops untouched if the L2 is disabled in the CR */
	if (!(get_cr() & CR_L2))
		return 0;

	pr_info("XScale3 L2 cache enabled.\n");
	xsc3_l2_inv_all();

	outer_cache.inv_range = xsc3_l2_inv_range;
	outer_cache.clean_range = xsc3_l2_clean_range;
	outer_cache.flush_range = xsc3_l2_flush_range;

	return 0;
}
core_initcall(xsc3_l2_init);
| gpl-2.0 |
xplodwild/android_kernel_asus_tf300t | drivers/staging/comedi/drivers/pcl816.c | 2529 | 36528 | /*
comedi/drivers/pcl816.c
Author: Juan Grigera <juan@grigera.com.ar>
based on pcl818 by Michal Dobes <dobes@tesnet.cz> and bits of pcl812
hardware driver for Advantech cards:
card: PCL-816, PCL814B
driver: pcl816
*/
/*
Driver: pcl816
Description: Advantech PCL-816 cards, PCL-814
Author: Juan Grigera <juan@grigera.com.ar>
Devices: [Advantech] PCL-816 (pcl816), PCL-814B (pcl814b)
Status: works
Updated: Tue, 2 Apr 2002 23:15:21 -0800
PCL 816 and 814B have 16 SE/DIFF ADCs, 16 DACs, 16 DI and 16 DO.
Differences are at resolution (16 vs 12 bits).
The driver support AI command mode, other subdevices not written.
Analog output and digital input and output are not supported.
Configuration Options:
[0] - IO Base
[1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
[2] - DMA (0=disable, 1, 3)
[3] - 0, 10=10MHz clock for 8254
1= 1MHz clock for 8254
*/
#include "../comedidev.h"
#include <linux/ioport.h>
#include <linux/mc146818rtc.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <asm/dma.h>
#include "8253.h"
#define DEBUG(x) x
/* boards constants */
/* IO space len */
#define PCLx1x_RANGE 16
/* #define outb(x,y) printk("OUTB(%x, 200+%d)\n", x,y-0x200); outb(x,y) */
/* INTEL 8254 counters */
#define PCL816_CTR0 4
#define PCL816_CTR1 5
#define PCL816_CTR2 6
/* R: counter read-back register W: counter control */
#define PCL816_CTRCTL 7
/* R: A/D high byte W: A/D range control */
#define PCL816_RANGE 9
/* W: clear INT request */
#define PCL816_CLRINT 10
/* R: next mux scan channel W: mux scan channel & range control pointer */
#define PCL816_MUX 11
/* R/W: operation control register */
#define PCL816_CONTROL 12
/* R: return status byte W: set DMA/IRQ */
#define PCL816_STATUS 13
#define PCL816_STATUS_DRDY_MASK 0x80
/* R: low byte of A/D W: soft A/D trigger */
#define PCL816_AD_LO 8
/* R: high byte of A/D W: A/D range control */
#define PCL816_AD_HI 9
/* type of interrupt handler */
#define INT_TYPE_AI1_INT 1
#define INT_TYPE_AI1_DMA 2
#define INT_TYPE_AI3_INT 4
#define INT_TYPE_AI3_DMA 5
#ifdef unused
#define INT_TYPE_AI1_DMA_RTC 9
#define INT_TYPE_AI3_DMA_RTC 10
/* RTC stuff... */
#define RTC_IRQ 8
#define RTC_IO_EXTENT 0x10
#endif
#define MAGIC_DMA_WORD 0x5a5a
/* A/D input ranges shared by both boards: 4 bipolar + 4 unipolar */
static const struct comedi_lrange range_pcl816 = { 8, {
						       BIP_RANGE(10),
						       BIP_RANGE(5),
						       BIP_RANGE(2.5),
						       BIP_RANGE(1.25),
						       UNI_RANGE(10),
						       UNI_RANGE(5),
						       UNI_RANGE(2.5),
						       UNI_RANGE(1.25),
						       }
};
/*
 * Static capability record for one supported board type; one entry
 * per device name in boardtypes[] below.
 */
struct pcl816_board {

	const char *name;	/*  board name */
	int n_ranges;		/*  len of range list */
	int n_aichan;		/*  num of A/D chans in differential mode */
	unsigned int ai_ns_min;	/*  minimal allowed delay between samples (in ns) */
	int n_aochan;		/*  num of D/A chans */
	int n_dichan;		/*  num of DI chans */
	int n_dochan;		/*  num of DO chans */
	const struct comedi_lrange *ai_range_type;	/*  default A/D rangelist */
	const struct comedi_lrange *ao_range_type;	/*  default D/A rangelist */
	unsigned int io_range;	/*  len of IO space */
	unsigned int IRQbits;	/*  allowed interrupts */
	unsigned int DMAbits;	/*  allowed DMA chans */
	int ai_maxdata;		/*  maxdata for A/D */
	int ao_maxdata;		/*  maxdata for D/A */
	int ai_chanlist;	/*  allowed len of channel list A/D */
	int ao_chanlist;	/*  allowed len of channel list D/A */
	int i8254_osc_base;	/*  1/frequency of on board oscillator in ns */
};
/* One entry per name exported through driver_pcl816.board_name */
static const struct pcl816_board boardtypes[] = {
	{"pcl816", 8, 16, 10000, 1, 16, 16, &range_pcl816,
	 &range_pcl816, PCLx1x_RANGE,
	 0x00fc,	/*  IRQ mask */
	 0x0a,		/*  DMA mask */
	 0xffff,	/*  16-bit card */
	 0xffff,	/*  D/A maxdata */
	 1024,
	 1,		/*  ao chan list */
	 100},
	{"pcl814b", 8, 16, 10000, 1, 16, 16, &range_pcl816,
	 &range_pcl816, PCLx1x_RANGE,
	 0x00fc,
	 0x0a,
	 0x3fff,	/*  14 bit card */
	 0x3fff,
	 1024,
	 1,
	 100},
};

#define n_boardtypes (sizeof(boardtypes)/sizeof(struct pcl816_board))

/* convenience accessors for per-device private data and board info */
#define devpriv ((struct pcl816_private *)dev->private)
#define this_board ((const struct pcl816_board *)dev->board_ptr)
static int pcl816_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int pcl816_detach(struct comedi_device *dev);
#ifdef unused
static int RTC_lock = 0; /* RTC lock */
static int RTC_timer_lock = 0; /* RTC int lock */
#endif
/*
 * Comedi driver registration record; board_name/num_names/offset let
 * the comedi core iterate over boardtypes[] during attach.
 */
static struct comedi_driver driver_pcl816 = {
	.driver_name = "pcl816",
	.module = THIS_MODULE,
	.attach = pcl816_attach,
	.detach = pcl816_detach,
	.board_name = &boardtypes[0].name,
	.num_names = n_boardtypes,
	.offset = sizeof(struct pcl816_board),
};
/* Register the driver with the comedi core on module load. */
static int __init driver_pcl816_init_module(void)
{
	return comedi_driver_register(&driver_pcl816);
}

/* Unregister the driver on module unload. */
static void __exit driver_pcl816_cleanup_module(void)
{
	comedi_driver_unregister(&driver_pcl816);
}

module_init(driver_pcl816_init_module);
module_exit(driver_pcl816_cleanup_module);
/*
 * Per-device runtime state, reached through the devpriv macro.
 */
struct pcl816_private {

	unsigned int dma;	/*  used DMA, 0=don't use DMA */
	int dma_rtc;		/*  1=RTC used with DMA, 0=no RTC alloc */
#ifdef unused
	unsigned long rtc_iobase;	/*  RTC port region */
	unsigned int rtc_iosize;
	unsigned int rtc_irq;
#endif
	unsigned long dmabuf[2];	/*  pointers to begin of DMA buffers */
	unsigned int dmapages[2];	/*  len of DMA buffers in PAGE_SIZEs */
	unsigned int hwdmaptr[2];	/*  hardware address of DMA buffers */
	unsigned int hwdmasize[2];	/*  len of DMA buffers in Bytes */
	unsigned int dmasamplsize;	/*  size in samples hwdmasize[0]/2 */
	unsigned int last_top_dma;	/*  DMA pointer in last RTC int */
	int next_dma_buf;	/*  which DMA buffer will be used next round */
	long dma_runs_to_end;	/*  how many we must perform DMA transfer to end of record */
	unsigned long last_dma_run;	/*  how many bytes we must transfer on last DMA page */

	unsigned int ai_scans;	/*  len of scanlist */
	unsigned char ai_neverending;	/*  if=1, then we do neverending record (you must use cancel()) */
	int irq_free;		/*  1=have allocated IRQ */
	int irq_blocked;	/*  1=IRQ now uses any subdev */
#ifdef unused
	int rtc_irq_blocked;	/*  1=we now do AI with DMA&RTC */
#endif
	int irq_was_now_closed;	/*  when IRQ finish, there's stored int816_mode for last interrupt */
	int int816_mode;	/*  who now uses IRQ - 1=AI1 int, 2=AI1 dma, 3=AI3 int, 4AI3 dma */
	struct comedi_subdevice *last_int_sub;	/*  ptr to subdevice which now finish */
	int ai_act_scan;	/*  how many scans we finished */
	unsigned int ai_act_chanlist[16];	/*  MUX setting for actual AI operations */
	unsigned int ai_act_chanlist_len;	/*  how long is actual MUX list */
	unsigned int ai_act_chanlist_pos;	/*  actual position in MUX list */
	unsigned int ai_n_chan;	/*  how many channels per scan */
	unsigned int ai_poll_ptr;	/*  how many samples transfer poll */
	struct comedi_subdevice *sub_ai;	/*  ptr to AI subdevice */
#ifdef unused
	struct timer_list rtc_irq_timer;	/*  timer for RTC sanity check */
	unsigned long rtc_freq;	/*  RTC int freq */
#endif
};
/*
==============================================================================
*/
static int check_channel_list(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int *chanlist, unsigned int chanlen);
static void setup_channel_list(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int *chanlist, unsigned int seglen);
static int pcl816_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s);
static void start_pacer(struct comedi_device *dev, int mode,
unsigned int divisor1, unsigned int divisor2);
#ifdef unused
static int set_rtc_irq_bit(unsigned char bit);
#endif
static int pcl816_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd);
static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
/*
==============================================================================
ANALOG INPUT MODE0, 816 cards, slow version
*/
/*
 * Mode 0 analog input: software-triggered single conversions, polled
 * (no IRQ, no DMA).  One sample is acquired per requested insn->n.
 *
 * Returns the number of samples read, or -EIO if a conversion did not
 * complete within the 100us polling window.
 */
static int pcl816_ai_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	int n;
	int timeout;
	int ready;

	DPRINTK("mode 0 analog input\n");
	/*  software trigger, DMA and INT off */
	outb(0, dev->iobase + PCL816_CONTROL);
	/*  clear INT (conversion end) flag */
	outb(0, dev->iobase + PCL816_CLRINT);

	/*  Set the input channel */
	outb(CR_CHAN(insn->chanspec) & 0xf, dev->iobase + PCL816_MUX);
	/* select gain */
	outb(CR_RANGE(insn->chanspec), dev->iobase + PCL816_RANGE);

	for (n = 0; n < insn->n; n++) {
		outb(0, dev->iobase + PCL816_AD_LO);	/* start conversion */

		/*
		 * Poll DRDY with an explicit completion flag.  The old
		 * "while (timeout--) ... if (!timeout)" test was broken
		 * both ways: on a genuine timeout the countdown ends at
		 * -1 so the error path never ran (data[n] stayed unset),
		 * and a conversion finishing on the very last iteration
		 * left timeout == 0 and was misreported as an error.
		 */
		ready = 0;
		for (timeout = 100; timeout > 0; timeout--) {
			if (!(inb(dev->iobase + PCL816_STATUS) &
			      PCL816_STATUS_DRDY_MASK)) {
				ready = 1;
				break;
			}
			udelay(1);
		}

		/* Return timeout error */
		if (!ready) {
			comedi_error(dev, "A/D insn timeout\n");
			data[0] = 0;
			/*  clear INT (conversion end) flag */
			outb(0, dev->iobase + PCL816_CLRINT);
			return -EIO;
		}

		/*  return read value */
		data[n] = ((inb(dev->iobase + PCL816_AD_HI) << 8) |
			   (inb(dev->iobase + PCL816_AD_LO)));

		/*  clear INT (conversion end) flag */
		outb(0, dev->iobase + PCL816_CLRINT);
	}

	return n;
}
/*
==============================================================================
analog input interrupt mode 1 & 3, 818 cards
one sample per interrupt version
*/
/*
 * IRQ handler for interrupt-driven (non-DMA) mode 1/3 acquisition:
 * one sample is read per interrupt and pushed into the async buffer.
 * Cancels the acquisition on DRDY timeout or once all requested
 * scans have been collected.
 */
static irqreturn_t interrupt_pcl816_ai_mode13_int(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->subdevices + 0;
	int low, hi;
	int timeout = 50;	/* wait max 50us */
	int ready = 0;

	/*
	 * Track DRDY with an explicit flag: the previous "if (!timeout)"
	 * check missed real timeouts (the post-decrement countdown ends
	 * at -1, not 0) and misfired when DRDY arrived exactly on the
	 * last iteration (timeout == 0 after the break).
	 */
	while (timeout--) {
		if (!(inb(dev->iobase + PCL816_STATUS) &
		      PCL816_STATUS_DRDY_MASK)) {
			ready = 1;
			break;
		}
		udelay(1);
	}
	if (!ready) {		/* timeout, bail error */
		outb(0, dev->iobase + PCL816_CLRINT);	/* clear INT request */
		comedi_error(dev, "A/D mode1/3 IRQ without DRDY!");
		pcl816_ai_cancel(dev, s);
		s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
		comedi_event(dev, s);
		return IRQ_HANDLED;
	}

	/*  get the sample */
	low = inb(dev->iobase + PCL816_AD_LO);
	hi = inb(dev->iobase + PCL816_AD_HI);

	comedi_buf_put(s->async, (hi << 8) | low);

	outb(0, dev->iobase + PCL816_CLRINT);	/* clear INT request */

	if (++devpriv->ai_act_chanlist_pos >= devpriv->ai_act_chanlist_len)
		devpriv->ai_act_chanlist_pos = 0;

	s->async->cur_chan++;
	if (s->async->cur_chan >= devpriv->ai_n_chan) {
		s->async->cur_chan = 0;
		devpriv->ai_act_scan++;
	}

	if (!devpriv->ai_neverending)
		/* all data sampled */
		if (devpriv->ai_act_scan >= devpriv->ai_scans) {
			/* all data sampled */
			pcl816_ai_cancel(dev, s);
			s->async->events |= COMEDI_CB_EOA;
		}
	comedi_event(dev, s);
	return IRQ_HANDLED;
}
/*
==============================================================================
analog input dma mode 1 & 3, 816 cards
*/
/*
 * Copy 'len' samples from DMA buffer 'ptr' (starting at sample index
 * 'bufptr') into the comedi async buffer and update the channel/scan
 * bookkeeping.  Ends the acquisition with COMEDI_CB_EOA once all
 * requested scans have been delivered, unless the run is neverending.
 */
static void transfer_from_dma_buf(struct comedi_device *dev,
				  struct comedi_subdevice *s, short *ptr,
				  unsigned int bufptr, unsigned int len)
{
	int i;

	s->async->events = 0;

	for (i = 0; i < len; i++) {
		comedi_buf_put(s->async, ptr[bufptr++]);

		/* wrap the MUX-list position */
		if (++devpriv->ai_act_chanlist_pos >=
		    devpriv->ai_act_chanlist_len) {
			devpriv->ai_act_chanlist_pos = 0;
		}

		/* a full scan is ai_n_chan samples */
		s->async->cur_chan++;
		if (s->async->cur_chan >= devpriv->ai_n_chan) {
			s->async->cur_chan = 0;
			devpriv->ai_act_scan++;
		}

		if (!devpriv->ai_neverending)
			/*  all data sampled */
			if (devpriv->ai_act_scan >= devpriv->ai_scans) {
				pcl816_ai_cancel(dev, s);
				s->async->events |= COMEDI_CB_EOA;
				s->async->events |= COMEDI_CB_BLOCK;
				break;
			}
	}

	comedi_event(dev, s);
}
/*
 * IRQ handler for DMA mode 1/3: fires once per completed DMA buffer.
 * Re-arms the DMA controller on the other half of the double buffer,
 * then drains the just-finished buffer into the async buffer.
 */
static irqreturn_t interrupt_pcl816_ai_mode13_dma(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->subdevices + 0;
	int len, bufptr, this_dma_buf;
	unsigned long dma_flags;
	short *ptr;

	disable_dma(devpriv->dma);
	this_dma_buf = devpriv->next_dma_buf;

	/*  switch dma bufs: re-arm unless this was the final transfer */
	if ((devpriv->dma_runs_to_end > -1) || devpriv->ai_neverending) {

		devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
		set_dma_mode(devpriv->dma, DMA_MODE_READ);
		dma_flags = claim_dma_lock();
/* clear_dma_ff (devpriv->dma); */
		set_dma_addr(devpriv->dma,
			     devpriv->hwdmaptr[devpriv->next_dma_buf]);
		/* full buffer for intermediate runs, partial for the last */
		if (devpriv->dma_runs_to_end) {
			set_dma_count(devpriv->dma,
				      devpriv->hwdmasize[devpriv->
							 next_dma_buf]);
		} else {
			set_dma_count(devpriv->dma, devpriv->last_dma_run);
		}
		release_dma_lock(dma_flags);
		enable_dma(devpriv->dma);
	}

	devpriv->dma_runs_to_end--;
	outb(0, dev->iobase + PCL816_CLRINT);	/* clear INT request */

	ptr = (short *)devpriv->dmabuf[this_dma_buf];

	/* skip samples already consumed by pcl816_ai_poll() */
	len = (devpriv->hwdmasize[0] >> 1) - devpriv->ai_poll_ptr;
	bufptr = devpriv->ai_poll_ptr;
	devpriv->ai_poll_ptr = 0;

	transfer_from_dma_buf(dev, s, ptr, bufptr, len);
	return IRQ_HANDLED;
}
/*
==============================================================================
INT procedure
*/
/*
 * Common IRQ entry point: dispatch to the INT- or DMA-mode handler
 * according to the currently programmed acquisition mode, and sort
 * out stray interrupts arriving after an acquisition was closed.
 */
static irqreturn_t interrupt_pcl816(int irq, void *d)
{
	struct comedi_device *dev = d;

	DPRINTK("<I>");
	if (!dev->attached) {
		comedi_error(dev, "premature interrupt");
		return IRQ_HANDLED;
	}

	switch (devpriv->int816_mode) {
	case INT_TYPE_AI1_DMA:
	case INT_TYPE_AI3_DMA:
		return interrupt_pcl816_ai_mode13_dma(irq, d);
	case INT_TYPE_AI1_INT:
	case INT_TYPE_AI3_INT:
		return interrupt_pcl816_ai_mode13_int(irq, d);
	}

	outb(0, dev->iobase + PCL816_CLRINT);	/* clear INT request */

	/*
	 * Logical || (the original used bitwise '|', which only worked
	 * because each operand is already !-normalized to 0/1).
	 */
	if (!dev->irq || !devpriv->irq_free || !devpriv->irq_blocked ||
	    !devpriv->int816_mode) {
		if (devpriv->irq_was_now_closed) {
			/* expected trailing IRQ right after cancel() */
			devpriv->irq_was_now_closed = 0;
			/* comedi_error(dev,"last IRQ.."); */
			return IRQ_HANDLED;
		}
		comedi_error(dev, "bad IRQ!");
		return IRQ_NONE;
	}
	comedi_error(dev, "IRQ from unknown source!");
	return IRQ_NONE;
}
/*
==============================================================================
COMMAND MODE
*/
/*
 * Debug helper: dump all fields of a comedi_cmd, tagged with the
 * validation step number 'e' (-1 = before validation starts).
 */
static void pcl816_cmdtest_out(int e, struct comedi_cmd *cmd)
{
	printk(KERN_INFO "pcl816 e=%d startsrc=%x scansrc=%x convsrc=%x\n", e,
	       cmd->start_src, cmd->scan_begin_src, cmd->convert_src);
	printk(KERN_INFO "pcl816 e=%d startarg=%d scanarg=%d convarg=%d\n", e,
	       cmd->start_arg, cmd->scan_begin_arg, cmd->convert_arg);
	printk(KERN_INFO "pcl816 e=%d stopsrc=%x scanend=%x\n", e,
	       cmd->stop_src, cmd->scan_end_src);
	printk(KERN_INFO "pcl816 e=%d stoparg=%d scanendarg=%d chanlistlen=%d\n",
	       e, cmd->stop_arg, cmd->scan_end_arg, cmd->chanlist_len);
}
/*
==============================================================================
*/
/*
 * Validate (and normalize) an asynchronous AI command using the
 * standard comedi five-step procedure.  Returns 0 when the command is
 * acceptable, otherwise the number of the first failing step; sources
 * and arguments are coerced to valid values along the way so the
 * caller can retry.
 */
static int pcl816_ai_cmdtest(struct comedi_device *dev,
			     struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp, divisor1 = 0, divisor2 = 0;

	/* fixed: the debug message used to name pcl812_ai_cmdtest */
	DEBUG(printk(KERN_INFO "pcl816 pcl816_ai_cmdtest\n");
	      pcl816_cmdtest_out(-1, cmd);
	     );

	/* step 1: make sure trigger sources are trivially valid */
	tmp = cmd->start_src;
	cmd->start_src &= TRIG_NOW;
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;

	tmp = cmd->scan_begin_src;
	cmd->scan_begin_src &= TRIG_FOLLOW;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;

	tmp = cmd->convert_src;
	cmd->convert_src &= TRIG_EXT | TRIG_TIMER;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;

	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;

	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;

	if (err)
		return 1;

	/*
	 * step 2: make sure trigger sources
	 * are unique and mutually compatible
	 */
	if (cmd->start_src != TRIG_NOW) {
		cmd->start_src = TRIG_NOW;
		err++;
	}

	if (cmd->scan_begin_src != TRIG_FOLLOW) {
		cmd->scan_begin_src = TRIG_FOLLOW;
		err++;
	}

	if (cmd->convert_src != TRIG_EXT && cmd->convert_src != TRIG_TIMER) {
		cmd->convert_src = TRIG_TIMER;
		err++;
	}

	if (cmd->scan_end_src != TRIG_COUNT) {
		cmd->scan_end_src = TRIG_COUNT;
		err++;
	}

	if (cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_COUNT)
		err++;

	if (err)
		return 2;

	/* step 3: make sure arguments are trivially compatible */
	if (cmd->start_arg != 0) {
		cmd->start_arg = 0;
		err++;
	}

	if (cmd->scan_begin_arg != 0) {
		cmd->scan_begin_arg = 0;
		err++;
	}
	if (cmd->convert_src == TRIG_TIMER) {
		/* enforce the board's minimum inter-sample delay */
		if (cmd->convert_arg < this_board->ai_ns_min) {
			cmd->convert_arg = this_board->ai_ns_min;
			err++;
		}
	} else {		/* TRIG_EXT */
		if (cmd->convert_arg != 0) {
			cmd->convert_arg = 0;
			err++;
		}
	}

	if (cmd->scan_end_arg != cmd->chanlist_len) {
		cmd->scan_end_arg = cmd->chanlist_len;
		err++;
	}
	if (cmd->stop_src == TRIG_COUNT) {
		if (!cmd->stop_arg) {
			cmd->stop_arg = 1;
			err++;
		}
	} else {		/* TRIG_NONE */
		if (cmd->stop_arg != 0) {
			cmd->stop_arg = 0;
			err++;
		}
	}

	if (err)
		return 3;

	/* step 4: fix up any arguments */
	if (cmd->convert_src == TRIG_TIMER) {
		tmp = cmd->convert_arg;
		/* round convert_arg to what the 8254 can actually do */
		i8253_cascade_ns_to_timer(this_board->i8254_osc_base,
					  &divisor1, &divisor2,
					  &cmd->convert_arg,
					  cmd->flags & TRIG_ROUND_MASK);
		if (cmd->convert_arg < this_board->ai_ns_min)
			cmd->convert_arg = this_board->ai_ns_min;
		if (tmp != cmd->convert_arg)
			err++;
	}

	if (err)
		return 4;

	/* step 5: complain about special chanlist considerations */
	if (cmd->chanlist) {
		if (!check_channel_list(dev, s, cmd->chanlist,
					cmd->chanlist_len))
			return 5;	/*  incorrect channels list */
	}

	return 0;
}
/*
 * Set up and start an asynchronous AI acquisition (pacer- or
 * externally-triggered, IRQ + optional DMA).  The command must have
 * passed pcl816_ai_cmdtest() first.  Returns 0 on success or a
 * negative errno.
 */
static int pcl816_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
	unsigned int divisor1 = 0, divisor2 = 0, dma_flags, bytes, dmairq;
	struct comedi_cmd *cmd = &s->async->cmd;
	unsigned int seglen;

	if (cmd->start_src != TRIG_NOW)
		return -EINVAL;
	if (cmd->scan_begin_src != TRIG_FOLLOW)
		return -EINVAL;
	if (cmd->scan_end_src != TRIG_COUNT)
		return -EINVAL;
	if (cmd->scan_end_arg != cmd->chanlist_len)
		return -EINVAL;
/* if(cmd->chanlist_len>MAX_CHANLIST_LEN) return -EINVAL; */
	if (devpriv->irq_blocked)
		return -EBUSY;

	if (cmd->convert_src == TRIG_TIMER) {
		if (cmd->convert_arg < this_board->ai_ns_min)
			cmd->convert_arg = this_board->ai_ns_min;

		i8253_cascade_ns_to_timer(this_board->i8254_osc_base, &divisor1,
					  &divisor2, &cmd->convert_arg,
					  cmd->flags & TRIG_ROUND_MASK);

		/*  PCL816 crash if any divisor is set to 1 */
		if (divisor1 == 1) {
			divisor1 = 2;
			divisor2 /= 2;
		}
		if (divisor2 == 1) {
			divisor2 = 2;
			divisor1 /= 2;
		}
	}

	start_pacer(dev, -1, 0, 0);	/*  stop pacer */

	seglen = check_channel_list(dev, s, cmd->chanlist, cmd->chanlist_len);
	if (seglen < 1)
		return -EINVAL;
	setup_channel_list(dev, s, cmd->chanlist, seglen);
	udelay(1);

	devpriv->ai_n_chan = cmd->chanlist_len;
	devpriv->ai_act_scan = 0;
	s->async->cur_chan = 0;
	devpriv->irq_blocked = 1;
	devpriv->ai_poll_ptr = 0;
	devpriv->irq_was_now_closed = 0;

	if (cmd->stop_src == TRIG_COUNT) {
		devpriv->ai_scans = cmd->stop_arg;
		devpriv->ai_neverending = 0;
	} else {
		devpriv->ai_scans = 0;
		devpriv->ai_neverending = 1;
	}

	/*  don't we want wake up every scan? */
	if ((cmd->flags & TRIG_WAKE_EOS)) {
		printk(KERN_INFO
		       "pcl816: you want WAKE_EOS but I don't want to handle it");
		/* devpriv->ai_eos=1; */

		/* if (devpriv->ai_n_chan==1) */
		/* devpriv->dma=0; // DMA is useless for this situation */
	}

	if (devpriv->dma) {
		bytes = devpriv->hwdmasize[0];
		if (!devpriv->ai_neverending) {
			/*
			 * Total transfer = scans * channels-per-scan.
			 * BUGFIX: the original multiplied chanlist_len by
			 * itself, mis-sizing the whole DMA transfer.
			 */
			bytes = devpriv->ai_n_chan *
				devpriv->ai_scans * sizeof(short);

			/*  how many DMA pages we must fill */
			devpriv->dma_runs_to_end = bytes /
						   devpriv->hwdmasize[0];

			/* on last dma transfer must be moved */
			devpriv->last_dma_run = bytes % devpriv->hwdmasize[0];
			devpriv->dma_runs_to_end--;
			if (devpriv->dma_runs_to_end >= 0)
				bytes = devpriv->hwdmasize[0];
		} else
			devpriv->dma_runs_to_end = -1;

		devpriv->next_dma_buf = 0;
		set_dma_mode(devpriv->dma, DMA_MODE_READ);
		dma_flags = claim_dma_lock();
		clear_dma_ff(devpriv->dma);
		set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
		set_dma_count(devpriv->dma, bytes);
		release_dma_lock(dma_flags);
		enable_dma(devpriv->dma);
	}

	start_pacer(dev, 1, divisor1, divisor2);
	dmairq = ((devpriv->dma & 0x3) << 4) | (dev->irq & 0x7);

	switch (cmd->convert_src) {
	case TRIG_TIMER:
		devpriv->int816_mode = INT_TYPE_AI1_DMA;

		/*  Pacer+IRQ+DMA */
		outb(0x32, dev->iobase + PCL816_CONTROL);

		/*  write irq and DMA to card */
		outb(dmairq, dev->iobase + PCL816_STATUS);
		break;

	default:
		devpriv->int816_mode = INT_TYPE_AI3_DMA;

		/*  Ext trig+IRQ+DMA */
		outb(0x34, dev->iobase + PCL816_CONTROL);

		/*  write irq to card */
		outb(dmairq, dev->iobase + PCL816_STATUS);
		break;
	}

	DPRINTK("pcl816 END: pcl816_ai_cmd()\n");
	return 0;
}
/*
 * comedi poll handler: pull any samples the DMA controller has
 * already written into the current buffer, without waiting for the
 * end-of-buffer interrupt.  Returns the number of bytes pending in
 * the async buffer, or 0 when DMA is not in use / nothing is new.
 */
static int pcl816_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
{
	unsigned long flags;
	unsigned int top1, top2, i;

	if (!devpriv->dma)
		return 0;	/*  poll is valid only for DMA transfer */

	spin_lock_irqsave(&dev->spinlock, flags);

	/* the residue counter may be moving: read until two reads agree */
	for (i = 0; i < 20; i++) {
		top1 = get_dma_residue(devpriv->dma);	/*  where is now DMA */
		top2 = get_dma_residue(devpriv->dma);
		if (top1 == top2)
			break;
	}
	if (top1 != top2) {
		spin_unlock_irqrestore(&dev->spinlock, flags);
		return 0;
	}

	/*  where is now DMA in buffer */
	top1 = devpriv->hwdmasize[0] - top1;
	top1 >>= 1;		/*  sample position */
	top2 = top1 - devpriv->ai_poll_ptr;
	/*
	 * NOTE(review): top2 is unsigned, so "< 1" only catches equality;
	 * a wrapped subtraction (ai_poll_ptr > top1) would slip through —
	 * presumably prevented by the ISR resetting ai_poll_ptr. TODO confirm.
	 */
	if (top2 < 1) {		/*  no new samples */
		spin_unlock_irqrestore(&dev->spinlock, flags);
		return 0;
	}

	transfer_from_dma_buf(dev, s,
			      (short *)devpriv->dmabuf[devpriv->next_dma_buf],
			      devpriv->ai_poll_ptr, top2);

	devpriv->ai_poll_ptr = top1;	/*  new buffer position */

	spin_unlock_irqrestore(&dev->spinlock, flags);

	return s->async->buf_write_count - s->async->buf_read_count;
}
/*
==============================================================================
cancel any mode 1-4 AI
*/
/*
 * Cancel any running mode 1-4 AI acquisition: stop DMA (if active),
 * stop the A/D converter and the pacer, and drain/clear any pending
 * conversion and interrupt.  The switch cases deliberately fall
 * through from the DMA modes into the common INT-mode shutdown.
 */
static int pcl816_ai_cancel(struct comedi_device *dev,
			    struct comedi_subdevice *s)
{
/* DEBUG(printk("pcl816_ai_cancel()\n");) */

	if (devpriv->irq_blocked > 0) {
		switch (devpriv->int816_mode) {
#ifdef unused
		case INT_TYPE_AI1_DMA_RTC:
		case INT_TYPE_AI3_DMA_RTC:
			set_rtc_irq_bit(0);	/*  stop RTC */
			del_timer(&devpriv->rtc_irq_timer);
			/* fall through */
#endif
		case INT_TYPE_AI1_DMA:
		case INT_TYPE_AI3_DMA:
			disable_dma(devpriv->dma);
			/* fall through */
		case INT_TYPE_AI1_INT:
		case INT_TYPE_AI3_INT:
			outb(inb(dev->iobase + PCL816_CONTROL) & 0x73,
			     dev->iobase + PCL816_CONTROL);	/* Stop A/D */
			udelay(1);
			outb(0, dev->iobase + PCL816_CONTROL);	/* Stop A/D */

			/* Stop pacer */
			outb(0xb0, dev->iobase + PCL816_CTRCTL);
			outb(0x70, dev->iobase + PCL816_CTRCTL);
			outb(0, dev->iobase + PCL816_AD_LO);
			inb(dev->iobase + PCL816_AD_LO);
			inb(dev->iobase + PCL816_AD_HI);

			/* clear INT request */
			outb(0, dev->iobase + PCL816_CLRINT);

			/* Stop A/D */
			outb(0, dev->iobase + PCL816_CONTROL);
			devpriv->irq_blocked = 0;
			devpriv->irq_was_now_closed = devpriv->int816_mode;
			devpriv->int816_mode = 0;
			devpriv->last_int_sub = s;
/* s->busy = 0; */
			break;
		}
	}

	DEBUG(printk("comedi: pcl816_ai_cancel() successful\n");)
	return 0;
}
/*
==============================================================================
check for PCL816
*/
/*
 * Probe for a PCL816 at 'iobase': write test patterns to the MUX
 * register and a known value to the control register, reading each
 * back.  Returns 0 when the card responds, 1 when it does not.
 */
static int pcl816_check(unsigned long iobase)
{
	static const unsigned char mux_patterns[] = { 0x00, 0x55 };
	unsigned int i;

	for (i = 0; i < sizeof(mux_patterns); i++) {
		outb(mux_patterns[i], iobase + PCL816_MUX);
		udelay(1);
		if (inb(iobase + PCL816_MUX) != mux_patterns[i])
			return 1;	/* there isn't card */
	}

	outb(0x00, iobase + PCL816_MUX);
	udelay(1);
	outb(0x18, iobase + PCL816_CONTROL);
	udelay(1);
	if (inb(iobase + PCL816_CONTROL) != 0x18)
		return 1;	/* there isn't card */
	return 0;		/* ok, card exist */
}
/*
==============================================================================
reset whole PCL-816 cards
*/
/*
 * Put the whole card into a quiescent state: zero the control, MUX
 * and range registers, clear any pending interrupt, stop the pacer.
 */
static void pcl816_reset(struct comedi_device *dev)
{
/* outb (0, dev->iobase + PCL818_DA_LO);         DAC=0V */
/* outb (0, dev->iobase + PCL818_DA_HI); */
/* udelay (1); */
/* outb (0, dev->iobase + PCL818_DO_HI);        DO=$0000 */
/* outb (0, dev->iobase + PCL818_DO_LO); */
/* udelay (1); */
	outb(0, dev->iobase + PCL816_CONTROL);
	outb(0, dev->iobase + PCL816_MUX);
	outb(0, dev->iobase + PCL816_CLRINT);

	/* Stop pacer */
	outb(0xb0, dev->iobase + PCL816_CTRCTL);
	outb(0x70, dev->iobase + PCL816_CTRCTL);
	outb(0x30, dev->iobase + PCL816_CTRCTL);
	outb(0, dev->iobase + PCL816_RANGE);
}
/*
==============================================================================
Start/stop pacer onboard pacer
*/
/*
 * Program the on-board 8254: counter 0 is loaded as a dummy, while
 * counters 1 and 2 are cascaded in mode 3 to generate the pacer
 * clock.  mode == 1 loads divisor1/divisor2 and starts the pacer;
 * any other mode leaves the counters configured but unloaded
 * (pacer effectively stopped).
 */
static void
start_pacer(struct comedi_device *dev, int mode, unsigned int divisor1,
	    unsigned int divisor2)
{
	outb(0x32, dev->iobase + PCL816_CTRCTL);
	outb(0xff, dev->iobase + PCL816_CTR0);
	outb(0x00, dev->iobase + PCL816_CTR0);
	udelay(1);

	/*  set counter 2 as mode 3 */
	outb(0xb4, dev->iobase + PCL816_CTRCTL);
	/*  set counter 1 as mode 3 */
	outb(0x74, dev->iobase + PCL816_CTRCTL);
	udelay(1);

	if (mode == 1) {
		DPRINTK("mode %d, divisor1 %d, divisor2 %d\n", mode, divisor1,
			divisor2);
		/* load each counter low byte first, then high byte */
		outb(divisor2 & 0xff, dev->iobase + PCL816_CTR2);
		outb((divisor2 >> 8) & 0xff, dev->iobase + PCL816_CTR2);
		outb(divisor1 & 0xff, dev->iobase + PCL816_CTR1);
		outb((divisor1 >> 8) & 0xff, dev->iobase + PCL816_CTR1);
	}

	/*  clear pending interrupts (just in case) */
/* outb(0, dev->iobase + PCL816_CLRINT); */
}
/*
==============================================================================
Check if the channel list from the user is built correctly
If it's ok, then return non-zero length of repeated segment of channel list
*/
/*
 * Validate the user's channel/range list.  The list must consist of a
 * contiguous run of channels (a "segment"), optionally repeated to
 * fill the list.  Returns the non-zero length of the repeating
 * segment on success, or 0 when the list is invalid.
 */
static int
check_channel_list(struct comedi_device *dev,
		   struct comedi_subdevice *s, unsigned int *chanlist,
		   unsigned int chanlen)
{
	unsigned int chansegment[16];
	unsigned int i, nowmustbechan, seglen;

	/*  correct channel and range number check itself comedi/range.c */
	if (chanlen < 1) {
		comedi_error(dev, "range/channel list is empty!");
		return 0;
	}

	if (chanlen > 1) {
		/*  first channel is every time ok */
		chansegment[0] = chanlist[0];
		for (i = 1, seglen = 1; i < chanlen; i++, seglen++) {
			/*  build part of chanlist */
			DEBUG(printk(KERN_INFO "%d. %d %d\n", i,
				     CR_CHAN(chanlist[i]),
				     CR_RANGE(chanlist[i]));)

			/*  the segment repeats once we see chanlist[0] again */
			if (chanlist[0] == chanlist[i])
				break;
			nowmustbechan =
			    (CR_CHAN(chansegment[i - 1]) + 1) % chanlen;
			if (nowmustbechan != CR_CHAN(chanlist[i])) {
				/*  channel list isn't continuous :-( */
				printk(KERN_WARNING
				       "comedi%d: pcl816: channel list must "
				       "be continuous! chanlist[%i]=%d but "
				       "must be %d or %d!\n", dev->minor,
				       i, CR_CHAN(chanlist[i]), nowmustbechan,
				       CR_CHAN(chanlist[0]));
				return 0;
			}
			/*  well, this is next correct channel in list */
			chansegment[i] = chanlist[i];
		}

		/*  check whole chanlist against the repeating segment */
		for (i = 0; i < chanlen; i++) {
			DEBUG(printk("%d %d=%d %d\n",
				     CR_CHAN(chansegment[i % seglen]),
				     CR_RANGE(chansegment[i % seglen]),
				     CR_CHAN(chanlist[i]),
				     CR_RANGE(chanlist[i]));)
			if (chanlist[i] != chansegment[i % seglen]) {
				/*
				 * BUGFIX: the diagnostic used to print the
				 * two sides with their indices swapped
				 * (chansegment[i] vs chanlist[i % seglen]).
				 */
				printk(KERN_WARNING
				       "comedi%d: pcl816: bad channel or range"
				       " number! chanlist[%i]=%d,%d,%d and not"
				       " %d,%d,%d!\n", dev->minor, i,
				       CR_CHAN(chanlist[i]),
				       CR_RANGE(chanlist[i]),
				       CR_AREF(chanlist[i]),
				       CR_CHAN(chansegment[i % seglen]),
				       CR_RANGE(chansegment[i % seglen]),
				       CR_AREF(chansegment[i % seglen]));
				return 0;	/*  chan/gain list is strange */
			}
		}
	} else {
		seglen = 1;
	}

	return seglen;	/*  we can serve this with MUX logic */
}
/*
==============================================================================
Program scan/gain logic with channel list.
*/
/*
 * Program the scan/gain logic from the (repeating) channel list
 * segment: write each entry's gain with its channel selected on the
 * MUX, then set the scan interval [first..last].
 */
static void
setup_channel_list(struct comedi_device *dev,
		   struct comedi_subdevice *s, unsigned int *chanlist,
		   unsigned int seglen)
{
	unsigned int i;

	devpriv->ai_act_chanlist_len = seglen;
	devpriv->ai_act_chanlist_pos = 0;

	for (i = 0; i < seglen; i++) {	/*  store range list to card */
		devpriv->ai_act_chanlist[i] = CR_CHAN(chanlist[i]);
		/*
		 * BUGFIX: chanlist[0] was used here, so every iteration
		 * re-programmed the first entry's channel/gain instead
		 * of each entry's own.
		 */
		outb(CR_CHAN(chanlist[i]) & 0xf, dev->iobase + PCL816_MUX);
		/* select gain */
		outb(CR_RANGE(chanlist[i]), dev->iobase + PCL816_RANGE);
	}

	udelay(1);
	/* select channel interval to scan */
	outb(devpriv->ai_act_chanlist[0] |
	     (devpriv->ai_act_chanlist[seglen - 1] << 4),
	     dev->iobase + PCL816_MUX);
}
#ifdef unused
/*
==============================================================================
Enable(1)/disable(0) periodic interrupts from RTC
*/
/*
 * Enable (bit=1) / disable (bit=0) periodic RTC interrupts.
 * Reference-counted through the global RTC_timer_lock so nested
 * users don't fight over the RTC_PIE bit.  Uses the legacy
 * save_flags()/cli() interface; this code is compiled out
 * (#ifdef unused).  Always returns 0.
 */
static int set_rtc_irq_bit(unsigned char bit)
{
	unsigned char val;
	unsigned long flags;

	if (bit == 1) {
		RTC_timer_lock++;
		if (RTC_timer_lock > 1)
			return 0;
	} else {
		RTC_timer_lock--;
		if (RTC_timer_lock < 0)
			RTC_timer_lock = 0;
		if (RTC_timer_lock > 0)
			return 0;
	}

	save_flags(flags);
	cli();
	val = CMOS_READ(RTC_CONTROL);
	if (bit)
		val |= RTC_PIE;
	else
		val &= ~RTC_PIE;

	CMOS_WRITE(val, RTC_CONTROL);
	CMOS_READ(RTC_INTR_FLAGS);

	restore_flags(flags);
	return 0;
}
#endif
/*
==============================================================================
Free any resources that we have claimed
*/
/*
 * Release everything pcl816_attach() may have claimed: cancel any
 * acquisition and reset the card, then free the DMA channel and
 * buffers, the IRQ and the I/O region.  Safe with partially
 * initialized state — each resource is checked before release.
 */
static void free_resources(struct comedi_device *dev)
{
	/* printk("free_resource()\n"); */
	if (dev->private) {
		pcl816_ai_cancel(dev, devpriv->sub_ai);
		pcl816_reset(dev);
		if (devpriv->dma)
			free_dma(devpriv->dma);
		if (devpriv->dmabuf[0])
			free_pages(devpriv->dmabuf[0], devpriv->dmapages[0]);
		if (devpriv->dmabuf[1])
			free_pages(devpriv->dmabuf[1], devpriv->dmapages[1]);
#ifdef unused
		if (devpriv->rtc_irq)
			free_irq(devpriv->rtc_irq, dev);
		if ((devpriv->dma_rtc) && (RTC_lock == 1)) {
			if (devpriv->rtc_iobase)
				release_region(devpriv->rtc_iobase,
					       devpriv->rtc_iosize);
		}
#endif
	}

	if (dev->irq)
		free_irq(dev->irq, dev);
	if (dev->iobase)
		release_region(dev->iobase, this_board->io_range);
	/* printk("free_resource() end\n"); */
}
/*
==============================================================================
Initialization
*/
static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
int ret;
unsigned long iobase;
unsigned int irq, dma;
unsigned long pages;
/* int i; */
struct comedi_subdevice *s;
/* claim our I/O space */
iobase = it->options[0];
printk("comedi%d: pcl816: board=%s, ioport=0x%03lx", dev->minor,
this_board->name, iobase);
if (!request_region(iobase, this_board->io_range, "pcl816")) {
printk("I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
if (pcl816_check(iobase)) {
printk(KERN_ERR ", I cann't detect board. FAIL!\n");
return -EIO;
}
ret = alloc_private(dev, sizeof(struct pcl816_private));
if (ret < 0)
return ret; /* Can't alloc mem */
/* set up some name stuff */
dev->board_name = this_board->name;
/* grab our IRQ */
irq = 0;
if (this_board->IRQbits != 0) { /* board support IRQ */
irq = it->options[1];
if (irq) { /* we want to use IRQ */
if (((1 << irq) & this_board->IRQbits) == 0) {
printk
(", IRQ %u is out of allowed range, "
"DISABLING IT", irq);
irq = 0; /* Bad IRQ */
} else {
if (request_irq
(irq, interrupt_pcl816, 0, "pcl816", dev)) {
printk
(", unable to allocate IRQ %u, "
"DISABLING IT", irq);
irq = 0; /* Can't use IRQ */
} else {
printk(KERN_INFO ", irq=%u", irq);
}
}
}
}
dev->irq = irq;
if (irq) /* 1=we have allocated irq */
devpriv->irq_free = 1;
else
devpriv->irq_free = 0;
devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
devpriv->int816_mode = 0; /* mode of irq */
#ifdef unused
/* grab RTC for DMA operations */
devpriv->dma_rtc = 0;
if (it->options[2] > 0) { /* we want to use DMA */
if (RTC_lock == 0) {
if (!request_region(RTC_PORT(0), RTC_IO_EXTENT,
"pcl816 (RTC)"))
goto no_rtc;
}
devpriv->rtc_iobase = RTC_PORT(0);
devpriv->rtc_iosize = RTC_IO_EXTENT;
RTC_lock++;
#ifdef UNTESTED_CODE
if (!request_irq(RTC_IRQ, interrupt_pcl816_ai_mode13_dma_rtc, 0,
"pcl816 DMA (RTC)", dev)) {
devpriv->dma_rtc = 1;
devpriv->rtc_irq = RTC_IRQ;
printk(", dma_irq=%u", devpriv->rtc_irq);
} else {
RTC_lock--;
if (RTC_lock == 0) {
if (devpriv->rtc_iobase)
release_region(devpriv->rtc_iobase,
devpriv->rtc_iosize);
}
devpriv->rtc_iobase = 0;
devpriv->rtc_iosize = 0;
}
#else
printk("pcl816: RTC code missing");
#endif
}
no_rtc:
#endif
/* grab our DMA */
dma = 0;
devpriv->dma = dma;
if ((devpriv->irq_free == 0) && (devpriv->dma_rtc == 0))
goto no_dma; /* if we haven't IRQ, we can't use DMA */
if (this_board->DMAbits != 0) { /* board support DMA */
dma = it->options[2];
if (dma < 1)
goto no_dma; /* DMA disabled */
if (((1 << dma) & this_board->DMAbits) == 0) {
printk(", DMA is out of allowed range, FAIL!\n");
return -EINVAL; /* Bad DMA */
}
ret = request_dma(dma, "pcl816");
if (ret) {
printk(KERN_ERR
", unable to allocate DMA %u, FAIL!\n", dma);
return -EBUSY; /* DMA isn't free */
}
devpriv->dma = dma;
printk(KERN_INFO ", dma=%u", dma);
pages = 2; /* we need 16KB */
devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[0]) {
printk(", unable to allocate DMA buffer, FAIL!\n");
/*
* maybe experiment with try_to_free_pages()
* will help ....
*/
return -EBUSY; /* no buffer :-( */
}
devpriv->dmapages[0] = pages;
devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
/* printk("%d %d %ld, ",devpriv->dmapages[0],devpriv->hwdmasize[0],PAGE_SIZE); */
if (devpriv->dma_rtc == 0) { /* we must do duble buff :-( */
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
if (!devpriv->dmabuf[1]) {
printk(KERN_ERR
", unable to allocate DMA buffer, "
"FAIL!\n");
return -EBUSY;
}
devpriv->dmapages[1] = pages;
devpriv->hwdmaptr[1] =
virt_to_bus((void *)devpriv->dmabuf[1]);
devpriv->hwdmasize[1] = (1 << pages) * PAGE_SIZE;
}
}
no_dma:
/* if (this_board->n_aochan > 0)
subdevs[1] = COMEDI_SUBD_AO;
if (this_board->n_dichan > 0)
subdevs[2] = COMEDI_SUBD_DI;
if (this_board->n_dochan > 0)
subdevs[3] = COMEDI_SUBD_DO;
*/
ret = alloc_subdevices(dev, 1);
if (ret < 0)
return ret;
s = dev->subdevices + 0;
if (this_board->n_aichan > 0) {
s->type = COMEDI_SUBD_AI;
devpriv->sub_ai = s;
dev->read_subdev = s;
s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
s->n_chan = this_board->n_aichan;
s->subdev_flags |= SDF_DIFF;
/* printk (", %dchans DIFF DAC - %d", s->n_chan, i); */
s->maxdata = this_board->ai_maxdata;
s->len_chanlist = this_board->ai_chanlist;
s->range_table = this_board->ai_range_type;
s->cancel = pcl816_ai_cancel;
s->do_cmdtest = pcl816_ai_cmdtest;
s->do_cmd = pcl816_ai_cmd;
s->poll = pcl816_ai_poll;
s->insn_read = pcl816_ai_insn_read;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
#if 0
case COMEDI_SUBD_AO:
s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
s->n_chan = this_board->n_aochan;
s->maxdata = this_board->ao_maxdata;
s->len_chanlist = this_board->ao_chanlist;
s->range_table = this_board->ao_range_type;
break;
case COMEDI_SUBD_DI:
s->subdev_flags = SDF_READABLE;
s->n_chan = this_board->n_dichan;
s->maxdata = 1;
s->len_chanlist = this_board->n_dichan;
s->range_table = &range_digital;
break;
case COMEDI_SUBD_DO:
s->subdev_flags = SDF_WRITABLE;
s->n_chan = this_board->n_dochan;
s->maxdata = 1;
s->len_chanlist = this_board->n_dochan;
s->range_table = &range_digital;
break;
#endif
pcl816_reset(dev);
printk("\n");
return 0;
}
/*
==============================================================================
Removes device
*/
/*
 * Detach callback: release every resource grabbed by the attach routine
 * (I/O region, IRQ, DMA channel/buffers via free_resources()).
 * Always returns 0.
 */
static int pcl816_detach(struct comedi_device *dev)
{
	DEBUG(printk(KERN_INFO "comedi%d: pcl816: remove\n", dev->minor);)
	free_resources(dev);
#ifdef unused
	/* drop the global RTC reference taken for DMA during attach */
	if (devpriv->dma_rtc)
		RTC_lock--;
#endif
	return 0;
}
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
javilonas/Enki-GT-I9300 | drivers/rtc/rtc-mpc5121.c | 2529 | 8924 | /*
* Real-time clock driver for MPC5121
*
* Copyright 2007, Domen Puncer <domen.puncer@telargo.com>
* Copyright 2008, Freescale Semiconductor, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/slab.h>
/*
 * MPC5121 RTC register block; field offsets relative to the iomapped
 * base (see the per-field offset comments).  Accessed only through
 * in_8/out_8/in_be32/out_be32 on the __iomem pointer.
 */
struct mpc5121_rtc_regs {
	u8 set_time;		/* RTC + 0x00 */
	u8 hour_set;		/* RTC + 0x01 */
	u8 minute_set;		/* RTC + 0x02 */
	u8 second_set;		/* RTC + 0x03 */

	u8 set_date;		/* RTC + 0x04 */
	u8 month_set;		/* RTC + 0x05 */
	u8 weekday_set;		/* RTC + 0x06 */
	u8 date_set;		/* RTC + 0x07 */

	u8 write_sw;		/* RTC + 0x08 */
	u8 sw_set;		/* RTC + 0x09 */
	u16 year_set;		/* RTC + 0x0a */

	u8 alm_enable;		/* RTC + 0x0c */
	u8 alm_hour_set;	/* RTC + 0x0d */
	u8 alm_min_set;		/* RTC + 0x0e */
	u8 int_enable;		/* RTC + 0x0f */

	u8 reserved1;
	u8 hour;		/* RTC + 0x11 */
	u8 minute;		/* RTC + 0x12 */
	u8 second;		/* RTC + 0x13 */

	u8 month;		/* RTC + 0x14 */
	u8 wday_mday;		/* RTC + 0x15 */
	u16 year;		/* RTC + 0x16 */

	u8 int_alm;		/* RTC + 0x18 */
	u8 int_sw;		/* RTC + 0x19 */
	u8 alm_status;		/* RTC + 0x1a */
	u8 sw_minute;		/* RTC + 0x1b */

	u8 bus_error_1;		/* RTC + 0x1c */
	u8 int_day;		/* RTC + 0x1d */
	u8 int_min;		/* RTC + 0x1e */
	u8 int_sec;		/* RTC + 0x1f */

	/*
	 * target_time:
	 *	intended to be used for hibernation but hibernation
	 *	does not work on silicon rev 1.5 so use it for non-volatile
	 *	storage of offset between the actual_time register and linux
	 *	time
	 */
	u32 target_time;	/* RTC + 0x20 */
	/*
	 * actual_time:
	 *	readonly time since VBAT_RTC was last connected
	 */
	u32 actual_time;	/* RTC + 0x24 */
	u32 keep_alive;		/* RTC + 0x28 */
};
/* Per-device driver state, allocated in probe(). */
struct mpc5121_rtc_data {
	unsigned irq;			/* alarm interrupt (OF index 1) */
	unsigned irq_periodic;		/* per-second update interrupt (OF index 0) */
	struct mpc5121_rtc_regs __iomem *regs;	/* mapped register block */
	struct rtc_device *rtc;		/* registered RTC class device */
	struct rtc_wkalrm wkalarm;	/* cached alarm, reported by read_alarm */
};
/*
* Update second/minute/hour registers.
*
* This is just so alarm will work.
*/
/*
 * Update second/minute/hour registers.
 *
 * This is just so alarm will work: the hardware matches the alarm
 * against these registers, while actual timekeeping goes through
 * target_time/actual_time.  The 1-3-1-0 write sequence to set_time
 * latches the new values (hardware handshake; assumed per the MPC5121
 * reference manual — verify against the RM if this is ever changed).
 *
 * Note: this source was corrupted by an HTML-entity mangling of
 * "&regs" into "(R)s"; the addresses below are the restored form.
 */
static void mpc5121_rtc_update_smh(struct mpc5121_rtc_regs __iomem *regs,
				   struct rtc_time *tm)
{
	out_8(&regs->second_set, tm->tm_sec);
	out_8(&regs->minute_set, tm->tm_min);
	out_8(&regs->hour_set, tm->tm_hour);

	/* set time sequence */
	out_8(&regs->set_time, 0x1);
	out_8(&regs->set_time, 0x3);
	out_8(&regs->set_time, 0x1);
	out_8(&regs->set_time, 0x0);
}
/*
 * Read the current time.
 *
 * Linux time is the read-only actual_time counter plus the offset the
 * driver stores in target_time (see mpc5121_rtc_set_time()).
 * Returns 0 on success or the error from rtc_valid_tm().
 *
 * Fixes mojibake: "&regs" had been corrupted to an HTML entity.
 */
static int mpc5121_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
	struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
	unsigned long now;

	/*
	 * linux time is actual_time plus the offset saved in target_time
	 */
	now = in_be32(&regs->actual_time) + in_be32(&regs->target_time);

	rtc_time_to_tm(now, tm);

	/*
	 * update second minute hour registers
	 * so alarms will work
	 */
	mpc5121_rtc_update_smh(regs, tm);

	return rtc_valid_tm(tm);
}
/*
 * Set the current time.
 *
 * The actual_time register is read only, so the offset between it and
 * the requested Linux time is written to target_time instead; the
 * read path adds the two back together.  Always returns 0 (an invalid
 * tm silently skips the register write — preexisting behavior kept).
 *
 * Fixes mojibake: "&regs" had been corrupted to an HTML entity.
 */
static int mpc5121_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
	struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
	int ret;
	unsigned long now;

	/*
	 * The actual_time register is read only so we write the offset
	 * between it and linux time to the target_time register.
	 */
	ret = rtc_tm_to_time(tm, &now);
	if (ret == 0)
		out_be32(&regs->target_time, now - in_be32(&regs->actual_time));

	/*
	 * update second minute hour registers
	 * so alarms will work
	 */
	mpc5121_rtc_update_smh(regs, tm);

	return 0;
}
/*
 * Report the alarm cached by mpc5121_rtc_set_alarm(), with the live
 * pending flag read from the alarm status register.  Always returns 0.
 *
 * Fixes mojibake: "&regs" had been corrupted to an HTML entity.
 */
static int mpc5121_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
	struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
	struct mpc5121_rtc_regs __iomem *regs = rtc->regs;

	*alarm = rtc->wkalarm;

	alarm->pending = in_8(&regs->alm_status);

	return 0;
}
/*
 * Program the alarm.
 *
 * The hardware alarm matches only minute and hour, so a requested
 * seconds value is rounded up to the next minute (carrying into the
 * hour, wrapping at midnight); day/month/year are forced to -1 since
 * they cannot be matched.  The (possibly adjusted) alarm is cached in
 * rtc->wkalarm for read_alarm().  Always returns 0.
 *
 * Fixes mojibake: "&regs" had been corrupted to an HTML entity.
 */
static int mpc5121_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
	struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
	struct mpc5121_rtc_regs __iomem *regs = rtc->regs;

	/*
	 * the alarm has no seconds so deal with it
	 */
	if (alarm->time.tm_sec) {
		alarm->time.tm_sec = 0;
		alarm->time.tm_min++;
		if (alarm->time.tm_min >= 60) {
			alarm->time.tm_min = 0;
			alarm->time.tm_hour++;
			if (alarm->time.tm_hour >= 24)
				alarm->time.tm_hour = 0;
		}
	}

	/* day/month/year cannot be matched by this hardware */
	alarm->time.tm_mday = -1;
	alarm->time.tm_mon = -1;
	alarm->time.tm_year = -1;

	out_8(&regs->alm_min_set, alarm->time.tm_min);
	out_8(&regs->alm_hour_set, alarm->time.tm_hour);
	out_8(&regs->alm_enable, alarm->enabled);

	rtc->wkalarm = *alarm;
	return 0;
}
/*
 * Alarm interrupt handler: acknowledge the alarm interrupt and status,
 * then forward an RTC_AF event to the RTC core.  Returns IRQ_NONE when
 * the alarm interrupt flag is not set (shared/spurious interrupt).
 *
 * Fixes mojibake: "&regs" had been corrupted to an HTML entity.
 */
static irqreturn_t mpc5121_rtc_handler(int irq, void *dev)
{
	struct mpc5121_rtc_data *rtc = dev_get_drvdata((struct device *)dev);
	struct mpc5121_rtc_regs __iomem *regs = rtc->regs;

	if (in_8(&regs->int_alm)) {
		/* acknowledge and clear status */
		out_8(&regs->int_alm, 1);
		out_8(&regs->alm_status, 1);

		rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
/*
 * Per-second update interrupt handler: if the second interrupt fired
 * and is enabled (int_enable bit 0), acknowledge it and forward an
 * RTC_UF event to the RTC core; otherwise report IRQ_NONE.
 *
 * Fixes mojibake: "&regs" had been corrupted to an HTML entity.
 */
static irqreturn_t mpc5121_rtc_handler_upd(int irq, void *dev)
{
	struct mpc5121_rtc_data *rtc = dev_get_drvdata((struct device *)dev);
	struct mpc5121_rtc_regs __iomem *regs = rtc->regs;

	if (in_8(&regs->int_sec) && (in_8(&regs->int_enable) & 0x1)) {
		/* acknowledge */
		out_8(&regs->int_sec, 1);

		rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_UF);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
/*
 * Enable or disable the alarm interrupt and mirror the new state into
 * the cached alarm.  Always returns 0.
 *
 * Fixes mojibake ("&regs" corrupted to an HTML entity) and collapses
 * the if/else that normalized @enabled into a ternary.
 */
static int mpc5121_rtc_alarm_irq_enable(struct device *dev,
					unsigned int enabled)
{
	struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev);
	struct mpc5121_rtc_regs __iomem *regs = rtc->regs;
	int val = enabled ? 1 : 0;	/* hardware takes a 0/1 flag */

	out_8(&regs->alm_enable, val);
	rtc->wkalarm.enabled = val;

	return 0;
}
/* RTC class operations; registered in mpc5121_rtc_probe(). */
static const struct rtc_class_ops mpc5121_rtc_ops = {
	.read_time = mpc5121_rtc_read_time,
	.set_time = mpc5121_rtc_set_time,
	.read_alarm = mpc5121_rtc_read_alarm,
	.set_alarm = mpc5121_rtc_set_alarm,
	.alarm_irq_enable = mpc5121_rtc_alarm_irq_enable,
};
/*
 * Probe: map registers, hook up alarm (OF irq 1) and per-second update
 * (OF irq 0) interrupts, check the battery/oscillator keep-alive flag,
 * and register with the RTC class.  Error paths unwind in strict
 * reverse acquisition order through the goto ladder at the bottom.
 */
static int __devinit mpc5121_rtc_probe(struct platform_device *op)
{
	struct mpc5121_rtc_data *rtc;
	int err = 0;
	u32 ka;

	rtc = kzalloc(sizeof(*rtc), GFP_KERNEL);
	if (!rtc)
		return -ENOMEM;

	rtc->regs = of_iomap(op->dev.of_node, 0);
	if (!rtc->regs) {
		dev_err(&op->dev, "%s: couldn't map io space\n", __func__);
		/* NOTE(review): -ENOMEM would be the conventional code for
		 * an iomap failure; -ENOSYS kept to preserve behavior. */
		err = -ENOSYS;
		goto out_free;
	}

	device_init_wakeup(&op->dev, 1);

	/* drvdata must be set before the handlers can run */
	dev_set_drvdata(&op->dev, rtc);

	rtc->irq = irq_of_parse_and_map(op->dev.of_node, 1);
	err = request_irq(rtc->irq, mpc5121_rtc_handler, IRQF_DISABLED,
			  "mpc5121-rtc", &op->dev);
	if (err) {
		dev_err(&op->dev, "%s: could not request irq: %i\n",
			__func__, rtc->irq);
		goto out_dispose;
	}

	rtc->irq_periodic = irq_of_parse_and_map(op->dev.of_node, 0);
	err = request_irq(rtc->irq_periodic, mpc5121_rtc_handler_upd,
			  IRQF_DISABLED, "mpc5121-rtc_upd", &op->dev);
	if (err) {
		dev_err(&op->dev, "%s: could not request irq: %i\n",
			__func__, rtc->irq_periodic);
		goto out_dispose2;
	}

	/* keep-alive bit 1 set means battery or oscillator trouble;
	 * writing the value back acknowledges the condition */
	ka = in_be32(&rtc->regs->keep_alive);
	if (ka & 0x02) {
		dev_warn(&op->dev,
			 "mpc5121-rtc: Battery or oscillator failure!\n");
		out_be32(&rtc->regs->keep_alive, ka);
	}

	rtc->rtc = rtc_device_register("mpc5121-rtc", &op->dev,
				       &mpc5121_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc->rtc)) {
		err = PTR_ERR(rtc->rtc);
		goto out_free_irq;
	}

	return 0;

out_free_irq:
	free_irq(rtc->irq_periodic, &op->dev);
out_dispose2:
	irq_dispose_mapping(rtc->irq_periodic);
	free_irq(rtc->irq, &op->dev);
out_dispose:
	irq_dispose_mapping(rtc->irq);
	iounmap(rtc->regs);
out_free:
	kfree(rtc);

	return err;
}
/*
 * Remove: mask the alarm and second interrupts at the hardware first
 * so no IRQ fires during teardown, then release everything acquired
 * by probe().  Always returns 0.
 *
 * Fixes mojibake: "&regs" had been corrupted to an HTML entity.
 */
static int __devexit mpc5121_rtc_remove(struct platform_device *op)
{
	struct mpc5121_rtc_data *rtc = dev_get_drvdata(&op->dev);
	struct mpc5121_rtc_regs __iomem *regs = rtc->regs;

	/* disable interrupt, so there are no nasty surprises */
	out_8(&regs->alm_enable, 0);
	out_8(&regs->int_enable, in_8(&regs->int_enable) & ~0x1);

	rtc_device_unregister(rtc->rtc);
	iounmap(rtc->regs);
	free_irq(rtc->irq, &op->dev);
	free_irq(rtc->irq_periodic, &op->dev);
	irq_dispose_mapping(rtc->irq);
	irq_dispose_mapping(rtc->irq_periodic);
	dev_set_drvdata(&op->dev, NULL);
	kfree(rtc);

	return 0;
}
/* Device-tree match table (terminated by the empty sentinel entry). */
static struct of_device_id mpc5121_rtc_match[] __devinitdata = {
	{ .compatible = "fsl,mpc5121-rtc", },
	{},
};
/* Platform driver bound via the OF match table above. */
static struct platform_driver mpc5121_rtc_driver = {
	.driver = {
		.name = "mpc5121-rtc",
		.owner = THIS_MODULE,
		.of_match_table = mpc5121_rtc_match,
	},
	.probe = mpc5121_rtc_probe,
	.remove = __devexit_p(mpc5121_rtc_remove),
};
/* Module entry point: register the platform driver. */
static int __init mpc5121_rtc_init(void)
{
	return platform_driver_register(&mpc5121_rtc_driver);
}
module_init(mpc5121_rtc_init);
/* Module exit point: unregister the platform driver. */
static void __exit mpc5121_rtc_exit(void)
{
	platform_driver_unregister(&mpc5121_rtc_driver);
}
module_exit(mpc5121_rtc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Rigby <jcrigby@gmail.com>");
| gpl-2.0 |
DerRomtester/android_kernel_samsung_aries | drivers/staging/altera-stapl/altera-jtag.c | 2785 | 22103 | /*
* altera-jtag.c
*
* altera FPGA driver
*
* Copyright (C) Altera Corporation 1998-2001
* Copyright (C) 2010 NetUP Inc.
* Copyright (C) 2010 Igor M. Liplianin <liplianin@netup.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include "altera.h"
#include "altera-exprt.h"
#include "altera-jtag.h"
#define alt_jtag_io(a, b, c)\
astate->config->jtag_io(astate->config->dev, a, b, c);
#define alt_malloc(a) kzalloc(a, GFP_KERNEL);
/*
* This structure shows, for each JTAG state, which state is reached after
* a single TCK clock cycle with TMS high or TMS low, respectively. This
* describes all possible state transitions in the JTAG state machine.
*/
/* Successor states for one TCK cycle with TMS high or TMS low. */
struct altera_jtag_machine {
	enum altera_jtag_state tms_high;
	enum altera_jtag_state tms_low;
};

/*
 * Full IEEE 1149.1 TAP transition table, indexed by the current
 * enum altera_jtag_state value (one row per state, in enum order).
 */
static const struct altera_jtag_machine altera_transitions[] = {
	/* RESET     */	{ RESET,	IDLE },
	/* IDLE      */	{ DRSELECT,	IDLE },
	/* DRSELECT  */	{ IRSELECT,	DRCAPTURE },
	/* DRCAPTURE */	{ DREXIT1,	DRSHIFT },
	/* DRSHIFT   */	{ DREXIT1,	DRSHIFT },
	/* DREXIT1   */	{ DRUPDATE,	DRPAUSE },
	/* DRPAUSE   */	{ DREXIT2,	DRPAUSE },
	/* DREXIT2   */	{ DRUPDATE,	DRSHIFT },
	/* DRUPDATE  */	{ DRSELECT,	IDLE },
	/* IRSELECT  */	{ RESET,	IRCAPTURE },
	/* IRCAPTURE */	{ IREXIT1,	IRSHIFT },
	/* IRSHIFT   */	{ IREXIT1,	IRSHIFT },
	/* IREXIT1   */	{ IRUPDATE,	IRPAUSE },
	/* IRPAUSE   */	{ IREXIT2,	IRPAUSE },
	/* IREXIT2   */	{ IRUPDATE,	IRSHIFT },
	/* IRUPDATE  */	{ DRSELECT,	IDLE }
};
/*
* This table contains the TMS value to be used to take the NEXT STEP on
* the path to the desired state. The array index is the current state,
* and the bit position is the desired endstate. To find out which state
* is used as the intermediate state, look up the TMS value in the
* altera_transitions[] table.
*/
/*
 * Next-step TMS lookup: row index = current state, bit position =
 * desired end state; a set bit means drive TMS high for the next TCK.
 * The resulting intermediate state comes from altera_transitions[].
 */
static const u16 altera_jtag_path_map[16] = {
	/* RST	RTI	SDRS	CDR	SDR	E1DR	PDR	E2DR */
	0x0001,	0xFFFD,	0xFE01,	0xFFE7,	0xFFEF,	0xFF0F,	0xFFBF,	0xFFFF,
	/* UDR	SIRS	CIR	SIR	E1IR	PIR	E2IR	UIR */
	0xFEFD,	0x0001,	0xF3FF,	0xF7FF,	0x87FF,	0xDFFF,	0xFFFF,	0x7FFD
};
/* Flag bits for alt_jtag_io() function */
#define TMS_HIGH 1
#define TMS_LOW 0
#define TDI_HIGH 1
#define TDI_LOW 0
#define READ_TDO 1
#define IGNORE_TDO 0
/*
 * Reset the JTAG bookkeeping in @astate to defaults: TAP position
 * unknown, both stop states IDLE, and no cached pre/postamble data or
 * scan buffers (the set_* helpers allocate them lazily).
 * Always returns 0.
 */
int altera_jinit(struct altera_state *astate)
{
	struct altera_jtag *js = &astate->js;

	/* initial JTAG state is unknown */
	js->jtag_state = ILLEGAL_JTAG_STATE;

	/* initialize to default state */
	js->drstop_state = IDLE;
	js->irstop_state = IDLE;
	js->dr_pre = 0;
	js->dr_post = 0;
	js->ir_pre = 0;
	js->ir_post = 0;
	js->dr_length = 0;
	js->ir_length = 0;
	js->dr_pre_data = NULL;
	js->dr_post_data = NULL;
	js->ir_pre_data = NULL;
	js->ir_post_data = NULL;
	js->dr_buffer = NULL;
	js->ir_buffer = NULL;

	return 0;
}
/* Select the stable state in which DR scans end.  Always returns 0. */
int altera_set_drstop(struct altera_jtag *js, enum altera_jtag_state state)
{
	js->drstop_state = state;
	return 0;
}
/* Select the stable state in which IR scans end.  Always returns 0. */
int altera_set_irstop(struct altera_jtag *js, enum altera_jtag_state state)
{
	js->irstop_state = state;
	return 0;
}
int altera_set_dr_pre(struct altera_jtag *js,
u32 count, u32 start_index,
u8 *preamble_data)
{
int status = 0;
u32 i;
u32 j;
if (count > js->dr_pre) {
kfree(js->dr_pre_data);
js->dr_pre_data = (u8 *)alt_malloc((count + 7) >> 3);
if (js->dr_pre_data == NULL)
status = -ENOMEM;
else
js->dr_pre = count;
} else
js->dr_pre = count;
if (status == 0) {
for (i = 0; i < count; ++i) {
j = i + start_index;
if (preamble_data == NULL)
js->dr_pre_data[i >> 3] |= (1 << (i & 7));
else {
if (preamble_data[j >> 3] & (1 << (j & 7)))
js->dr_pre_data[i >> 3] |=
(1 << (i & 7));
else
js->dr_pre_data[i >> 3] &=
~(u32)(1 << (i & 7));
}
}
}
return status;
}
/*
 * Store @count IR preamble bits from @preamble_data starting at bit
 * @start_index; NULL means all-ones padding.  Grows the cached buffer
 * only when needed.  Returns 0 or -ENOMEM.
 * (Same pattern as altera_set_dr_pre().)
 */
int altera_set_ir_pre(struct altera_jtag *js, u32 count, u32 start_index,
		      u8 *preamble_data)
{
	int status = 0;
	u32 i;
	u32 j;

	if (count > js->ir_pre) {
		kfree(js->ir_pre_data);
		js->ir_pre_data = (u8 *)alt_malloc((count + 7) >> 3);
		if (js->ir_pre_data == NULL)
			status = -ENOMEM;
		else
			js->ir_pre = count;
	} else
		js->ir_pre = count;

	if (status == 0) {
		for (i = 0; i < count; ++i) {
			j = i + start_index;

			if (preamble_data == NULL)
				js->ir_pre_data[i >> 3] |= (1 << (i & 7));
			else {
				if (preamble_data[j >> 3] & (1 << (j & 7)))
					js->ir_pre_data[i >> 3] |=
						(1 << (i & 7));
				else
					js->ir_pre_data[i >> 3] &=
						~(u32)(1 << (i & 7));
			}
		}
	}
	return status;
}
/*
 * Store @count DR postamble bits (padding shifted out after the target
 * data) from @postamble_data starting at bit @start_index; NULL means
 * all-ones padding.  Returns 0 or -ENOMEM.
 * (Same pattern as altera_set_dr_pre().)
 */
int altera_set_dr_post(struct altera_jtag *js, u32 count, u32 start_index,
		       u8 *postamble_data)
{
	int status = 0;
	u32 i;
	u32 j;

	if (count > js->dr_post) {
		kfree(js->dr_post_data);
		js->dr_post_data = (u8 *)alt_malloc((count + 7) >> 3);
		if (js->dr_post_data == NULL)
			status = -ENOMEM;
		else
			js->dr_post = count;
	} else
		js->dr_post = count;

	if (status == 0) {
		for (i = 0; i < count; ++i) {
			j = i + start_index;

			if (postamble_data == NULL)
				js->dr_post_data[i >> 3] |= (1 << (i & 7));
			else {
				if (postamble_data[j >> 3] & (1 << (j & 7)))
					js->dr_post_data[i >> 3] |=
						(1 << (i & 7));
				else
					js->dr_post_data[i >> 3] &=
						~(u32)(1 << (i & 7));
			}
		}
	}
	return status;
}
/*
 * Store @count IR postamble bits from @postamble_data starting at bit
 * @start_index; NULL means all-ones padding.  Returns 0 or -ENOMEM.
 * NOTE: this variant uses an early return on allocation failure where
 * its three siblings nest under "if (status == 0)" — behavior is the
 * same, only the style differs.
 */
int altera_set_ir_post(struct altera_jtag *js, u32 count, u32 start_index,
		       u8 *postamble_data)
{
	int status = 0;
	u32 i;
	u32 j;

	if (count > js->ir_post) {
		kfree(js->ir_post_data);
		js->ir_post_data = (u8 *)alt_malloc((count + 7) >> 3);
		if (js->ir_post_data == NULL)
			status = -ENOMEM;
		else
			js->ir_post = count;
	} else
		js->ir_post = count;

	if (status != 0)
		return status;

	for (i = 0; i < count; ++i) {
		j = i + start_index;

		if (postamble_data == NULL)
			js->ir_post_data[i >> 3] |= (1 << (i & 7));
		else {
			if (postamble_data[j >> 3] & (1 << (j & 7)))
				js->ir_post_data[i >> 3] |= (1 << (i & 7));
			else
				js->ir_post_data[i >> 3] &=
					~(u32)(1 << (i & 7));
		}
	}

	return status;
}
/*
 * Force the TAP into a known position: five TCKs with TMS high reach
 * Test-Logic-Reset from any state, then one TMS-low cycle steps to
 * Run-Test/Idle.  Updates the software state tracker accordingly.
 */
static void altera_jreset_idle(struct altera_state *astate)
{
	struct altera_jtag *js = &astate->js;
	int i;

	/* Go to Test Logic Reset (no matter what the starting state may be) */
	for (i = 0; i < 5; ++i)
		alt_jtag_io(TMS_HIGH, TDI_LOW, IGNORE_TDO);

	/* Now step to Run Test / Idle */
	alt_jtag_io(TMS_LOW, TDI_LOW, IGNORE_TDO);

	js->jtag_state = IDLE;
}
/*
 * Walk the TAP from its current (tracked) state to @state, using
 * altera_jtag_path_map[] to choose each TMS value and
 * altera_transitions[] to track the result.  If the current state is
 * unknown the chain is first reset to IDLE.  If already in @state, a
 * single cycle is clocked only for stable states (to "loop" there).
 * Any legal pair of states is at most 8 steps apart, hence the
 * count < 9 safety bound.  Returns 0, or -EREMOTEIO if the target
 * state was not reached.
 */
int altera_goto_jstate(struct altera_state *astate,
		       enum altera_jtag_state state)
{
	struct altera_jtag *js = &astate->js;
	int tms;
	int count = 0;
	int status = 0;

	if (js->jtag_state == ILLEGAL_JTAG_STATE)
		/* initialize JTAG chain to known state */
		altera_jreset_idle(astate);

	if (js->jtag_state == state) {
		/*
		 * We are already in the desired state.
		 * If it is a stable state, loop here.
		 * Otherwise do nothing (no clock cycles).
		 */
		if ((state == IDLE) || (state == DRSHIFT) ||
			(state == DRPAUSE) || (state == IRSHIFT) ||
				(state == IRPAUSE)) {
			alt_jtag_io(TMS_LOW, TDI_LOW, IGNORE_TDO);
		} else if (state == RESET)
			alt_jtag_io(TMS_HIGH, TDI_LOW, IGNORE_TDO);
	} else {
		while ((js->jtag_state != state) && (count < 9)) {
			/* Get TMS value to take a step toward desired state */
			tms = (altera_jtag_path_map[js->jtag_state] &
							(1 << state))
							? TMS_HIGH : TMS_LOW;

			/* Take a step */
			alt_jtag_io(tms, TDI_LOW, IGNORE_TDO);

			if (tms)
				js->jtag_state =
					altera_transitions[js->jtag_state].tms_high;
			else
				js->jtag_state =
					altera_transitions[js->jtag_state].tms_low;

			++count;
		}
	}

	if (js->jtag_state != state)
		status = -EREMOTEIO;

	return status;
}
/*
 * Clock @cycles TCK cycles while sitting in the stable state
 * @wait_state (moving there first if necessary).  TMS is held high to
 * loop in RESET, low for every other stable state.
 * Returns 0 or the error from altera_goto_jstate().
 */
int altera_wait_cycles(struct altera_state *astate,
		       s32 cycles,
		       enum altera_jtag_state wait_state)
{
	struct altera_jtag *js = &astate->js;
	int tms;
	s32 count;
	int status = 0;

	if (js->jtag_state != wait_state)
		status = altera_goto_jstate(astate, wait_state);

	if (status == 0) {
		/*
		 * Set TMS high to loop in RESET state
		 * Set TMS low to loop in any other stable state
		 */
		tms = (wait_state == RESET) ? TMS_HIGH : TMS_LOW;

		for (count = 0L; count < cycles; count++)
			alt_jtag_io(tms, TDI_LOW, IGNORE_TDO);
	}

	return status;
}
int altera_wait_msecs(struct altera_state *astate,
s32 microseconds, enum altera_jtag_state wait_state)
/*
* Causes JTAG hardware to sit in the specified stable
* state for the specified duration of real time. If
* no JTAG operations have been performed yet, then only
* a delay is performed. This permits the WAIT USECS
* statement to be used in VECTOR programs without causing
* any JTAG operations.
* Returns 0 for success, else appropriate error code.
*/
{
struct altera_jtag *js = &astate->js;
int status = 0;
if ((js->jtag_state != ILLEGAL_JTAG_STATE) &&
(js->jtag_state != wait_state))
status = altera_goto_jstate(astate, wait_state);
if (status == 0)
/* Wait for specified time interval */
udelay(microseconds);
return status;
}
/*
 * Copy @count bits from @src (starting at bit @src_off) into @dst
 * (starting at bit @dst_off), one bit at a time, least significant
 * bit of each byte first.
 */
static void altera_copy_bit_run(u8 *dst, u32 dst_off,
				u8 *src, u32 src_off, u32 count)
{
	u32 n;

	for (n = 0; n < count; ++n) {
		u32 d = dst_off + n;
		u32 s = src_off + n;

		if (src[s >> 3] & (1 << (s & 7)))
			dst[d >> 3] |= (1 << (d & 7));
		else
			dst[d >> 3] &= ~(u32)(1 << (d & 7));
	}
}

static void altera_concatenate_data(u8 *buffer,
				u8 *preamble_data,
				u32 preamble_count,
				u8 *target_data,
				u32 start_index,
				u32 target_count,
				u8 *postamble_data,
				u32 postamble_count)
/*
 * Copies preamble data, target data, and postamble data
 * into one buffer for IR or DR scans.  Target bits are read
 * beginning at bit start_index of target_data.
 */
{
	altera_copy_bit_run(buffer, 0,
			    preamble_data, 0, preamble_count);
	altera_copy_bit_run(buffer, preamble_count,
			    target_data, start_index, target_count);
	altera_copy_bit_run(buffer, preamble_count + target_count,
			    postamble_data, 0, postamble_count);
}
/*
 * Low-level DR shift: walk from the caller's position (@start_state:
 * 0 = IDLE, 1 = DRPAUSE, 2 = IRPAUSE) into DRSHIFT, clock @count bits
 * of @tdi out (capturing TDO into @tdo when non-NULL), and end in
 * DRPAUSE.  Returns 1 on success, 0 for an unknown start_state.
 */
static int alt_jtag_drscan(struct altera_state *astate,
			   int start_state,
			   int count,
			   u8 *tdi,
			   u8 *tdo)
{
	int i = 0;
	int tdo_bit = 0;
	int status = 1;

	/* First go to DRSHIFT state */
	switch (start_state) {
	case 0:						/* IDLE */
		alt_jtag_io(1, 0, 0);	/* DRSELECT */
		alt_jtag_io(0, 0, 0);	/* DRCAPTURE */
		alt_jtag_io(0, 0, 0);	/* DRSHIFT */
		break;

	case 1:						/* DRPAUSE */
		alt_jtag_io(1, 0, 0);	/* DREXIT2 */
		alt_jtag_io(1, 0, 0);	/* DRUPDATE */
		alt_jtag_io(1, 0, 0);	/* DRSELECT */
		alt_jtag_io(0, 0, 0);	/* DRCAPTURE */
		alt_jtag_io(0, 0, 0);	/* DRSHIFT */
		break;

	case 2:						/* IRPAUSE */
		alt_jtag_io(1, 0, 0);	/* IREXIT2 */
		alt_jtag_io(1, 0, 0);	/* IRUPDATE */
		alt_jtag_io(1, 0, 0);	/* DRSELECT */
		alt_jtag_io(0, 0, 0);	/* DRCAPTURE */
		alt_jtag_io(0, 0, 0);	/* DRSHIFT */
		break;

	default:
		status = 0;
	}

	if (status) {
		/* loop in the SHIFT-DR state */
		for (i = 0; i < count; i++) {
			/* TMS goes high on the last bit to leave DRSHIFT */
			tdo_bit = alt_jtag_io(
					(i == count - 1),
					tdi[i >> 3] & (1 << (i & 7)),
					(tdo != NULL));

			if (tdo != NULL) {
				if (tdo_bit)
					tdo[i >> 3] |= (1 << (i & 7));
				else
					tdo[i >> 3] &= ~(u32)(1 << (i & 7));
			}
		}

		alt_jtag_io(0, 0, 0);	/* DRPAUSE */
	}

	return status;
}
/*
 * Low-level IR shift: mirror of alt_jtag_drscan() for the instruction
 * register.  @start_state: 0 = IDLE, 1 = DRPAUSE, 2 = IRPAUSE.  Clocks
 * @count bits of @tdi, captures TDO into @tdo when non-NULL, ends in
 * IRPAUSE.  Returns 1 on success, 0 for an unknown start_state.
 */
static int alt_jtag_irscan(struct altera_state *astate,
			   int start_state,
			   int count,
			   u8 *tdi,
			   u8 *tdo)
{
	int i = 0;
	int tdo_bit = 0;
	int status = 1;

	/* First go to IRSHIFT state */
	switch (start_state) {
	case 0:						/* IDLE */
		alt_jtag_io(1, 0, 0);	/* DRSELECT */
		alt_jtag_io(1, 0, 0);	/* IRSELECT */
		alt_jtag_io(0, 0, 0);	/* IRCAPTURE */
		alt_jtag_io(0, 0, 0);	/* IRSHIFT */
		break;

	case 1:						/* DRPAUSE */
		alt_jtag_io(1, 0, 0);	/* DREXIT2 */
		alt_jtag_io(1, 0, 0);	/* DRUPDATE */
		alt_jtag_io(1, 0, 0);	/* DRSELECT */
		alt_jtag_io(1, 0, 0);	/* IRSELECT */
		alt_jtag_io(0, 0, 0);	/* IRCAPTURE */
		alt_jtag_io(0, 0, 0);	/* IRSHIFT */
		break;

	case 2:						/* IRPAUSE */
		alt_jtag_io(1, 0, 0);	/* IREXIT2 */
		alt_jtag_io(1, 0, 0);	/* IRUPDATE */
		alt_jtag_io(1, 0, 0);	/* DRSELECT */
		alt_jtag_io(1, 0, 0);	/* IRSELECT */
		alt_jtag_io(0, 0, 0);	/* IRCAPTURE */
		alt_jtag_io(0, 0, 0);	/* IRSHIFT */
		break;

	default:
		status = 0;
	}

	if (status) {
		/* loop in the SHIFT-IR state */
		for (i = 0; i < count; i++) {
			/* TMS goes high on the last bit to leave IRSHIFT */
			tdo_bit = alt_jtag_io(
					(i == count - 1),
					tdi[i >> 3] & (1 << (i & 7)),
					(tdo != NULL));

			if (tdo != NULL) {
				if (tdo_bit)
					tdo[i >> 3] |= (1 << (i & 7));
				else
					tdo[i >> 3] &= ~(u32)(1 << (i & 7));
			}
		}

		alt_jtag_io(0, 0, 0);	/* IRPAUSE */
	}

	return status;
}
static void altera_extract_target_data(u8 *buffer,
				       u8 *target_data,
				       u32 start_index,
				       u32 preamble_count,
				       u32 target_count)
/*
 * Copies target data from scan buffer, filtering out
 * preamble and postamble data: buffer bit (preamble_count + n)
 * is written to target_data bit (start_index + n) for each of
 * the target_count bits; postamble bits are simply ignored.
 */
{
	u32 n;

	for (n = 0; n < target_count; ++n) {
		u32 src = preamble_count + n;	/* bit index in scan buffer */
		u32 dst = start_index + n;	/* bit index in caller data */

		if (buffer[src >> 3] & (1 << (src & 7)))
			target_data[dst >> 3] |= (1 << (dst & 7));
		else
			target_data[dst >> 3] &= ~(u32)(1 << (dst & 7));
	}
}
int altera_irscan(struct altera_state *astate,
u32 count,
u8 *tdi_data,
u32 start_index)
/* Shifts data into instruction register */
{
struct altera_jtag *js = &astate->js;
int start_code = 0;
u32 alloc_chars = 0;
u32 shift_count = js->ir_pre + count + js->ir_post;
int status = 0;
enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE;
switch (js->jtag_state) {
case ILLEGAL_JTAG_STATE:
case RESET:
case IDLE:
start_code = 0;
start_state = IDLE;
break;
case DRSELECT:
case DRCAPTURE:
case DRSHIFT:
case DREXIT1:
case DRPAUSE:
case DREXIT2:
case DRUPDATE:
start_code = 1;
start_state = DRPAUSE;
break;
case IRSELECT:
case IRCAPTURE:
case IRSHIFT:
case IREXIT1:
case IRPAUSE:
case IREXIT2:
case IRUPDATE:
start_code = 2;
start_state = IRPAUSE;
break;
default:
status = -EREMOTEIO;
break;
}
if (status == 0)
if (js->jtag_state != start_state)
status = altera_goto_jstate(astate, start_state);
if (status == 0) {
if (shift_count > js->ir_length) {
alloc_chars = (shift_count + 7) >> 3;
kfree(js->ir_buffer);
js->ir_buffer = (u8 *)alt_malloc(alloc_chars);
if (js->ir_buffer == NULL)
status = -ENOMEM;
else
js->ir_length = alloc_chars * 8;
}
}
if (status == 0) {
/*
* Copy preamble data, IR data,
* and postamble data into a buffer
*/
altera_concatenate_data(js->ir_buffer,
js->ir_pre_data,
js->ir_pre,
tdi_data,
start_index,
count,
js->ir_post_data,
js->ir_post);
/* Do the IRSCAN */
alt_jtag_irscan(astate,
start_code,
shift_count,
js->ir_buffer,
NULL);
/* alt_jtag_irscan() always ends in IRPAUSE state */
js->jtag_state = IRPAUSE;
}
if (status == 0)
if (js->irstop_state != IRPAUSE)
status = altera_goto_jstate(astate, js->irstop_state);
return status;
}
int altera_swap_ir(struct altera_state *astate,
u32 count,
u8 *in_data,
u32 in_index,
u8 *out_data,
u32 out_index)
/* Shifts data into instruction register, capturing output data */
{
struct altera_jtag *js = &astate->js;
int start_code = 0;
u32 alloc_chars = 0;
u32 shift_count = js->ir_pre + count + js->ir_post;
int status = 0;
enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE;
switch (js->jtag_state) {
case ILLEGAL_JTAG_STATE:
case RESET:
case IDLE:
start_code = 0;
start_state = IDLE;
break;
case DRSELECT:
case DRCAPTURE:
case DRSHIFT:
case DREXIT1:
case DRPAUSE:
case DREXIT2:
case DRUPDATE:
start_code = 1;
start_state = DRPAUSE;
break;
case IRSELECT:
case IRCAPTURE:
case IRSHIFT:
case IREXIT1:
case IRPAUSE:
case IREXIT2:
case IRUPDATE:
start_code = 2;
start_state = IRPAUSE;
break;
default:
status = -EREMOTEIO;
break;
}
if (status == 0)
if (js->jtag_state != start_state)
status = altera_goto_jstate(astate, start_state);
if (status == 0) {
if (shift_count > js->ir_length) {
alloc_chars = (shift_count + 7) >> 3;
kfree(js->ir_buffer);
js->ir_buffer = (u8 *)alt_malloc(alloc_chars);
if (js->ir_buffer == NULL)
status = -ENOMEM;
else
js->ir_length = alloc_chars * 8;
}
}
if (status == 0) {
/*
* Copy preamble data, IR data,
* and postamble data into a buffer
*/
altera_concatenate_data(js->ir_buffer,
js->ir_pre_data,
js->ir_pre,
in_data,
in_index,
count,
js->ir_post_data,
js->ir_post);
/* Do the IRSCAN */
alt_jtag_irscan(astate,
start_code,
shift_count,
js->ir_buffer,
js->ir_buffer);
/* alt_jtag_irscan() always ends in IRPAUSE state */
js->jtag_state = IRPAUSE;
}
if (status == 0)
if (js->irstop_state != IRPAUSE)
status = altera_goto_jstate(astate, js->irstop_state);
if (status == 0)
/* Now extract the returned data from the buffer */
altera_extract_target_data(js->ir_buffer,
out_data, out_index,
js->ir_pre, count);
return status;
}
int altera_drscan(struct altera_state *astate,
u32 count,
u8 *tdi_data,
u32 start_index)
/* Shifts data into data register (ignoring output data) */
{
struct altera_jtag *js = &astate->js;
int start_code = 0;
u32 alloc_chars = 0;
u32 shift_count = js->dr_pre + count + js->dr_post;
int status = 0;
enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE;
switch (js->jtag_state) {
case ILLEGAL_JTAG_STATE:
case RESET:
case IDLE:
start_code = 0;
start_state = IDLE;
break;
case DRSELECT:
case DRCAPTURE:
case DRSHIFT:
case DREXIT1:
case DRPAUSE:
case DREXIT2:
case DRUPDATE:
start_code = 1;
start_state = DRPAUSE;
break;
case IRSELECT:
case IRCAPTURE:
case IRSHIFT:
case IREXIT1:
case IRPAUSE:
case IREXIT2:
case IRUPDATE:
start_code = 2;
start_state = IRPAUSE;
break;
default:
status = -EREMOTEIO;
break;
}
if (status == 0)
if (js->jtag_state != start_state)
status = altera_goto_jstate(astate, start_state);
if (status == 0) {
if (shift_count > js->dr_length) {
alloc_chars = (shift_count + 7) >> 3;
kfree(js->dr_buffer);
js->dr_buffer = (u8 *)alt_malloc(alloc_chars);
if (js->dr_buffer == NULL)
status = -ENOMEM;
else
js->dr_length = alloc_chars * 8;
}
}
if (status == 0) {
/*
* Copy preamble data, DR data,
* and postamble data into a buffer
*/
altera_concatenate_data(js->dr_buffer,
js->dr_pre_data,
js->dr_pre,
tdi_data,
start_index,
count,
js->dr_post_data,
js->dr_post);
/* Do the DRSCAN */
alt_jtag_drscan(astate, start_code, shift_count,
js->dr_buffer, NULL);
/* alt_jtag_drscan() always ends in DRPAUSE state */
js->jtag_state = DRPAUSE;
}
if (status == 0)
if (js->drstop_state != DRPAUSE)
status = altera_goto_jstate(astate, js->drstop_state);
return status;
}
int altera_swap_dr(struct altera_state *astate, u32 count,
u8 *in_data, u32 in_index,
u8 *out_data, u32 out_index)
/* Shifts data into data register, capturing output data */
{
struct altera_jtag *js = &astate->js;
int start_code = 0;
u32 alloc_chars = 0;
u32 shift_count = js->dr_pre + count + js->dr_post;
int status = 0;
enum altera_jtag_state start_state = ILLEGAL_JTAG_STATE;
switch (js->jtag_state) {
case ILLEGAL_JTAG_STATE:
case RESET:
case IDLE:
start_code = 0;
start_state = IDLE;
break;
case DRSELECT:
case DRCAPTURE:
case DRSHIFT:
case DREXIT1:
case DRPAUSE:
case DREXIT2:
case DRUPDATE:
start_code = 1;
start_state = DRPAUSE;
break;
case IRSELECT:
case IRCAPTURE:
case IRSHIFT:
case IREXIT1:
case IRPAUSE:
case IREXIT2:
case IRUPDATE:
start_code = 2;
start_state = IRPAUSE;
break;
default:
status = -EREMOTEIO;
break;
}
if (status == 0)
if (js->jtag_state != start_state)
status = altera_goto_jstate(astate, start_state);
if (status == 0) {
if (shift_count > js->dr_length) {
alloc_chars = (shift_count + 7) >> 3;
kfree(js->dr_buffer);
js->dr_buffer = (u8 *)alt_malloc(alloc_chars);
if (js->dr_buffer == NULL)
status = -ENOMEM;
else
js->dr_length = alloc_chars * 8;
}
}
if (status == 0) {
/*
* Copy preamble data, DR data,
* and postamble data into a buffer
*/
altera_concatenate_data(js->dr_buffer,
js->dr_pre_data,
js->dr_pre,
in_data,
in_index,
count,
js->dr_post_data,
js->dr_post);
/* Do the DRSCAN */
alt_jtag_drscan(astate,
start_code,
shift_count,
js->dr_buffer,
js->dr_buffer);
/* alt_jtag_drscan() always ends in DRPAUSE state */
js->jtag_state = DRPAUSE;
}
if (status == 0)
if (js->drstop_state != DRPAUSE)
status = altera_goto_jstate(astate, js->drstop_state);
if (status == 0)
/* Now extract the returned data from the buffer */
altera_extract_target_data(js->dr_buffer,
out_data,
out_index,
js->dr_pre,
count);
return status;
}
/*
 * Release all lazily-allocated scan/preamble/postamble buffers and
 * NULL the pointers (so a later altera_jinit()/set_* cycle starts
 * clean).  If the TAP was ever driven, park it back in IDLE first.
 */
void altera_free_buffers(struct altera_state *astate)
{
	struct altera_jtag *js = &astate->js;

	/* If the JTAG interface was used, reset it to TLR */
	if (js->jtag_state != ILLEGAL_JTAG_STATE)
		altera_jreset_idle(astate);

	kfree(js->dr_pre_data);
	js->dr_pre_data = NULL;

	kfree(js->dr_post_data);
	js->dr_post_data = NULL;

	kfree(js->dr_buffer);
	js->dr_buffer = NULL;

	kfree(js->ir_pre_data);
	js->ir_pre_data = NULL;

	kfree(js->ir_post_data);
	js->ir_post_data = NULL;

	kfree(js->ir_buffer);
	js->ir_buffer = NULL;
}
| gpl-2.0 |
onexmaster/android_kernel_htc_endeavoru | drivers/pps/clients/pps-ktimer.c | 3041 | 2674 | /*
* pps-ktimer.c -- kernel timer test client
*
*
* Copyright (C) 2005-2006 Rodolfo Giometti <giometti@linux.it>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/pps_kernel.h>
/*
* Global variables
*/
static struct pps_device *pps;
static struct timer_list ktimer;
/*
* The kernel timer
*/
static void pps_ktimer_event(unsigned long ptr)
{
struct pps_event_time ts;
/* First of all we get the time stamp... */
pps_get_ts(&ts);
pps_event(pps, &ts, PPS_CAPTUREASSERT, NULL);
mod_timer(&ktimer, jiffies + HZ);
}
/*
* The echo function
*/
/*
 * Echo callback: log which edge(s) the PPS core asked us to echo.
 */
static void pps_ktimer_echo(struct pps_device *pps, int event, void *data)
{
	const char *assert_str = (event & PPS_CAPTUREASSERT) ? "assert" : "";
	const char *clear_str = (event & PPS_CAPTURECLEAR) ? "clear" : "";

	dev_info(pps->dev, "echo %s %s\n", assert_str, clear_str);
}
/*
* The PPS info struct
*/
static struct pps_source_info pps_ktimer_info = {
	.name = "ktimer",
	.path = "",			/* no backing device path */
	/* Capture/offset on assert edges only; caller may sleep-wait */
	.mode = PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
			PPS_ECHOASSERT |
			PPS_CANWAIT | PPS_TSFMT_TSPEC,
	.echo = pps_ktimer_echo,	/* echo callback defined above */
	.owner = THIS_MODULE,
};
/*
* Module staff
*/
static void __exit pps_ktimer_exit(void)
{
	/*
	 * NOTE(review): log before unregistering — pps->dev is presumably
	 * no longer valid once pps_unregister_source() runs; confirm
	 * against the PPS core.
	 */
	dev_info(pps->dev, "ktimer PPS source unregistered\n");

	/* Stop the periodic timer before dropping the source */
	del_timer_sync(&ktimer);
	pps_unregister_source(pps);
}
static int __init pps_ktimer_init(void)
{
	/* Register the dummy source; assert-edge capture/offset only */
	pps = pps_register_source(&pps_ktimer_info,
				PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
	if (pps == NULL) {
		pr_err("cannot register PPS source\n");
		return -ENOMEM;
	}

	/* Arm the timer so the first synthetic event fires in one second */
	setup_timer(&ktimer, pps_ktimer_event, 0);
	mod_timer(&ktimer, jiffies + HZ);

	dev_info(pps->dev, "ktimer PPS source registered\n");

	return 0;
}
/* Module entry/exit points and metadata */
module_init(pps_ktimer_init);
module_exit(pps_ktimer_exit);

MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_DESCRIPTION("dummy PPS source by using a kernel timer (just for debug)");
MODULE_LICENSE("GPL");
Hogman500/ouya_1_1-kernel | drivers/acpi/acpica/evxface.c | 3041 | 28409 | /******************************************************************************
*
* Module Name: evxface - External interfaces for ACPI events
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#include "acevents.h"
#include "acinterp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evxface")
/*******************************************************************************
*
* FUNCTION: acpi_install_exception_handler
*
* PARAMETERS: Handler - Pointer to the handler function for the
* event
*
* RETURN: Status
*
* DESCRIPTION: Saves the pointer to the handler function
*
******************************************************************************/
#ifdef ACPI_FUTURE_USAGE
/* Register the single, global exception handler (see header above). */
acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_install_exception_handler);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	if (acpi_gbl_exception_handler) {
		/* Only one exception handler may be registered at a time */
		status = AE_ALREADY_EXISTS;
	} else {
		/* Install the new handler */
		acpi_gbl_exception_handler = handler;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
*
* FUNCTION: acpi_install_global_event_handler
*
* PARAMETERS: Handler - Pointer to the global event handler function
* Context - Value passed to the handler on each event
*
* RETURN: Status
*
* DESCRIPTION: Saves the pointer to the handler function. The global handler
* is invoked upon each incoming GPE and Fixed Event. It is
* invoked at interrupt level at the time of the event dispatch.
* Can be used to update event counters, etc.
*
******************************************************************************/
/* Register the single, global GPE/fixed-event handler (see header above). */
acpi_status
acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_install_global_event_handler);

	/* A handler function is mandatory */
	if (!handler) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	if (acpi_gbl_global_event_handler) {
		/* Only one global handler may be registered at a time */
		status = AE_ALREADY_EXISTS;
	} else {
		acpi_gbl_global_event_handler = handler;
		acpi_gbl_global_event_handler_context = context;
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler)
/*******************************************************************************
*
* FUNCTION: acpi_install_fixed_event_handler
*
* PARAMETERS: Event - Event type to enable.
* Handler - Pointer to the handler function for the
* event
* Context - Value passed to the handler on each GPE
*
* RETURN: Status
*
* DESCRIPTION: Saves the pointer to the handler function and then enables the
* event.
*
******************************************************************************/
acpi_status
acpi_install_fixed_event_handler(u32 event,
				 acpi_event_handler handler, void *context)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_install_fixed_event_handler);

	/* Parameter validation */

	if (event > ACPI_EVENT_MAX) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Don't allow two handlers. */

	if (NULL != acpi_gbl_fixed_event_handlers[event].handler) {
		status = AE_ALREADY_EXISTS;
		goto cleanup;
	}

	/* Install the handler before enabling the event */

	acpi_gbl_fixed_event_handlers[event].handler = handler;
	acpi_gbl_fixed_event_handlers[event].context = context;

	/* Clear any pending occurrence, then enable the event */

	status = acpi_clear_event(event);
	if (ACPI_SUCCESS(status))
		status = acpi_enable_event(event, 0);
	if (ACPI_FAILURE(status)) {
		ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
			      event));

		/* Remove the handler: roll back the install on enable failure */

		acpi_gbl_fixed_event_handlers[event].handler = NULL;
		acpi_gbl_fixed_event_handlers[event].context = NULL;
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Enabled fixed event %X, Handler=%p\n", event,
				  handler));
	}

cleanup:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_install_fixed_event_handler)
/*******************************************************************************
*
* FUNCTION: acpi_remove_fixed_event_handler
*
* PARAMETERS: Event - Event type to disable.
* Handler - Address of the handler
*
* RETURN: Status
*
* DESCRIPTION: Disables the event and unregisters the event handler.
*
******************************************************************************/
acpi_status
acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(acpi_remove_fixed_event_handler);

	/* Parameter validation */

	if (event > ACPI_EVENT_MAX) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Disable the event before removing the handler */

	status = acpi_disable_event(event, 0);

	/*
	 * Always Remove the handler — even when the disable fails.
	 * NOTE(review): the "handler" argument is never compared against the
	 * installed handler, so any caller can remove it; confirm intended.
	 */

	acpi_gbl_fixed_event_handlers[event].handler = NULL;
	acpi_gbl_fixed_event_handlers[event].context = NULL;

	if (ACPI_FAILURE(status)) {
		ACPI_WARNING((AE_INFO,
			      "Could not write to fixed event enable register 0x%X",
			      event));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
				  event));
	}

	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_remove_fixed_event_handler)
/*******************************************************************************
*
* FUNCTION: acpi_populate_handler_object
*
* PARAMETERS: handler_obj - Handler object to populate
* handler_type - The type of handler:
* ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
* ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
* ACPI_ALL_NOTIFY: both system and device
* handler - Address of the handler
* context - Value passed to the handler on each GPE
* next - Address of a handler object to link to
*
* RETURN: None
*
* DESCRIPTION: Populate a handler object.
*
******************************************************************************/
/*
 * Fill in all four fields of a notify-handler object. The caller decides
 * whether "next" links the object into an existing list or is NULL.
 */
static void
acpi_populate_handler_object(struct acpi_object_notify_handler *handler_obj,
			     u32 handler_type,
			     acpi_notify_handler handler, void *context,
			     struct acpi_object_notify_handler *next)
{
	/* List linkage first, then the callback data */
	handler_obj->next = next;
	handler_obj->handler_type = handler_type;
	handler_obj->context = context;
	handler_obj->handler = handler;
}
/*******************************************************************************
*
* FUNCTION: acpi_add_handler_object
*
* PARAMETERS: parent_obj - Parent of the new object
* handler - Address of the handler
* context - Value passed to the handler on each GPE
*
* RETURN: Status
*
* DESCRIPTION: Create a new handler object and populate it.
*
******************************************************************************/
static acpi_status
acpi_add_handler_object(struct acpi_object_notify_handler *parent_obj,
			acpi_notify_handler handler, void *context)
{
	struct acpi_object_notify_handler *handler_obj;

	/* The parent must not be a device notify handler object. */
	if (parent_obj->handler_type & ACPI_DEVICE_NOTIFY)
		return AE_BAD_PARAMETER;

	handler_obj = ACPI_ALLOCATE_ZEROED(sizeof(*handler_obj));
	if (!handler_obj)
		return AE_NO_MEMORY;

	/* New object is always a SYSTEM notify handler, linked after parent */
	acpi_populate_handler_object(handler_obj,
				     ACPI_SYSTEM_NOTIFY,
				     handler, context,
				     parent_obj->next);
	parent_obj->next = handler_obj;

	return AE_OK;
}
/*******************************************************************************
*
* FUNCTION: acpi_install_notify_handler
*
* PARAMETERS: Device - The device for which notifies will be handled
* handler_type - The type of handler:
* ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
* ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
* ACPI_ALL_NOTIFY: both system and device
* Handler - Address of the handler
* Context - Value passed to the handler on each GPE
*
* RETURN: Status
*
* DESCRIPTION: Install a handler for notifies on an ACPI device
*
******************************************************************************/
acpi_status
acpi_install_notify_handler(acpi_handle device,
			    u32 handler_type,
			    acpi_notify_handler handler, void *context)
{
	union acpi_operand_object *obj_desc;
	union acpi_operand_object *notify_obj;
	struct acpi_namespace_node *node;
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_install_notify_handler);

	/* Parameter validation */

	if ((!device) ||
	    (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Convert and validate the device handle */

	node = acpi_ns_validate_handle(device);
	if (!node) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/*
	 * Root Object:
	 * Registering a notify handler on the root object indicates that the
	 * caller wishes to receive notifications for all objects. Note that
	 * only one <external> global handler can be registered (per notify type).
	 */
	if (device == ACPI_ROOT_OBJECT) {

		/* Make sure the handler is not already installed */

		if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
		     acpi_gbl_system_notify.handler) ||
		    ((handler_type & ACPI_DEVICE_NOTIFY) &&
		     acpi_gbl_device_notify.handler)) {
			status = AE_ALREADY_EXISTS;
			goto unlock_and_exit;
		}

		if (handler_type & ACPI_SYSTEM_NOTIFY) {
			acpi_gbl_system_notify.node = node;
			acpi_gbl_system_notify.handler = handler;
			acpi_gbl_system_notify.context = context;
		}

		if (handler_type & ACPI_DEVICE_NOTIFY) {
			acpi_gbl_device_notify.node = node;
			acpi_gbl_device_notify.handler = handler;
			acpi_gbl_device_notify.context = context;
		}

		/* Global notify handler installed */
	}

	/*
	 * All Other Objects:
	 * Caller will only receive notifications specific to the target object.
	 * Note that only certain object types can receive notifications.
	 */
	else {
		/* Notifies allowed on this object? */

		if (!acpi_ev_is_notify_object(node)) {
			status = AE_TYPE;
			goto unlock_and_exit;
		}

		/* Check for an existing internal object */

		obj_desc = acpi_ns_get_attached_object(node);
		if (obj_desc) {

			/* Object exists. */

			/* For a device notify, make sure there's no handler. */
			if ((handler_type & ACPI_DEVICE_NOTIFY) &&
			    obj_desc->common_notify.device_notify) {
				status = AE_ALREADY_EXISTS;
				goto unlock_and_exit;
			}

			/* System notifies may have more handlers installed. */
			notify_obj = obj_desc->common_notify.system_notify;

			if ((handler_type & ACPI_SYSTEM_NOTIFY) && notify_obj) {
				struct acpi_object_notify_handler *parent_obj;

				if (handler_type & ACPI_DEVICE_NOTIFY) {
					status = AE_ALREADY_EXISTS;
					goto unlock_and_exit;
				}

				/*
				 * Fix: was "¬ify_obj" — HTML-entity
				 * mojibake of "&notify_obj" ("&not" + "ify_obj").
				 */
				parent_obj = &notify_obj->notify;
				status = acpi_add_handler_object(parent_obj,
								 handler,
								 context);
				goto unlock_and_exit;
			}
		} else {
			/* Create a new object */

			obj_desc = acpi_ut_create_internal_object(node->type);
			if (!obj_desc) {
				status = AE_NO_MEMORY;
				goto unlock_and_exit;
			}

			/* Attach new object to the Node */

			status =
			    acpi_ns_attach_object(device, obj_desc, node->type);

			/* Remove local reference to the object */

			acpi_ut_remove_reference(obj_desc);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}
		}

		/* Install the handler */

		notify_obj =
		    acpi_ut_create_internal_object(ACPI_TYPE_LOCAL_NOTIFY);
		if (!notify_obj) {
			status = AE_NO_MEMORY;
			goto unlock_and_exit;
		}

		/* Fix: was "¬ify_obj" — same mojibake as above */
		acpi_populate_handler_object(&notify_obj->notify,
					     handler_type,
					     handler, context,
					     NULL);

		if (handler_type & ACPI_SYSTEM_NOTIFY) {
			obj_desc->common_notify.system_notify = notify_obj;
		}

		if (handler_type & ACPI_DEVICE_NOTIFY) {
			obj_desc->common_notify.device_notify = notify_obj;
		}

		if (handler_type == ACPI_ALL_NOTIFY) {

			/* Extra ref if installed in both */

			acpi_ut_add_reference(notify_obj);
		}
	}

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
	return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_install_notify_handler)
/*******************************************************************************
*
* FUNCTION: acpi_remove_notify_handler
*
* PARAMETERS: Device - The device for which notifies will be handled
* handler_type - The type of handler:
* ACPI_SYSTEM_NOTIFY: system_handler (00-7f)
* ACPI_DEVICE_NOTIFY: driver_handler (80-ff)
* ACPI_ALL_NOTIFY: both system and device
* Handler - Address of the handler
*
* RETURN: Status
*
* DESCRIPTION: Remove a handler for notifies on an ACPI device
*
******************************************************************************/
acpi_status
acpi_remove_notify_handler(acpi_handle device,
			   u32 handler_type, acpi_notify_handler handler)
{
	union acpi_operand_object *notify_obj;
	union acpi_operand_object *obj_desc;
	struct acpi_namespace_node *node;
	acpi_status status;

	ACPI_FUNCTION_TRACE(acpi_remove_notify_handler);

	/* Parameter validation */

	if ((!device) ||
	    (!handler) || (handler_type > ACPI_MAX_NOTIFY_HANDLER_TYPE)) {
		status = AE_BAD_PARAMETER;
		goto exit;
	}

	/* Make sure all deferred tasks are completed */

	acpi_os_wait_events_complete(NULL);

	status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
	if (ACPI_FAILURE(status)) {
		goto exit;
	}

	/* Convert and validate the device handle */

	node = acpi_ns_validate_handle(device);
	if (!node) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Root Object */

	if (device == ACPI_ROOT_OBJECT) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "Removing notify handler for namespace root object\n"));

		if (((handler_type & ACPI_SYSTEM_NOTIFY) &&
		     !acpi_gbl_system_notify.handler) ||
		    ((handler_type & ACPI_DEVICE_NOTIFY) &&
		     !acpi_gbl_device_notify.handler)) {
			status = AE_NOT_EXIST;
			goto unlock_and_exit;
		}

		if (handler_type & ACPI_SYSTEM_NOTIFY) {
			acpi_gbl_system_notify.node = NULL;
			acpi_gbl_system_notify.handler = NULL;
			acpi_gbl_system_notify.context = NULL;
		}

		if (handler_type & ACPI_DEVICE_NOTIFY) {
			acpi_gbl_device_notify.node = NULL;
			acpi_gbl_device_notify.handler = NULL;
			acpi_gbl_device_notify.context = NULL;
		}
	}

	/* All Other Objects */

	else {
		/* Notifies allowed on this object? */

		if (!acpi_ev_is_notify_object(node)) {
			status = AE_TYPE;
			goto unlock_and_exit;
		}

		/* Check for an existing internal object */

		obj_desc = acpi_ns_get_attached_object(node);
		if (!obj_desc) {
			status = AE_NOT_EXIST;
			goto unlock_and_exit;
		}

		/* Object exists - make sure there's an existing handler */

		if (handler_type & ACPI_SYSTEM_NOTIFY) {
			struct acpi_object_notify_handler *handler_obj;
			struct acpi_object_notify_handler *parent_obj;

			notify_obj = obj_desc->common_notify.system_notify;
			if (!notify_obj) {
				status = AE_NOT_EXIST;
				goto unlock_and_exit;
			}

			/*
			 * Fix: was "¬ify_obj" — HTML-entity mojibake
			 * of "&notify_obj" ("&not" + "ify_obj").
			 */
			handler_obj = &notify_obj->notify;
			parent_obj = NULL;

			/* Walk the handler list looking for a match */
			while (handler_obj->handler != handler) {
				if (handler_obj->next) {
					parent_obj = handler_obj;
					handler_obj = handler_obj->next;
				} else {
					break;
				}
			}

			if (handler_obj->handler != handler) {
				status = AE_BAD_PARAMETER;
				goto unlock_and_exit;
			}

			/*
			 * Remove the handler. There are three possible cases.
			 * First, we may need to remove a non-embedded object.
			 * Second, we may need to remove the embedded object's
			 * handler data, while non-embedded objects exist.
			 * Finally, we may need to remove the embedded object
			 * entirely along with its container.
			 */
			if (parent_obj) {
				/* Non-embedded object is being removed. */
				parent_obj->next = handler_obj->next;
				ACPI_FREE(handler_obj);
			} else if (notify_obj->notify.next) {
				/*
				 * The handler matches the embedded object, but
				 * there are more handler objects in the list.
				 * Replace the embedded object's data with the
				 * first next object's data and remove that
				 * object.
				 */
				/* Fix: was "¬ify_obj" — same mojibake */
				parent_obj = &notify_obj->notify;
				handler_obj = notify_obj->notify.next;
				*parent_obj = *handler_obj;
				ACPI_FREE(handler_obj);
			} else {
				/* No more handler objects in the list. */
				obj_desc->common_notify.system_notify = NULL;
				acpi_ut_remove_reference(notify_obj);
			}
		}

		if (handler_type & ACPI_DEVICE_NOTIFY) {
			notify_obj = obj_desc->common_notify.device_notify;
			if (!notify_obj) {
				status = AE_NOT_EXIST;
				goto unlock_and_exit;
			}

			if (notify_obj->notify.handler != handler) {
				status = AE_BAD_PARAMETER;
				goto unlock_and_exit;
			}

			/* Remove the handler */

			obj_desc->common_notify.device_notify = NULL;
			acpi_ut_remove_reference(notify_obj);
		}
	}

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
exit:
	if (ACPI_FAILURE(status))
		ACPI_EXCEPTION((AE_INFO, status, "Removing notify handler"));
	return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_remove_notify_handler)
/*******************************************************************************
*
* FUNCTION: acpi_install_gpe_handler
*
* PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
* defined GPEs)
* gpe_number - The GPE number within the GPE block
* Type - Whether this GPE should be treated as an
* edge- or level-triggered interrupt.
* Address - Address of the handler
* Context - Value passed to the handler on each GPE
*
* RETURN: Status
*
* DESCRIPTION: Install a handler for a General Purpose Event.
*
******************************************************************************/
acpi_status
acpi_install_gpe_handler(acpi_handle gpe_device,
			 u32 gpe_number,
			 u32 type, acpi_gpe_handler address, void *context)
{
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_gpe_handler_info *handler;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_install_gpe_handler);

	/* Parameter validation */

	if ((!address) || (type & ~ACPI_GPE_XRUPT_TYPE_MASK)) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Allocate memory for the handler object before taking the GPE lock */

	handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info));
	if (!handler) {
		status = AE_NO_MEMORY;
		goto unlock_and_exit;
	}

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto free_and_exit;
	}

	/* Make sure that there isn't a handler there already */

	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_HANDLER) {
		status = AE_ALREADY_EXISTS;
		goto free_and_exit;
	}

	/* Allocate and init handler object; keep the previous dispatch info
	 * so acpi_remove_gpe_handler() can restore it later */

	handler->address = address;
	handler->context = context;
	handler->method_node = gpe_event_info->dispatch.method_node;
	handler->original_flags = gpe_event_info->flags &
	    (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);

	/*
	 * If the GPE is associated with a method, it might have been enabled
	 * automatically during initialization, in which case it has to be
	 * disabled now to avoid spurious execution of the handler.
	 */

	if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
	    && gpe_event_info->runtime_count) {
		handler->originally_enabled = 1;
		(void)acpi_ev_remove_gpe_reference(gpe_event_info);
	}

	/* Install the handler */

	gpe_event_info->dispatch.handler = handler;

	/* Setup up dispatch flags to indicate handler (vs. method) */

	gpe_event_info->flags &=
	    ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
	gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER);

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

unlock_and_exit:
	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);

	/* Error path entered while holding the GPE lock: drop lock, free */
free_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	ACPI_FREE(handler);
	goto unlock_and_exit;
}
ACPI_EXPORT_SYMBOL(acpi_install_gpe_handler)
/*******************************************************************************
*
* FUNCTION: acpi_remove_gpe_handler
*
* PARAMETERS: gpe_device - Namespace node for the GPE (NULL for FADT
* defined GPEs)
* gpe_number - The event to remove a handler
* Address - Address of the handler
*
* RETURN: Status
*
* DESCRIPTION: Remove a handler for a General Purpose acpi_event.
*
******************************************************************************/
acpi_status
acpi_remove_gpe_handler(acpi_handle gpe_device,
			u32 gpe_number, acpi_gpe_handler address)
{
	struct acpi_gpe_event_info *gpe_event_info;
	struct acpi_gpe_handler_info *handler;
	acpi_status status;
	acpi_cpu_flags flags;

	ACPI_FUNCTION_TRACE(acpi_remove_gpe_handler);

	/* Parameter validation */

	if (!address) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Make sure all deferred tasks are completed */

	acpi_os_wait_events_complete(NULL);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Ensure that we have a valid GPE number */

	gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
	if (!gpe_event_info) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Make sure that a handler is indeed installed */

	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
	    ACPI_GPE_DISPATCH_HANDLER) {
		status = AE_NOT_EXIST;
		goto unlock_and_exit;
	}

	/* Make sure that the installed handler is the same */

	if (gpe_event_info->dispatch.handler->address != address) {
		status = AE_BAD_PARAMETER;
		goto unlock_and_exit;
	}

	/* Remove the handler */

	handler = gpe_event_info->dispatch.handler;

	/* Restore Method node (if any), set dispatch flags */

	gpe_event_info->dispatch.method_node = handler->method_node;
	gpe_event_info->flags &=
	    ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
	gpe_event_info->flags |= handler->original_flags;

	/*
	 * If the GPE was previously associated with a method and it was
	 * enabled, it should be enabled at this point to restore the
	 * post-initialization configuration.
	 */

	if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD)
	    && handler->originally_enabled)
		(void)acpi_ev_add_gpe_reference(gpe_event_info);

	/* Now we can free the handler object */

	ACPI_FREE(handler);

unlock_and_exit:
	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);

	(void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	return_ACPI_STATUS(status);
}
ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
/*******************************************************************************
*
* FUNCTION: acpi_acquire_global_lock
*
* PARAMETERS: Timeout - How long the caller is willing to wait
* Handle - Where the handle to the lock is returned
* (if acquired)
*
* RETURN: Status
*
* DESCRIPTION: Acquire the ACPI Global Lock
*
* Note: Allows callers with the same thread ID to acquire the global lock
* multiple times. In other words, externally, the behavior of the global lock
* is identical to an AML mutex. On the first acquire, a new handle is
* returned. On any subsequent calls to acquire by the same thread, the same
* handle is returned.
*
******************************************************************************/
acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
{
	acpi_status status;

	if (!handle) {
		return (AE_BAD_PARAMETER);
	}

	/* Must lock interpreter to prevent race conditions */

	acpi_ex_enter_interpreter();

	/* The global lock behaves like an AML mutex (see header above) */

	status = acpi_ex_acquire_mutex_object(timeout,
					      acpi_gbl_global_lock_mutex,
					      acpi_os_get_thread_id());

	if (ACPI_SUCCESS(status)) {

		/* Return the global lock handle (updated in acpi_ev_acquire_global_lock) */

		*handle = acpi_gbl_global_lock_handle;
	}

	acpi_ex_exit_interpreter();
	return (status);
}
ACPI_EXPORT_SYMBOL(acpi_acquire_global_lock)
/*******************************************************************************
*
* FUNCTION: acpi_release_global_lock
*
* PARAMETERS: Handle - Returned from acpi_acquire_global_lock
*
* RETURN: Status
*
* DESCRIPTION: Release the ACPI Global Lock. The handle must be valid.
*
******************************************************************************/
/* Release the ACPI Global Lock; the handle must match the one returned
 * by acpi_acquire_global_lock(). */
acpi_status acpi_release_global_lock(u32 handle)
{
	/* Reject a zero handle or one that does not match the current lock */
	if (!handle || (handle != acpi_gbl_global_lock_handle)) {
		return (AE_NOT_ACQUIRED);
	}

	return (acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex));
}
ACPI_EXPORT_SYMBOL(acpi_release_global_lock)
| gpl-2.0 |
vaginessa/Samsung_STE_Kernel | drivers/net/wireless/bcm4329/bcmsdh_sdmmc.c | 3809 | 34854 | /*
* BCMSDH Function Driver for the native SDIO/MMC driver in the Linux Kernel
*
* Copyright (C) 1999-2010, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: bcmsdh_sdmmc.c,v 1.1.2.5.6.30.4.1 2010/09/02 23:12:21 Exp $
*/
#include <typedefs.h>
#include <bcmdevs.h>
#include <bcmendian.h>
#include <bcmutils.h>
#include <osl.h>
#include <sdio.h> /* SDIO Device and Protocol Specs */
#include <sdioh.h> /* SDIO Host Controller Specification */
#include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */
#include <sdiovar.h> /* ioctl/iovars */
#include <linux/mmc/core.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <dngl_stats.h>
#include <dhd.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
extern volatile bool dhd_mmc_suspend;
#endif
#include "bcmsdh_sdmmc.h"
#ifndef BCMSDH_MODULE
extern int sdio_function_init(void);
extern void sdio_function_cleanup(void);
#endif /* BCMSDH_MODULE */
#if !defined(OOB_INTR_ONLY)
static void IRQHandler(struct sdio_func *func);
static void IRQHandlerF2(struct sdio_func *func);
#endif /* !defined(OOB_INTR_ONLY) */
static int sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr);
extern int sdio_reset_comm(struct mmc_card *card);
extern PBCMSDH_SDMMC_INSTANCE gInstance;
uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */
uint sd_f2_blocksize = 512; /* Default blocksize */
uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */
uint sd_power = 1; /* Default to SD Slot powered ON */
uint sd_clock = 1; /* Default to SD Clock turned ON */
uint sd_hiok = FALSE; /* Don't use hi-speed mode by default */
uint sd_msglevel = 0x01;
uint sd_use_dma = TRUE;
DHD_PM_RESUME_WAIT_INIT(sdioh_request_byte_wait);
DHD_PM_RESUME_WAIT_INIT(sdioh_request_word_wait);
DHD_PM_RESUME_WAIT_INIT(sdioh_request_packet_wait);
DHD_PM_RESUME_WAIT_INIT(sdioh_request_buffer_wait);
#define DMA_ALIGN_MASK 0x03
int sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data);
/*
 * Read the card's common and per-function CIS pointers, then enable
 * SDIO Function 1.
 *
 * NOTE: historical behavior — always returns FALSE; a failure to enable
 * F1 is only logged, never propagated to the caller.
 */
static int
sdioh_sdmmc_card_enablefuncs(sdioh_info_t *sd)
{
	int err_ret;
	uint32 fbraddr;
	uint8 func;

	sd_trace(("%s\n", __FUNCTION__));

	/* Get the Card's common CIS address */
	sd->com_cis_ptr = sdioh_sdmmc_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
	sd->func_cis_ptr[0] = sd->com_cis_ptr;
	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));

	/* Get the Card's function CIS (for each function) */
	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
		sd->func_cis_ptr[func] = sdioh_sdmmc_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
		         __FUNCTION__, func, sd->func_cis_ptr[func]));
	}

	/* Fix: removed a duplicated func_cis_ptr[0] assignment and repeated
	 * log line that re-stated the common CIS pointer set above. */

	/* Enable Function 1 */
	sdio_claim_host(gInstance->func[1]);
	err_ret = sdio_enable_func(gInstance->func[1]);
	sdio_release_host(gInstance->func[1]);
	if (err_ret) {
		sd_err(("bcmsdh_sdmmc: Failed to enable F1 Err: 0x%08x", err_ret));
	}

	return FALSE;
}
/*
* Public entry points & extern's
*/
/*
 * Allocate and initialise the sdioh layer on top of the sdio_func
 * instances the mmc core already probed into gInstance.  Sets the F1
 * block size to 64 and F2 to sd_f2_blocksize, then reads the CIS
 * pointers and enables F1 via sdioh_sdmmc_card_enablefuncs().
 * 'bar0' and 'irq' are unused by this mmc-stack-backed implementation.
 * Returns the new sdioh_info_t, or NULL on failure.
 */
extern sdioh_info_t *
sdioh_attach(osl_t *osh, void *bar0, uint irq)
{
	sdioh_info_t *sd;
	int err_ret;

	sd_trace(("%s\n", __FUNCTION__));

	/* The mmc core must have probed the card before we can attach. */
	if (gInstance == NULL) {
		sd_err(("%s: SDIO Device not present\n", __FUNCTION__));
		return NULL;
	}

	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
		return NULL;
	}
	bzero((char *)sd, sizeof(sdioh_info_t));
	sd->osh = osh;
	if (sdioh_sdmmc_osinit(sd) != 0) {
		sd_err(("%s:sdioh_sdmmc_osinit() failed\n", __FUNCTION__));
		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
		return NULL;
	}

	sd->num_funcs = 2;		/* F1 + F2; F0 is tracked separately */
	sd->sd_blockmode = TRUE;
	sd->use_client_ints = TRUE;
	sd->client_block_size[0] = 64;

	gInstance->sd = sd;

	/* Claim host controller */
	sdio_claim_host(gInstance->func[1]);

	sd->client_block_size[1] = 64;
	err_ret = sdio_set_block_size(gInstance->func[1], 64);
	if (err_ret) {
		sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
	}

	/* Release host controller F1 */
	sdio_release_host(gInstance->func[1]);

	/* F2 may not have been probed; only configure it when present. */
	if (gInstance->func[2]) {
		/* Claim host controller F2 */
		sdio_claim_host(gInstance->func[2]);

		sd->client_block_size[2] = sd_f2_blocksize;
		err_ret = sdio_set_block_size(gInstance->func[2], sd_f2_blocksize);
		if (err_ret) {
			sd_err(("bcmsdh_sdmmc: Failed to set F2 blocksize to %d\n",
				sd_f2_blocksize));
		}

		/* Release host controller F2 */
		sdio_release_host(gInstance->func[2]);
	}
	sdioh_sdmmc_card_enablefuncs(sd);

	sd_trace(("%s: Done\n", __FUNCTION__));
	return sd;
}
/*
 * Tear down the sdioh layer: disable F2 and F1 on the card and free the
 * sdioh_info_t.  NULL 'sd' is tolerated (no-op).
 *
 * Fix: sdioh_attach() only touches gInstance->func[2] when it is
 * non-NULL, but the original detach dereferenced it unconditionally —
 * guard F2 the same way to avoid a NULL dereference on single-function
 * cards.
 */
extern SDIOH_API_RC
sdioh_detach(osl_t *osh, sdioh_info_t *sd)
{
	sd_trace(("%s\n", __FUNCTION__));
	if (sd) {
		/* Disable Function 2 (only present on two-function cards) */
		if (gInstance->func[2]) {
			sdio_claim_host(gInstance->func[2]);
			sdio_disable_func(gInstance->func[2]);
			sdio_release_host(gInstance->func[2]);
		}

		/* Disable Function 1 */
		sdio_claim_host(gInstance->func[1]);
		sdio_disable_func(gInstance->func[1]);
		sdio_release_host(gInstance->func[1]);

		/* deregister irq */
		sdioh_sdmmc_osfree(sd);

		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
	}
	return SDIOH_API_RC_SUCCESS;
}
#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
/*
 * Enable F1/F2 interrupts plus the master interrupt enable in the CCCR
 * INTEN register, accessed through F0.  Used for hardware out-of-band
 * interrupt setups (compiled under OOB_INTR_ONLY && HW_OOB).
 * Returns SDIOH_API_RC_SUCCESS, or FAIL on a CCCR read/write error.
 */
extern SDIOH_API_RC
sdioh_enable_func_intr(void)
{
	uint8 reg;
	int err;

	if (gInstance->func[0]) {
		sdio_claim_host(gInstance->func[0]);

		/* Read-modify-write the interrupt-enable register. */
		reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
		if (err) {
			sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
			sdio_release_host(gInstance->func[0]);
			return SDIOH_API_RC_FAIL;
		}

		/* Enable F1 and F2 interrupts, set master enable */
		reg |= (INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN | INTR_CTL_MASTER_EN);

		sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
		sdio_release_host(gInstance->func[0]);

		if (err) {
			sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
			return SDIOH_API_RC_FAIL;
		}
	}

	return SDIOH_API_RC_SUCCESS;
}
/*
 * Clear the F1/F2 interrupt-enable bits in CCCR INTEN via F0; when no
 * function-enable bits remain set, the whole register (including the
 * master enable) is cleared.  Counterpart of sdioh_enable_func_intr().
 * Returns SDIOH_API_RC_SUCCESS, or FAIL on a CCCR read/write error.
 */
extern SDIOH_API_RC
sdioh_disable_func_intr(void)
{
	uint8 reg;
	int err;

	if (gInstance->func[0]) {
		sdio_claim_host(gInstance->func[0]);

		reg = sdio_readb(gInstance->func[0], SDIOD_CCCR_INTEN, &err);
		if (err) {
			sd_err(("%s: error for read SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
			sdio_release_host(gInstance->func[0]);
			return SDIOH_API_RC_FAIL;
		}

		reg &= ~(INTR_CTL_FUNC1_EN | INTR_CTL_FUNC2_EN);
		/* Disable master interrupt with the last function interrupt
		 * (bits 1..7 are the per-function enables; if none are left,
		 * zero the register so the master enable bit drops too).
		 */
		if (!(reg & 0xFE))
			reg = 0;

		sdio_writeb(gInstance->func[0], reg, SDIOD_CCCR_INTEN, &err);
		sdio_release_host(gInstance->func[0]);

		if (err) {
			sd_err(("%s: error for write SDIO_CCCR_IENx : 0x%x\n", __FUNCTION__, err));
			return SDIOH_API_RC_FAIL;
		}
	}

	return SDIOH_API_RC_SUCCESS;
}
#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
/* Configure callback to client when we receive client interrupt */
/*
 * Register 'fn(argh)' as the client interrupt handler.  In the in-band
 * build this claims the sdio IRQ on F1 and F2 (the actual dispatch
 * happens in IRQHandler/IRQHandlerF2); in the HW OOB build it only
 * enables the CCCR function interrupts.  Rejects a NULL handler.
 */
extern SDIOH_API_RC
sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
{
	sd_trace(("%s: Entering\n", __FUNCTION__));
	if (fn == NULL) {
		sd_err(("%s: interrupt handler is NULL, not registering\n", __FUNCTION__));
		return SDIOH_API_RC_FAIL;
	}
#if !defined(OOB_INTR_ONLY)
	sd->intr_handler = fn;
	sd->intr_handler_arg = argh;
	sd->intr_handler_valid = TRUE;

	/* register and unmask irq */
	if (gInstance->func[2]) {
		sdio_claim_host(gInstance->func[2]);
		sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
		sdio_release_host(gInstance->func[2]);
	}

	if (gInstance->func[1]) {
		sdio_claim_host(gInstance->func[1]);
		sdio_claim_irq(gInstance->func[1], IRQHandler);
		sdio_release_host(gInstance->func[1]);
	}
#elif defined(HW_OOB)
	sdioh_enable_func_intr();
#endif /* defined(OOB_INTR_ONLY) */
	return SDIOH_API_RC_SUCCESS;
}
/*
 * Undo sdioh_interrupt_register(): release the F1/F2 sdio IRQs (in-band
 * build) or disable the CCCR function interrupts (HW OOB build), and
 * clear the stored handler state.
 */
extern SDIOH_API_RC
sdioh_interrupt_deregister(sdioh_info_t *sd)
{
	sd_trace(("%s: Entering\n", __FUNCTION__));

#if !defined(OOB_INTR_ONLY)
	if (gInstance->func[1]) {
		/* register and unmask irq */
		sdio_claim_host(gInstance->func[1]);
		sdio_release_irq(gInstance->func[1]);
		sdio_release_host(gInstance->func[1]);
	}

	if (gInstance->func[2]) {
		/* Claim host controller F2 */
		sdio_claim_host(gInstance->func[2]);
		sdio_release_irq(gInstance->func[2]);
		/* Release host controller F2 */
		sdio_release_host(gInstance->func[2]);
	}

	sd->intr_handler_valid = FALSE;
	sd->intr_handler = NULL;
	sd->intr_handler_arg = NULL;
#elif defined(HW_OOB)
	sdioh_disable_func_intr();
#endif /* !defined(OOB_INTR_ONLY) */
	return SDIOH_API_RC_SUCCESS;
}
/*
 * Report whether client interrupts are currently enabled.
 * The result is returned through *onoff; always succeeds.
 */
extern SDIOH_API_RC
sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
{
	sd_trace(("%s: Entering\n", __FUNCTION__));

	*onoff = sd->client_intr_enabled;

	return SDIOH_API_RC_SUCCESS;
}
#if defined(DHD_DEBUG)
/*
 * Debug hook: pending-interrupt status is not tracked by this host
 * backend, so always report "none pending".
 */
extern bool
sdioh_interrupt_pending(sdioh_info_t *sd)
{
	return 0;
}
#endif
/*
 * Return the number of I/O functions configured at attach time
 * (F0 is not counted).
 */
uint
sdioh_query_iofnum(sdioh_info_t *sd)
{
	return (sd->num_funcs);
}
/* IOVar table */
/* Action IDs for the named tunables handled by sdioh_iovar_op().
 * NOTE(review): IOV_HCIREGS has no entry in sdioh_iovars[] and no case
 * in sdioh_iovar_op(), so it is currently unreachable.
 */
enum {
	IOV_MSGLEVEL = 1,
	IOV_BLOCKMODE,
	IOV_BLOCKSIZE,
	IOV_DMA,
	IOV_USEINTS,
	IOV_NUMINTS,
	IOV_NUMLOCALINTS,
	IOV_HOSTREG,
	IOV_DEVREG,
	IOV_DIVISOR,
	IOV_SDMODE,
	IOV_HISPEED,
	IOV_HCIREGS,
	IOV_POWER,
	IOV_CLOCK,
	IOV_RXCHAIN
};

/* Name -> action-ID mapping consumed by bcm_iovar_lookup()/lencheck(). */
const bcm_iovar_t sdioh_iovars[] = {
	{"sd_msglevel",	IOV_MSGLEVEL,	0,	IOVT_UINT32,	0 },
	{"sd_blockmode", IOV_BLOCKMODE,	0,	IOVT_BOOL,	0 },
	{"sd_blocksize", IOV_BLOCKSIZE,	0,	IOVT_UINT32,	0 }, /* ((fn << 16) | size) */
	{"sd_dma",	IOV_DMA,	0,	IOVT_BOOL,	0 },
	{"sd_ints",	IOV_USEINTS,	0,	IOVT_BOOL,	0 },
	{"sd_numints",	IOV_NUMINTS,	0,	IOVT_UINT32,	0 },
	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32,	0 },
	{"sd_hostreg",	IOV_HOSTREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
	{"sd_devreg",	IOV_DEVREG,	0,	IOVT_BUFFER,	sizeof(sdreg_t) },
	{"sd_divisor",	IOV_DIVISOR,	0,	IOVT_UINT32,	0 },
	{"sd_power",	IOV_POWER,	0,	IOVT_UINT32,	0 },
	{"sd_clock",	IOV_CLOCK,	0,	IOVT_UINT32,	0 },
	{"sd_mode",	IOV_SDMODE,	0,	IOVT_UINT32,	100},
	{"sd_highspeed", IOV_HISPEED,	0,	IOVT_UINT32,	0 },
	{"sd_rxchain",	IOV_RXCHAIN,	0,	IOVT_BOOL,	0 },
	{NULL, 0, 0, 0, 0 }
};
/*
 * Get/set a named driver tunable (bcm "iovar" mechanism).  'set' selects
 * write vs read; get results are copied out through 'arg'.  Most values
 * round-trip module-level globals (sd_msglevel, sd_divisor, ...) or
 * fields of 'si'.  Returns 0 or a BCME_xxx error code.
 */
int
sdioh_iovar_op(sdioh_info_t *si, const char *name,
               void *params, int plen, void *arg, int len, bool set)
{
	const bcm_iovar_t *vi = NULL;
	int bcmerror = 0;
	int val_size;
	int32 int_val = 0;
	bool bool_val;	/* computed below but unused by the current cases */
	uint32 actionid;

	ASSERT(name);
	ASSERT(len >= 0);

	/* Get must have return space; Set does not take qualifiers */
	ASSERT(set || (arg && len));
	ASSERT(!set || (!params && !plen));

	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));

	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
		bcmerror = BCME_UNSUPPORTED;
		goto exit;
	}

	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
		goto exit;

	/* Set up params so get and set can share the convenience variables */
	if (params == NULL) {
		params = arg;
		plen = len;
	}

	if (vi->type == IOVT_VOID)
		val_size = 0;
	else if (vi->type == IOVT_BUFFER)
		val_size = len;
	else
		val_size = sizeof(int);

	/* First 4 bytes of params double as the integer convenience value. */
	if (plen >= (int)sizeof(int_val))
		bcopy(params, &int_val, sizeof(int_val));

	bool_val = (int_val != 0) ? TRUE : FALSE;

	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
	switch (actionid) {
	case IOV_GVAL(IOV_MSGLEVEL):
		int_val = (int32)sd_msglevel;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_MSGLEVEL):
		sd_msglevel = int_val;
		break;

	case IOV_GVAL(IOV_BLOCKMODE):
		int_val = (int32)si->sd_blockmode;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_BLOCKMODE):
		si->sd_blockmode = (bool)int_val;
		/* Haven't figured out how to make non-block mode with DMA */
		break;

	case IOV_GVAL(IOV_BLOCKSIZE):
		/* int_val is the function number here; validate before indexing */
		if ((uint32)int_val > si->num_funcs) {
			bcmerror = BCME_BADARG;
			break;
		}
		int_val = (int32)si->client_block_size[int_val];
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_BLOCKSIZE):
	{
		/* Encoding: (func << 16) | size; 0 size means "use maximum" */
		uint func = ((uint32)int_val >> 16);
		uint blksize = (uint16)int_val;
		uint maxsize;

		if (func > si->num_funcs) {
			bcmerror = BCME_BADARG;
			break;
		}

		switch (func) {
		case 0: maxsize = 32; break;
		case 1: maxsize = BLOCK_SIZE_4318; break;
		case 2: maxsize = BLOCK_SIZE_4328; break;
		default: maxsize = 0;
		}
		if (blksize > maxsize) {
			bcmerror = BCME_BADARG;
			break;
		}
		if (!blksize) {
			blksize = maxsize;
		}

		/* Now set it */
		si->client_block_size[func] = blksize;

		break;
	}

	case IOV_GVAL(IOV_RXCHAIN):
		/* This backend never chains RX packets */
		int_val = FALSE;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_DMA):
		int_val = (int32)si->sd_use_dma;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DMA):
		si->sd_use_dma = (bool)int_val;
		break;

	case IOV_GVAL(IOV_USEINTS):
		int_val = (int32)si->use_client_ints;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_USEINTS):
		si->use_client_ints = (bool)int_val;
		if (si->use_client_ints)
			si->intmask |= CLIENT_INTR;
		else
			si->intmask &= ~CLIENT_INTR;
		break;

	case IOV_GVAL(IOV_DIVISOR):
		int_val = (uint32)sd_divisor;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DIVISOR):
		sd_divisor = int_val;
		break;

	case IOV_GVAL(IOV_POWER):
		int_val = (uint32)sd_power;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_POWER):
		sd_power = int_val;
		break;

	case IOV_GVAL(IOV_CLOCK):
		int_val = (uint32)sd_clock;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_CLOCK):
		sd_clock = int_val;
		break;

	case IOV_GVAL(IOV_SDMODE):
		int_val = (uint32)sd_sdmode;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_SDMODE):
		sd_sdmode = int_val;
		break;

	case IOV_GVAL(IOV_HISPEED):
		int_val = (uint32)sd_hiok;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_HISPEED):
		sd_hiok = int_val;
		break;

	case IOV_GVAL(IOV_NUMINTS):
		int_val = (int32)si->intrcount;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_NUMLOCALINTS):
		/* no local (host-controller) interrupts are counted here */
		int_val = (int32)0;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_HOSTREG):
	{
		/* Host-register access is not implemented for the mmc-stack
		 * backend: only the access width (8/16/32) is reported.
		 */
		sdreg_t *sd_ptr = (sdreg_t *)params;

		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
			bcmerror = BCME_BADARG;
			break;
		}

		sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
		          (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
		          sd_ptr->offset));
		if (sd_ptr->offset & 1)
			int_val = 8; /* sdioh_sdmmc_rreg8(si, sd_ptr->offset); */
		else if (sd_ptr->offset & 2)
			int_val = 16; /* sdioh_sdmmc_rreg16(si, sd_ptr->offset); */
		else
			int_val = 32; /* sdioh_sdmmc_rreg(si, sd_ptr->offset); */

		bcopy(&int_val, arg, sizeof(int_val));
		break;
	}

	case IOV_SVAL(IOV_HOSTREG):
	{
		/* Write path is likewise a validated no-op (trace only). */
		sdreg_t *sd_ptr = (sdreg_t *)params;

		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) {
			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
			bcmerror = BCME_BADARG;
			break;
		}

		sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
		          (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
		          sd_ptr->offset));
		break;
	}

	case IOV_GVAL(IOV_DEVREG):
	{
		/* Single-byte device register read via CMD52. */
		sdreg_t *sd_ptr = (sdreg_t *)params;
		uint8 data = 0;

		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
			bcmerror = BCME_SDIO_ERROR;
			break;
		}

		int_val = (int)data;
		bcopy(&int_val, arg, sizeof(int_val));
		break;
	}

	case IOV_SVAL(IOV_DEVREG):
	{
		/* Single-byte device register write via CMD52. */
		sdreg_t *sd_ptr = (sdreg_t *)params;
		uint8 data = (uint8)sd_ptr->value;

		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
			bcmerror = BCME_SDIO_ERROR;
			break;
		}
		break;
	}

	default:
		bcmerror = BCME_UNSUPPORTED;
		break;
	}
exit:

	return bcmerror;
}
#if defined(OOB_INTR_ONLY) && defined(HW_OOB)
/*
 * Program the chip's hardware out-of-band interrupt control register
 * (F0 offset 0xf2): write 3 to enable or 4 to disable, then OR in bit 2
 * to select active-high polarity (so enable actually writes 7, disable
 * writes 4 — NOTE(review): the '|= 4' is a no-op for the disable path).
 */
SDIOH_API_RC
sdioh_enable_hw_oob_intr(sdioh_info_t *sd, bool enable)
{
	SDIOH_API_RC status;
	uint8 data;

	if (enable)
		data = 3;	/* enable hw oob interrupt */
	else
		data = 4;	/* disable hw oob interrupt */

	data |= 4;		/* Active HIGH */

	status = sdioh_request_byte(sd, SDIOH_WRITE, 0, 0xf2, &data);
	return status;
}
#endif /* defined(OOB_INTR_ONLY) && defined(HW_OOB) */
/*
 * Read one byte from function 'fnc_num' at 'addr' (CMD52).
 * No lock needed here: sdioh_request_byte() does the host claiming.
 */
extern SDIOH_API_RC
sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
{
	return sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
}
/*
 * Write one byte to function 'fnc_num' at 'addr' (CMD52).
 * No lock needed here: sdioh_request_byte() does the host claiming.
 */
extern SDIOH_API_RC
sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
{
	return sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
}
/*
 * Read the 3-byte little-endian CIS pointer starting at 'regaddr' and
 * return it as a host-order value masked to the valid 17 bits.
 * Read failures are logged; the byte read into 'regdata' is used as-is.
 *
 * Fix: the fifth argument of the regread call was mojibake ("®data",
 * an encoding-corrupted "&regdata") which does not compile — restored.
 */
static int
sdioh_sdmmc_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
{
	/* read 24 bits and return valid 17 bit addr */
	int i;
	uint32 scratch, regdata;
	uint8 *ptr = (uint8 *)&scratch;

	for (i = 0; i < 3; i++) {
		if ((sdioh_sdmmc_card_regread(sd, 0, regaddr, 1, &regdata)) != SUCCESS)
			sd_err(("%s: Can't read!\n", __FUNCTION__));

		*ptr++ = (uint8)regdata;
		regaddr++;
	}

	/* Only the lower 17-bits are valid */
	scratch = ltoh32(scratch);
	scratch &= 0x0001FFFF;
	return (scratch);
}
/*
 * Copy 'length' bytes of function 'func's CIS into 'cisd', one CMD52
 * read at a time starting at the CIS pointer captured at attach time.
 * Zeroes the buffer and fails if no CIS pointer was recorded.
 * NOTE(review): the "func_cis_ptr" log uses sd_err for an informational
 * message, and the regread() return check "< 0" never fires because
 * sdioh_sdmmc_card_regread() always returns SUCCESS.
 */
extern SDIOH_API_RC
sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
{
	uint32 count;
	int offset;
	uint32 foo;
	uint8 *cis = cisd;

	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));

	if (!sd->func_cis_ptr[func]) {
		bzero(cis, length);
		sd_err(("%s: no func_cis_ptr[%d]\n", __FUNCTION__, func));
		return SDIOH_API_RC_FAIL;
	}

	sd_err(("%s: func_cis_ptr[%d]=0x%04x\n", __FUNCTION__, func, sd->func_cis_ptr[func]));

	for (count = 0; count < length; count++) {
		offset = sd->func_cis_ptr[func] + count;
		if (sdioh_sdmmc_card_regread(sd, 0, offset, 1, &foo) < 0) {
			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
			return SDIOH_API_RC_FAIL;
		}

		*cis = (uint8)(foo & 0xff);
		cis++;
	}

	return SDIOH_API_RC_SUCCESS;
}
/*
 * CMD52 single-byte register access.  'rw' nonzero means write.
 * F0 writes are restricted: only the F2 enable/disable bit (via
 * SDIOD_CCCR_IOEN), the abort register (when MMC_SDIO_ABORT is set),
 * and the vendor area (>= 0xF0) are honoured; other F0 writes are
 * refused with a log.  Returns SDIOH_API_RC_SUCCESS or _FAIL.
 *
 * Fix: err_ret must be initialised — the F0/IOEN path performs no SDIO
 * transfer at all when gInstance->func[2] is NULL (and the sub-0xF0
 * refusal path never writes it either), so the trailing "if (err_ret)"
 * read uninitialised memory.
 */
extern SDIOH_API_RC
sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
{
	int err_ret = 0;

	sd_info(("%s: rw=%d, func=%d, addr=0x%05x\n", __FUNCTION__, rw, func, regaddr));

	DHD_PM_RESUME_WAIT(sdioh_request_byte_wait);
	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
	if (rw) { /* CMD52 Write */
		if (func == 0) {
			/* Can only directly write to some F0 registers.  Handle
			 * F2 enable as a special case.
			 */
			if (regaddr == SDIOD_CCCR_IOEN) {
				if (gInstance->func[2]) {
					sdio_claim_host(gInstance->func[2]);
					if (*byte & SDIO_FUNC_ENABLE_2) {
						/* Enable Function 2 */
						err_ret = sdio_enable_func(gInstance->func[2]);
						if (err_ret) {
							sd_err(("bcmsdh_sdmmc: enable F2 failed:%d",
								err_ret));
						}
					} else {
						/* Disable Function 2 */
						err_ret = sdio_disable_func(gInstance->func[2]);
						if (err_ret) {
							sd_err(("bcmsdh_sdmmc: Disab F2 failed:%d",
								err_ret));
						}
					}
					sdio_release_host(gInstance->func[2]);
				}
			}
#if defined(MMC_SDIO_ABORT)
			/* to allow abort command through F1 */
			else if (regaddr == SDIOD_CCCR_IOABORT) {
				sdio_claim_host(gInstance->func[func]);
				/*
				 * this sdio_f0_writeb() can be replaced with another api
				 * depending upon MMC driver change.
				 * As of this time, this is temporary one
				 */
				sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
				sdio_release_host(gInstance->func[func]);
			}
#endif /* MMC_SDIO_ABORT */
			else if (regaddr < 0xF0) {
				sd_err(("bcmsdh_sdmmc: F0 Wr:0x%02x: write disallowed\n", regaddr));
			} else {
				/* Claim host controller, perform F0 write, and release */
				sdio_claim_host(gInstance->func[func]);
				sdio_f0_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
				sdio_release_host(gInstance->func[func]);
			}
		} else {
			/* Claim host controller, perform Fn write, and release */
			sdio_claim_host(gInstance->func[func]);
			sdio_writeb(gInstance->func[func], *byte, regaddr, &err_ret);
			sdio_release_host(gInstance->func[func]);
		}
	} else { /* CMD52 Read */
		/* Claim host controller, perform Fn read, and release */
		sdio_claim_host(gInstance->func[func]);

		if (func == 0) {
			*byte = sdio_f0_readb(gInstance->func[func], regaddr, &err_ret);
		} else {
			*byte = sdio_readb(gInstance->func[func], regaddr, &err_ret);
		}

		sdio_release_host(gInstance->func[func]);
	}

	if (err_ret) {
		sd_err(("bcmsdh_sdmmc: Failed to %s byte F%d:@0x%05x=%02x, Err: %d\n",
			rw ? "Write" : "Read", func, regaddr, *byte, err_ret));
	}

	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
}
/*
 * 2- or 4-byte register access to function 'func' at 'addr'.  F0 is
 * rejected (CMD52/byte access only).  'cmd_type' is accepted but unused
 * by this backend.  Returns SDIOH_API_RC_SUCCESS or _FAIL; err_ret
 * starts at FAIL so an invalid 'nbytes' also reports failure.
 */
extern SDIOH_API_RC
sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
                   uint32 *word, uint nbytes)
{
	int err_ret = SDIOH_API_RC_FAIL;

	if (func == 0) {
		sd_err(("%s: Only CMD52 allowed to F0.\n", __FUNCTION__));
		return SDIOH_API_RC_FAIL;
	}

	sd_info(("%s: cmd_type=%d, rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
	         __FUNCTION__, cmd_type, rw, func, addr, nbytes));

	DHD_PM_RESUME_WAIT(sdioh_request_word_wait);
	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
	/* Claim host controller */
	sdio_claim_host(gInstance->func[func]);

	if (rw) { /* CMD52 Write */
		if (nbytes == 4) {
			sdio_writel(gInstance->func[func], *word, addr, &err_ret);
		} else if (nbytes == 2) {
			sdio_writew(gInstance->func[func], (*word & 0xFFFF), addr, &err_ret);
		} else {
			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
		}
	} else { /* CMD52 Read */
		if (nbytes == 4) {
			*word = sdio_readl(gInstance->func[func], addr, &err_ret);
		} else if (nbytes == 2) {
			*word = sdio_readw(gInstance->func[func], addr, &err_ret) & 0xFFFF;
		} else {
			sd_err(("%s: Invalid nbytes: %d\n", __FUNCTION__, nbytes));
		}
	}

	/* Release host controller */
	sdio_release_host(gInstance->func[func]);

	if (err_ret) {
		sd_err(("bcmsdh_sdmmc: Failed to %s word, Err: 0x%08x",
			rw ? "Write" : "Read", err_ret));
	}

	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
}
/*
 * Transfer a packet (or packet chain) to/from function 'func'.
 * 'fix_inc' == SDIOH_DATA_FIX selects fixed-address (FIFO) mode, in
 * which case the device address is not advanced between chain elements.
 * Lengths are rounded up to a 4-byte boundary (plus the MSM7x00A
 * block-boundary quirk).  Returns SDIOH_API_RC_SUCCESS/FAIL based on
 * the last transfer's error code.
 *
 * Fixes: (1) the "(write && !fifo)" and "write" branches issued the
 * identical sdio_memcpy_toio() call — merged; (2) the alignment ASSERT
 * checked the head packet 'pkt' on every iteration instead of the
 * current chain element 'pnext'.
 */
static SDIOH_API_RC
sdioh_request_packet(sdioh_info_t *sd, uint fix_inc, uint write, uint func,
                     uint addr, void *pkt)
{
	bool fifo = (fix_inc == SDIOH_DATA_FIX);
	uint32 SGCount = 0;
	int err_ret = 0;
	void *pnext;

	sd_trace(("%s: Enter\n", __FUNCTION__));

	ASSERT(pkt);
	DHD_PM_RESUME_WAIT(sdioh_request_packet_wait);
	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);

	/* Claim host controller */
	sdio_claim_host(gInstance->func[func]);
	for (pnext = pkt; pnext; pnext = PKTNEXT(sd->osh, pnext)) {
		uint pkt_len = PKTLEN(sd->osh, pnext);

		/* Round the transfer length up to a 4-byte boundary. */
		pkt_len += 3;
		pkt_len &= 0xFFFFFFFC;
#ifdef CONFIG_MMC_MSM7X00A
		if ((pkt_len % 64) == 32) {
			sd_trace(("%s: Rounding up TX packet +=32\n", __FUNCTION__));
			pkt_len += 32;
		}
#endif /* CONFIG_MMC_MSM7X00A */
		/* Make sure the packet is aligned properly. If it isn't, then
		 * this is the fault of sdioh_request_buffer() which is supposed
		 * to give us something we can work with.
		 */
		ASSERT(((uint32)(PKTDATA(sd->osh, pnext)) & DMA_ALIGN_MASK) == 0);

		if (write) {
			/* Incremental and fixed-address writes both use the
			 * incrementing copy helper (as in the original code).
			 */
			err_ret = sdio_memcpy_toio(gInstance->func[func], addr,
				((uint8 *)PKTDATA(sd->osh, pnext)), pkt_len);
		} else if (fifo) {
			err_ret = sdio_readsb(gInstance->func[func],
				((uint8 *)PKTDATA(sd->osh, pnext)), addr, pkt_len);
		} else {
			err_ret = sdio_memcpy_fromio(gInstance->func[func],
				((uint8 *)PKTDATA(sd->osh, pnext)), addr, pkt_len);
		}

		if (err_ret) {
			sd_err(("%s: %s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
				__FUNCTION__,
				(write) ? "TX" : "RX",
				pnext, SGCount, addr, pkt_len, err_ret));
		} else {
			sd_trace(("%s: %s xfr'd %p[%d], addr=0x%05x, len=%d\n",
				__FUNCTION__,
				(write) ? "TX" : "RX",
				pnext, SGCount, addr, pkt_len));
		}

		/* In FIFO mode the device address stays fixed. */
		if (!fifo) {
			addr += pkt_len;
		}
		SGCount++;
	}

	/* Release host controller */
	sdio_release_host(gInstance->func[func]);

	sd_trace(("%s: Exit\n", __FUNCTION__));
	return ((err_ret == 0) ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
}
/*
 * This function takes a buffer or packet, and fixes everything up so that in the
 * end, a DMA-able packet is created.
 *
 * A buffer does not have an associated packet pointer, and may or may not be aligned.
 * A packet may consist of a single packet, or a packet chain. If it is a packet chain,
 * then all the packets in the chain must be properly aligned. If the packet data is not
 * aligned, then there may only be one packet, and in this case, it is copied to a new
 * aligned packet.
 *
 * Returns the result of the underlying sdioh_request_packet() call.
 */
extern SDIOH_API_RC
sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint write, uint func,
                     uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
{
	SDIOH_API_RC Status;
	void *mypkt = NULL;

	sd_trace(("%s: Enter\n", __FUNCTION__));

	DHD_PM_RESUME_WAIT(sdioh_request_buffer_wait);
	DHD_PM_RESUME_RETURN_ERROR(SDIOH_API_RC_FAIL);
	/* Case 1: we don't have a packet. */
	if (pkt == NULL) {
		sd_data(("%s: Creating new %s Packet, len=%d\n",
		         __FUNCTION__, write ? "TX" : "RX", buflen_u));
#ifdef DHD_USE_STATIC_BUF
		if (!(mypkt = PKTGET_STATIC(sd->osh, buflen_u, write ? TRUE : FALSE))) {
#else
		if (!(mypkt = PKTGET(sd->osh, buflen_u, write ? TRUE : FALSE))) {
#endif /* DHD_USE_STATIC_BUF */
			sd_err(("%s: PKTGET failed: len %d\n",
			        __FUNCTION__, buflen_u));
			return SDIOH_API_RC_FAIL;
		}

		/* For a write, copy the buffer data into the packet. */
		if (write) {
			bcopy(buffer, PKTDATA(sd->osh, mypkt), buflen_u);
		}

		Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);

		/* For a read, copy the packet data back to the buffer. */
		if (!write) {
			bcopy(PKTDATA(sd->osh, mypkt), buffer, buflen_u);
		}
#ifdef DHD_USE_STATIC_BUF
		PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
#else
		PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
#endif /* DHD_USE_STATIC_BUF */
	} else if (((uint32)(PKTDATA(sd->osh, pkt)) & DMA_ALIGN_MASK) != 0) {
		/* Case 2: We have a packet, but it is unaligned. */

		/* In this case, we cannot have a chain. */
		ASSERT(PKTNEXT(sd->osh, pkt) == NULL);

		sd_data(("%s: Creating aligned %s Packet, len=%d\n",
		         __FUNCTION__, write ? "TX" : "RX", PKTLEN(sd->osh, pkt)));
#ifdef DHD_USE_STATIC_BUF
		if (!(mypkt = PKTGET_STATIC(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
#else
		if (!(mypkt = PKTGET(sd->osh, PKTLEN(sd->osh, pkt), write ? TRUE : FALSE))) {
#endif /* DHD_USE_STATIC_BUF */
			sd_err(("%s: PKTGET failed: len %d\n",
			        __FUNCTION__, PKTLEN(sd->osh, pkt)));
			return SDIOH_API_RC_FAIL;
		}

		/* For a write, copy the buffer data into the packet. */
		if (write) {
			bcopy(PKTDATA(sd->osh, pkt),
			      PKTDATA(sd->osh, mypkt),
			      PKTLEN(sd->osh, pkt));
		}

		Status = sdioh_request_packet(sd, fix_inc, write, func, addr, mypkt);

		/* For a read, copy the packet data back to the buffer. */
		if (!write) {
			bcopy(PKTDATA(sd->osh, mypkt),
			      PKTDATA(sd->osh, pkt),
			      PKTLEN(sd->osh, mypkt));
		}
#ifdef DHD_USE_STATIC_BUF
		PKTFREE_STATIC(sd->osh, mypkt, write ? TRUE : FALSE);
#else
		PKTFREE(sd->osh, mypkt, write ? TRUE : FALSE);
#endif /* DHD_USE_STATIC_BUF */
	} else { /* case 3: We have a packet and it is aligned. */
		sd_data(("%s: Aligned %s Packet, direct DMA\n",
		         __FUNCTION__, write ? "Tx" : "Rx"));
		Status = sdioh_request_packet(sd, fix_inc, write, func, addr, pkt);
	}

	return (Status);
}
/* this function performs "abort" for both of host & device */
/*
 * Write the function number into the CCCR IOABORT register via F0,
 * aborting the in-progress CMD53 on that function.  Compiled to a
 * trace-only no-op unless MMC_SDIO_ABORT is defined.
 * NOTE(review): &t_func is a char* passed where sdioh_request_byte()
 * takes uint8* — relies on identical representation.
 */
extern int
sdioh_abort(sdioh_info_t *sd, uint func)
{
#if defined(MMC_SDIO_ABORT)
	char t_func = (char) func;
#endif /* defined(MMC_SDIO_ABORT) */
	sd_trace(("%s: Enter\n", __FUNCTION__));

#if defined(MMC_SDIO_ABORT)
	/* issue abort cmd52 command through F1 */
	sdioh_request_byte(sd, SD_IO_OP_WRITE, SDIO_FUNC_0, SDIOD_CCCR_IOABORT, &t_func);
#endif /* defined(MMC_SDIO_ABORT) */

	sd_trace(("%s: Exit\n", __FUNCTION__));
	return SDIOH_API_RC_SUCCESS;
}
/* Reset and re-initialize the device */
int sdioh_sdio_reset(sdioh_info_t *si)
{
	/* Nothing to do here for the mmc-stack backend; the real reset
	 * path is sdio_reset_comm() in sdioh_start().
	 */
	sd_trace(("%s: Enter\n", __FUNCTION__));
	sd_trace(("%s: Exit\n", __FUNCTION__));
	return SDIOH_API_RC_SUCCESS;
}
/* Disable device interrupt */
void
sdioh_sdmmc_devintr_off(sdioh_info_t *sd)
{
	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));

	/* Mask client-card interrupts in the software interrupt mask. */
	sd->intmask &= ~CLIENT_INTR;
}
/* Enable device interrupt */
void
sdioh_sdmmc_devintr_on(sdioh_info_t *sd)
{
	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));

	/* Unmask client-card interrupts in the software interrupt mask. */
	sd->intmask |= CLIENT_INTR;
}
/* Read client card reg */
/*
 * Read a 1-, 2- or 4-byte register from function 'func' into *data.
 * F0 only supports byte access, so it always takes the CMD52 path.
 * Always returns SUCCESS (underlying errors are not propagated).
 */
int
sdioh_sdmmc_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
{
	if (func == 0 || regsize == 1) {
		uint8 byte_val = 0;

		sdioh_request_byte(sd, SDIOH_READ, func, regaddr, &byte_val);
		*data = (uint32)byte_val & 0xff;
		sd_data(("%s: byte read data=0x%02x\n",
		         __FUNCTION__, *data));
	} else {
		sdioh_request_word(sd, 0, SDIOH_READ, func, regaddr, data, regsize);
		if (regsize == 2)
			*data &= 0xffff;
		sd_data(("%s: word read data=0x%08x\n",
		         __FUNCTION__, *data));
	}

	return SUCCESS;
}
#if !defined(OOB_INTR_ONLY)
/* bcmsdh_sdmmc interrupt handler */
/*
 * In-band SDIO interrupt entry for F1.  The host is released for the
 * duration of the client handler and re-claimed before returning
 * (NOTE(review): release-then-claim is intentional here — the mmc core
 * invokes sdio irq handlers with the host already claimed; confirm
 * against the kernel's sdio_irq code).
 */
static void IRQHandler(struct sdio_func *func)
{
	sdioh_info_t *sd;

	sd_trace(("bcmsdh_sdmmc: ***IRQHandler\n"));

	sd = gInstance->sd;

	ASSERT(sd != NULL);

	sdio_release_host(gInstance->func[0]);

	if (sd->use_client_ints) {
		sd->intrcount++;
		ASSERT(sd->intr_handler);
		ASSERT(sd->intr_handler_arg);
		(sd->intr_handler)(sd->intr_handler_arg);
	} else {
		sd_err(("bcmsdh_sdmmc: ***IRQHandler\n"));
		sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
		        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
	}

	sdio_claim_host(gInstance->func[0]);
}
/* bcmsdh_sdmmc interrupt handler for F2 (dummy handler) */
static void IRQHandlerF2(struct sdio_func *func)
{
	sd_trace(("bcmsdh_sdmmc: ***IRQHandlerF2\n"));

	/* F2 interrupts carry no work; just sanity-check attach state. */
	sdioh_info_t *sd = gInstance->sd;
	ASSERT(sd != NULL);
}
#endif /* !defined(OOB_INTR_ONLY) */
#ifdef NOTUSED
/* Write client card reg */
/*
 * Write a 1-, 2- or 4-byte register on function 'func'.  Mirror of
 * sdioh_sdmmc_card_regread(); always returns SUCCESS.
 *
 * Fix: this write helper passed SDIOH_READ to both sdioh_request_byte()
 * and sdioh_request_word() — i.e. it read instead of writing (and for
 * the byte path also clobbered the caller-invisible temp).  Currently
 * dead code (#ifdef NOTUSED), but corrected to SDIOH_WRITE so it is
 * safe to revive.
 */
static int
sdioh_sdmmc_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
{
	if ((func == 0) || (regsize == 1)) {
		uint8 temp;

		temp = data & 0xff;
		sdioh_request_byte(sd, SDIOH_WRITE, func, regaddr, &temp);
		sd_data(("%s: byte write data=0x%02x\n",
		         __FUNCTION__, data));
	} else {
		if (regsize == 2)
			data &= 0xffff;

		sdioh_request_word(sd, 0, SDIOH_WRITE, func, regaddr, &data, regsize);

		sd_data(("%s: word write data=0x%08x\n",
		         __FUNCTION__, data));
	}

	return SUCCESS;
}
#endif /* NOTUSED */
/*
 * Two-stage (re)start after the chip has been power-cycled.
 * Stage 0: reset SDIO communication (sdio_reset_comm()), then redo the
 * block-size setup and function enable exactly as sdioh_attach() does,
 * so firmware download can proceed via polled access.
 * Stage != 0: (re)register the F1/F2 interrupt paths (or OOB interrupt)
 * once download is complete.  Always returns 0.
 */
int
sdioh_start(sdioh_info_t *si, int stage)
{
	int ret;
	sdioh_info_t *sd = gInstance->sd;

	/* Need to do this in stages as we can't enable the interrupt till
	   downloading of the firmware is complete, otherwise polling
	   sdio access will come in the way
	 */
	if (gInstance->func[0]) {
		if (stage == 0) {
			/* Since the power to the chip is killed, we will have to
			   re-enumerate the device again.  Set the block size
			   and enable function 1 in preparation for
			   downloading the code
			 */
			/* sdio_reset_comm() - has been fixed in latest kernel/msm.git for Linux
			   2.6.27. The implementation prior to that is buggy, and needs broadcom's
			   patch for it
			 */
			if ((ret = sdio_reset_comm(gInstance->func[0]->card)))
				sd_err(("%s Failed, error = %d\n", __FUNCTION__, ret));
			else {
				/* Same defaults as sdioh_attach(). */
				sd->num_funcs = 2;
				sd->sd_blockmode = TRUE;
				sd->use_client_ints = TRUE;
				sd->client_block_size[0] = 64;

				/* Claim host controller */
				sdio_claim_host(gInstance->func[1]);

				sd->client_block_size[1] = 64;
				if (sdio_set_block_size(gInstance->func[1], 64)) {
					sd_err(("bcmsdh_sdmmc: Failed to set F1 blocksize\n"));
				}

				/* Release host controller F1 */
				sdio_release_host(gInstance->func[1]);

				if (gInstance->func[2]) {
					/* Claim host controller F2 */
					sdio_claim_host(gInstance->func[2]);

					sd->client_block_size[2] = sd_f2_blocksize;
					if (sdio_set_block_size(gInstance->func[2],
						sd_f2_blocksize)) {
						sd_err(("bcmsdh_sdmmc: Failed to set F2 "
							"blocksize to %d\n", sd_f2_blocksize));
					}

					/* Release host controller F2 */
					sdio_release_host(gInstance->func[2]);
				}

				sdioh_sdmmc_card_enablefuncs(sd);
			}
		} else {
#if !defined(OOB_INTR_ONLY)
			sdio_claim_host(gInstance->func[0]);
			sdio_claim_irq(gInstance->func[2], IRQHandlerF2);
			sdio_claim_irq(gInstance->func[1], IRQHandler);
			sdio_release_host(gInstance->func[0]);
#else /* defined(OOB_INTR_ONLY) */
#if defined(HW_OOB)
			sdioh_enable_func_intr();
#endif
			bcmsdh_oob_intr_set(TRUE);
#endif /* !defined(OOB_INTR_ONLY) */
		}
	}
	else
		sd_err(("%s Failed\n", __FUNCTION__));

	return (0);
}
/*
 * Counterpart of sdioh_start(): release the F1/F2 sdio IRQs (or disable
 * the OOB interrupt) so the mmc stack stops polling while the device is
 * powered down.  Always returns 0.
 *
 * Fix: guard the func[1]/func[2] IRQ releases — F2 in particular may be
 * absent (every other path in this file checks gInstance->func[2]
 * before use), so the unconditional sdio_release_irq() could
 * dereference NULL.
 */
int
sdioh_stop(sdioh_info_t *si)
{
	/* MSM7201A Android sdio stack has bug with interrupt
	   So internaly within SDIO stack they are polling
	   which cause issue when device is turned off. So
	   unregister interrupt with SDIO stack to stop the
	   polling
	 */
	if (gInstance->func[0]) {
#if !defined(OOB_INTR_ONLY)
		sdio_claim_host(gInstance->func[0]);
		if (gInstance->func[1])
			sdio_release_irq(gInstance->func[1]);
		if (gInstance->func[2])
			sdio_release_irq(gInstance->func[2]);
		sdio_release_host(gInstance->func[0]);
#else /* defined(OOB_INTR_ONLY) */
#if defined(HW_OOB)
		sdioh_disable_func_intr();
#endif
		bcmsdh_oob_intr_set(FALSE);
#endif /* !defined(OOB_INTR_ONLY) */
	}
	else
		sd_err(("%s Failed\n", __FUNCTION__));
	return (0);
}
| gpl-2.0 |
MoKee/android_kernel_lge_g3 | drivers/net/can/at91_can.c | 4321 | 34758 | /*
* at91_can.c - CAN network driver for AT91 SoC CAN controller
*
* (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
* (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <kernel@pengutronix.de>
*
* This software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2 as distributed in the 'COPYING'
* file from the main directory of the linux kernel source.
*
*
* Your platform definition file should specify something like:
*
* static struct at91_can_data ek_can_data = {
* transceiver_switch = sam9263ek_transceiver_switch,
* };
*
* at91_add_device_can(&ek_can_data);
*
*/
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <mach/board.h>
#define AT91_MB_MASK(i) ((1 << (i)) - 1)
/* Common registers */
/* Byte offsets of the controller-global registers; names follow the
 * AT91SAM9 datasheet abbreviations (MR = mode, IER/IDR/IMR = interrupt
 * enable/disable/mask, SR = status, BR = baudrate, ECR = error counter,
 * TCR/ACR = transfer/abort command).
 */
enum at91_reg {
	AT91_MR = 0x000,
	AT91_IER = 0x004,
	AT91_IDR = 0x008,
	AT91_IMR = 0x00C,
	AT91_SR = 0x010,
	AT91_BR = 0x014,
	AT91_TIM = 0x018,
	AT91_TIMESTP = 0x01C,
	AT91_ECR = 0x020,
	AT91_TCR = 0x024,
	AT91_ACR = 0x028,
};
/* Mailbox registers (0 <= i <= 15) */
#define AT91_MMR(i) (enum at91_reg)(0x200 + ((i) * 0x20))
#define AT91_MAM(i) (enum at91_reg)(0x204 + ((i) * 0x20))
#define AT91_MID(i) (enum at91_reg)(0x208 + ((i) * 0x20))
#define AT91_MFID(i) (enum at91_reg)(0x20C + ((i) * 0x20))
#define AT91_MSR(i) (enum at91_reg)(0x210 + ((i) * 0x20))
#define AT91_MDL(i) (enum at91_reg)(0x214 + ((i) * 0x20))
#define AT91_MDH(i) (enum at91_reg)(0x218 + ((i) * 0x20))
#define AT91_MCR(i) (enum at91_reg)(0x21C + ((i) * 0x20))
/* Register bits */
#define AT91_MR_CANEN BIT(0)
#define AT91_MR_LPM BIT(1)
#define AT91_MR_ABM BIT(2)
#define AT91_MR_OVL BIT(3)
#define AT91_MR_TEOF BIT(4)
#define AT91_MR_TTM BIT(5)
#define AT91_MR_TIMFRZ BIT(6)
#define AT91_MR_DRPT BIT(7)
#define AT91_SR_RBSY BIT(29)
#define AT91_MMR_PRIO_SHIFT (16)
#define AT91_MID_MIDE BIT(29)
#define AT91_MSR_MRTR BIT(20)
#define AT91_MSR_MABT BIT(22)
#define AT91_MSR_MRDY BIT(23)
#define AT91_MSR_MMI BIT(24)
#define AT91_MCR_MRTR BIT(20)
#define AT91_MCR_MTCR BIT(23)
/* Mailbox Modes */
/* Values written into a mailbox's mode field (AT91_MMR) to select its
 * role; RX_OVRWR is receive-with-overwrite.
 */
enum at91_mb_mode {
	AT91_MB_MODE_DISABLED = 0,
	AT91_MB_MODE_RX = 1,
	AT91_MB_MODE_RX_OVRWR = 2,
	AT91_MB_MODE_TX = 3,
	AT91_MB_MODE_CONSUMER = 4,
	AT91_MB_MODE_PRODUCER = 5,
};
/* Interrupt mask bits */
#define AT91_IRQ_ERRA (1 << 16)
#define AT91_IRQ_WARN (1 << 17)
#define AT91_IRQ_ERRP (1 << 18)
#define AT91_IRQ_BOFF (1 << 19)
#define AT91_IRQ_SLEEP (1 << 20)
#define AT91_IRQ_WAKEUP (1 << 21)
#define AT91_IRQ_TOVF (1 << 22)
#define AT91_IRQ_TSTP (1 << 23)
#define AT91_IRQ_CERR (1 << 24)
#define AT91_IRQ_SERR (1 << 25)
#define AT91_IRQ_AERR (1 << 26)
#define AT91_IRQ_FERR (1 << 27)
#define AT91_IRQ_BERR (1 << 28)
#define AT91_IRQ_ERR_ALL (0x1fff0000)
#define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \
AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
#define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \
AT91_IRQ_ERRP | AT91_IRQ_BOFF)
#define AT91_IRQ_ALL (0x1fffffff)
/* Supported SoC variants; index into at91_devtype_data[] */
enum at91_devtype {
	AT91_DEVTYPE_SAM9263,
	AT91_DEVTYPE_SAM9X5,
};

/*
 * Per-SoC mailbox layout. Mailboxes rx_first..rx_last receive,
 * rx_split divides them into a "low" and "high" rx group (see
 * at91_poll_rx()), and 1 << tx_shift mailboxes after rx_last transmit.
 */
struct at91_devtype_data {
	unsigned int rx_first;
	unsigned int rx_split;
	unsigned int rx_last;
	unsigned int tx_shift;
	enum at91_devtype type;
};

/* Driver private data, embedded in the net_device */
struct at91_priv {
	struct can_priv can;		/* must be the first member! */
	struct net_device *dev;
	struct napi_struct napi;
	void __iomem *reg_base;		/* ioremapped controller registers */
	u32 reg_sr;			/* error bits saved in irq for napi poll */
	unsigned int tx_next;		/* next tx mailbox+prio counter */
	unsigned int tx_echo;		/* oldest not yet ACKed tx frame */
	unsigned int rx_next;		/* next rx mailbox to read */
	struct at91_devtype_data devtype_data;
	struct clk *clk;
	struct at91_can_data *pdata;	/* board data, may be NULL */
	canid_t mb0_id;			/* id written to the errata-disabled mb 0 */
};

static const struct at91_devtype_data at91_devtype_data[] __devinitconst = {
	[AT91_DEVTYPE_SAM9263] = {
		.rx_first = 1,		/* mb 0 unusable, chip errata */
		.rx_split = 8,
		.rx_last = 11,
		.tx_shift = 2,		/* 4 tx mailboxes */
	},
	[AT91_DEVTYPE_SAM9X5] = {
		.rx_first = 0,
		.rx_split = 4,
		.rx_last = 5,
		.tx_shift = 1,		/* 2 tx mailboxes */
	},
};
static struct can_bittiming_const at91_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 4,
.tseg1_max = 16,
.tseg2_min = 2,
.tseg2_max = 8,
.sjw_max = 4,
.brp_min = 2,
.brp_max = 128,
.brp_inc = 1,
};
/* Generate at91_is_sam9263()/at91_is_sam9x5() model predicates */
#define AT91_IS(_model) \
static inline int at91_is_sam##_model(const struct at91_priv *priv) \
{ \
	return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \
}

AT91_IS(9263);
AT91_IS(9X5);

/* First mailbox used for reception */
static inline unsigned int get_mb_rx_first(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_first;
}

/* Last mailbox used for reception */
static inline unsigned int get_mb_rx_last(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_last;
}

/* First mailbox of the "high" rx group */
static inline unsigned int get_mb_rx_split(const struct at91_priv *priv)
{
	return priv->devtype_data.rx_split;
}

/* Number of rx mailboxes; also the napi weight (see probe) */
static inline unsigned int get_mb_rx_num(const struct at91_priv *priv)
{
	return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1;
}

/* Last mailbox of the "low" rx group */
static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv)
{
	return get_mb_rx_split(priv) - 1;
}

/* Bitmask covering the "low" rx group mailboxes */
static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_rx_split(priv)) &
		~AT91_MB_MASK(get_mb_rx_first(priv));
}

static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv)
{
	return priv->devtype_data.tx_shift;
}

/* Number of tx mailboxes (power of two) */
static inline unsigned int get_mb_tx_num(const struct at91_priv *priv)
{
	return 1 << get_mb_tx_shift(priv);
}

/* tx mailboxes directly follow the rx mailboxes */
static inline unsigned int get_mb_tx_first(const struct at91_priv *priv)
{
	return get_mb_rx_last(priv) + 1;
}

static inline unsigned int get_mb_tx_last(const struct at91_priv *priv)
{
	return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1;
}

/*
 * priv->tx_next/tx_echo encoding helpers: the low tx_shift bits hold
 * the mailbox offset, the next 4 bits the priority (see the "theory of
 * operation" comment above at91_start_xmit()).
 */
static inline unsigned int get_next_prio_shift(const struct at91_priv *priv)
{
	return get_mb_tx_shift(priv);
}

static inline unsigned int get_next_prio_mask(const struct at91_priv *priv)
{
	return 0xf << get_mb_tx_shift(priv);
}

static inline unsigned int get_next_mb_mask(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_tx_shift(priv));
}

static inline unsigned int get_next_mask(const struct at91_priv *priv)
{
	return get_next_mb_mask(priv) | get_next_prio_mask(priv);
}

/* Interrupt bits of all rx mailboxes */
static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_rx_last(priv) + 1) &
		~AT91_MB_MASK(get_mb_rx_first(priv));
}

/* Interrupt bits of all tx mailboxes */
static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv)
{
	return AT91_MB_MASK(get_mb_tx_last(priv) + 1) &
		~AT91_MB_MASK(get_mb_tx_first(priv));
}

/* Absolute mailbox number to use for the next transmission */
static inline unsigned int get_tx_next_mb(const struct at91_priv *priv)
{
	return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
}

/* Priority (0..15) to use for the next transmission */
static inline unsigned int get_tx_next_prio(const struct at91_priv *priv)
{
	return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf;
}

/* Absolute mailbox number of the oldest unacked tx frame */
static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv)
{
	return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv);
}
/* Raw register read; no byte swapping (registers are native endian) */
static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
	return __raw_readl(priv->reg_base + reg);
}

/* Raw register write */
static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
		u32 value)
{
	__raw_writel(value, priv->reg_base + reg);
}

/* Program mailbox mode and tx priority into the mailbox mode register */
static inline void set_mb_mode_prio(const struct at91_priv *priv,
		unsigned int mb, enum at91_mb_mode mode, int prio)
{
	at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
}

/* Program mailbox mode with priority 0 */
static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
		enum at91_mb_mode mode)
{
	set_mb_mode_prio(priv, mb, mode, 0);
}
/*
 * Convert a socketcan CAN id into the layout of the mailbox MID
 * register: extended ids keep their 29 bits plus the MIDE flag,
 * standard ids are shifted into bits 28..18.
 */
static inline u32 at91_can_id_to_reg_mid(canid_t can_id)
{
	if (can_id & CAN_EFF_FLAG)
		return (can_id & CAN_EFF_MASK) | AT91_MID_MIDE;

	return (can_id & CAN_SFF_MASK) << 18;
}
/*
 * Switch transceiver on or off via the optional board-supplied
 * callback; a no-op when no platform data or callback is present.
 */
static void at91_transceiver_switch(const struct at91_priv *priv, int on)
{
	if (priv->pdata && priv->pdata->transceiver_switch)
		priv->pdata->transceiver_switch(on);
}
/*
 * Program all mailboxes into the layout the driver expects (disabled
 * errata mailboxes, rx FIFO, rx-overwrite, tx) and reset the software
 * mailbox pointers. Called from at91_chip_start() with the chip
 * disabled.
 */
static void at91_setup_mailboxes(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	unsigned int i;
	u32 reg_mid;

	/*
	 * Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first
	 * mailbox is disabled. The next 11 mailboxes are used as a
	 * reception FIFO. The last mailbox is configured with
	 * overwrite option. The overwrite flag indicates a FIFO
	 * overflow.
	 */
	reg_mid = at91_can_id_to_reg_mid(priv->mb0_id);
	for (i = 0; i < get_mb_rx_first(priv); i++) {
		set_mb_mode(priv, i, AT91_MB_MODE_DISABLED);
		at91_write(priv, AT91_MID(i), reg_mid);
		at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */
	}

	for (i = get_mb_rx_first(priv); i < get_mb_rx_last(priv); i++)
		set_mb_mode(priv, i, AT91_MB_MODE_RX);
	set_mb_mode(priv, get_mb_rx_last(priv), AT91_MB_MODE_RX_OVRWR);

	/* reset acceptance mask and id register */
	for (i = get_mb_rx_first(priv); i <= get_mb_rx_last(priv); i++) {
		at91_write(priv, AT91_MAM(i), 0x0);
		at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
	}

	/* The last 4 mailboxes are used for transmitting. */
	for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++)
		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);

	/* Reset tx and rx helper pointers */
	priv->tx_next = priv->tx_echo = 0;
	priv->rx_next = get_mb_rx_first(priv);
}
/*
 * Write the bit timing parameters calculated by the CAN framework
 * (priv->can.bittiming) into the baudrate register. All hardware
 * fields are the framework value minus one.
 */
static int at91_set_bittiming(struct net_device *dev)
{
	const struct at91_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	u32 reg_br = 0;

	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		reg_br |= 1 << 24;	/* triple sampling (SMP) */

	reg_br |= (bt->brp - 1) << 16;
	reg_br |= (bt->sjw - 1) << 12;
	reg_br |= (bt->prop_seg - 1) << 8;
	reg_br |= (bt->phase_seg1 - 1) << 4;
	reg_br |= (bt->phase_seg2 - 1) << 0;

	netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
	at91_write(priv, AT91_BR, reg_br);

	return 0;
}
/*
 * Report the chip's rx/tx error counters from the ECR register:
 * receive counter in the low byte, transmit counter from bit 16 up.
 */
static int at91_get_berr_counter(const struct net_device *dev,
		struct can_berr_counter *bec)
{
	const struct at91_priv *priv = netdev_priv(dev);
	const u32 reg_ecr = at91_read(priv, AT91_ECR);

	bec->txerr = reg_ecr >> 16;
	bec->rxerr = reg_ecr & 0xff;

	return 0;
}
/*
 * Bring the controller into operation: program bit timing and
 * mailboxes with the core disabled, switch the transceiver on, enable
 * the core and finally unmask rx and error interrupts.
 */
static void at91_chip_start(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_mr, reg_ier;

	/* disable interrupts */
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);

	/* disable chip */
	reg_mr = at91_read(priv, AT91_MR);
	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);

	at91_set_bittiming(dev);
	at91_setup_mailboxes(dev);
	at91_transceiver_switch(priv, 1);

	/* enable chip */
	at91_write(priv, AT91_MR, AT91_MR_CANEN);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Enable interrupts */
	reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
	at91_write(priv, AT91_IER, reg_ier);
}

/*
 * Quiesce the controller: mask all interrupts, disable the core,
 * switch the transceiver off and record the new CAN state.
 */
static void at91_chip_stop(struct net_device *dev, enum can_state state)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_mr;

	/* disable interrupts */
	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);

	reg_mr = at91_read(priv, AT91_MR);
	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);

	at91_transceiver_switch(priv, 0);
	priv->can.state = state;
}
/*
* theory of operation:
*
* According to the datasheet priority 0 is the highest priority, 15
* is the lowest. If two mailboxes have the same priority level the
* message of the mailbox with the lowest number is sent first.
*
* We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then
* the next mailbox with prio 0, and so on, until all mailboxes are
* used. Then we start from the beginning with mailbox
* AT91_MB_TX_FIRST, but with prio 1, mailbox AT91_MB_TX_FIRST + 1
* prio 1. When we reach the last mailbox with prio 15, we have to
* stop sending, waiting for all messages to be delivered, then start
* again with mailbox AT91_MB_TX_FIRST prio 0.
*
* We use the priv->tx_next as counter for the next transmission
* mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
* encode the mailbox number, the upper 4 bits the mailbox priority:
*
* priv->tx_next = (prio << get_next_prio_shift(priv)) |
* (mb - get_mb_tx_first(priv));
*
*/
/*
 * Queue a CAN frame for transmission in the next free tx mailbox with
 * the current rotating priority (see the "theory of operation" comment
 * above). Stops the queue on a prio+mb counter wrap-around or when the
 * next mailbox is still busy.
 */
static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	unsigned int mb, prio;
	u32 reg_mid, reg_mcr;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	mb = get_tx_next_mb(priv);
	prio = get_tx_next_prio(priv);

	/* should never happen: queue is stopped before the wrap-around */
	if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	reg_mid = at91_can_id_to_reg_mid(cf->can_id);
	reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
		(cf->can_dlc << 16) | AT91_MCR_MTCR;

	/* disable MB while writing ID (see datasheet) */
	set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
	at91_write(priv, AT91_MID(mb), reg_mid);
	set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);

	at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
	at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));

	/* This triggers transmission */
	at91_write(priv, AT91_MCR(mb), reg_mcr);

	/* NOTE(review): tx_bytes counted at queue time, not at completion */
	stats->tx_bytes += cf->can_dlc;

	/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
	can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv));

	/*
	 * we have to stop the queue and deliver all messages in case
	 * of a prio+mb counter wrap around. This is the case if
	 * tx_next buffer prio and mailbox equals 0.
	 *
	 * also stop the queue if next buffer is still in use
	 * (== not ready)
	 */
	priv->tx_next++;
	if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
	      AT91_MSR_MRDY) ||
	    (priv->tx_next & get_next_mask(priv)) == 0)
		netif_stop_queue(dev);

	/* Enable interrupt for this mailbox */
	at91_write(priv, AT91_IER, 1 << mb);

	return NETDEV_TX_OK;
}
/**
 * at91_activate_rx_low - activate lower rx mailboxes
 * @priv: a91 context
 *
 * Reenables the lower mailboxes for reception of new CAN messages
 */
static inline void at91_activate_rx_low(const struct at91_priv *priv)
{
	at91_write(priv, AT91_TCR, get_mb_rx_low_mask(priv));
}
/**
 * at91_activate_rx_mb - reactive single rx mailbox
 * @priv: a91 context
 * @mb: mailbox to reactivate
 *
 * Reenables given mailbox for reception of new CAN messages
 */
static inline void at91_activate_rx_mb(const struct at91_priv *priv,
		unsigned int mb)
{
	at91_write(priv, AT91_TCR, BIT(mb));
}
/**
 * at91_rx_overflow_err - send error frame due to rx overflow
 * @dev: net device
 *
 * Accounts the overflow in the interface statistics and, if an skb can
 * be allocated, delivers a CAN error frame with the RX_OVERFLOW
 * controller-problem flag set.
 */
static void at91_rx_overflow_err(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *cf;

	netdev_dbg(dev, "RX buffer overflow\n");
	stats->rx_over_errors++;
	stats->rx_errors++;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
	netif_receive_skb(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}
/**
 * at91_read_mb - read CAN msg from mailbox (lowlevel impl)
 * @dev: net device
 * @mb: mailbox number to read from
 * @cf: can frame where to store message
 *
 * Reads a CAN message from the given mailbox and stores data into
 * given can frame. "mb" and "cf" must be valid.
 */
static void at91_read_mb(struct net_device *dev, unsigned int mb,
		struct can_frame *cf)
{
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr, reg_mid;

	reg_mid = at91_read(priv, AT91_MID(mb));
	if (reg_mid & AT91_MID_MIDE)
		cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;

	reg_msr = at91_read(priv, AT91_MSR(mb));
	cf->can_dlc = get_can_dlc((reg_msr >> 16) & 0xf);

	if (reg_msr & AT91_MSR_MRTR)
		cf->can_id |= CAN_RTR_FLAG;
	else {
		/* data registers are only valid for non-RTR frames */
		*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
		*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
	}

	/* allow RX of extended frames */
	at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);

	/* MMI on the overwrite mailbox means the rx FIFO overflowed */
	if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI))
		at91_rx_overflow_err(dev);
}
/**
 * at91_read_msg - read CAN message from mailbox
 * @dev: net device
 * @mb: mail box to read from
 *
 * Reads a CAN message from given mailbox, and put into linux network
 * RX queue, does all housekeeping chores (stats, ...)
 */
static void at91_read_msg(struct net_device *dev, unsigned int mb)
{
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_skb(dev, &cf);
	if (unlikely(!skb)) {
		/* frame is lost; only the drop counter records it */
		stats->rx_dropped++;
		return;
	}

	at91_read_mb(dev, mb, cf);
	netif_receive_skb(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}
/**
* at91_poll_rx - read multiple CAN messages from mailboxes
* @dev: net device
* @quota: max number of pkgs we're allowed to receive
*
* Theory of Operation:
*
* About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last())
* on the chip are reserved for RX. We split them into 2 groups. The
* lower group ranges from get_mb_rx_first() to get_mb_rx_low_last().
*
* Like it or not, but the chip always saves a received CAN message
* into the first free mailbox it finds (starting with the
* lowest). This makes it very difficult to read the messages in the
* right order from the chip. This is how we work around that problem:
*
* The first message goes into mb nr. 1 and issues an interrupt. All
* rx ints are disabled in the interrupt handler and a napi poll is
* scheduled. We read the mailbox, but do _not_ reenable the mb (to
* receive another message).
*
* lower mbxs upper
* ____^______ __^__
* / \ / \
* +-+-+-+-+-+-+-+-++-+-+-+-+
* | |x|x|x|x|x|x|x|| | | | |
* +-+-+-+-+-+-+-+-++-+-+-+-+
* 0 0 0 0 0 0 0 0 0 0 1 1 \ mail
* 0 1 2 3 4 5 6 7 8 9 0 1 / box
* ^
* |
* \
* unused, due to chip bug
*
* The variable priv->rx_next points to the next mailbox to read a
* message from. As long we're in the lower mailboxes we just read the
* mailbox but not reenable it.
*
* With completion of the last of the lower mailboxes, we reenable the
* whole first group, but continue to look for filled mailboxes in the
* upper mailboxes. Imagine the second group like overflow mailboxes,
 * which takes CAN messages if the lower group is full. While in the
* upper group we reenable the mailbox right after reading it. Giving
* the chip more room to store messages.
*
* After finishing we look again in the lower group if we've still
* quota.
*
*/
/*
 * Read up to @quota pending CAN frames from the rx mailboxes in the
 * order described in the "Theory of Operation" comment above; returns
 * the number of frames delivered. Runs in napi context.
 *
 * Fix: the address-of operator on reg_sr had been corrupted by a
 * character-encoding mangle ("&reg_sr" had collapsed into the "(R)"
 * sign), which does not compile; restore "&reg_sr".
 */
static int at91_poll_rx(struct net_device *dev, int quota)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_sr = at91_read(priv, AT91_SR);
	const unsigned long *addr = (unsigned long *)&reg_sr;
	unsigned int mb;
	int received = 0;

	if (priv->rx_next > get_mb_rx_low_last(priv) &&
	    reg_sr & get_mb_rx_low_mask(priv))
		netdev_info(dev,
			"order of incoming frames cannot be guaranteed\n");

 again:
	for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
	     mb < get_mb_tx_first(priv) && quota > 0;
	     reg_sr = at91_read(priv, AT91_SR),
	     mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) {
		at91_read_msg(dev, mb);

		/* reactivate mailboxes */
		if (mb == get_mb_rx_low_last(priv))
			/* all lower mailboxes, if just finished it */
			at91_activate_rx_low(priv);
		else if (mb > get_mb_rx_low_last(priv))
			/* only the mailbox we read */
			at91_activate_rx_mb(priv, mb);

		received++;
		quota--;
	}

	/* upper group completed, look again in lower */
	if (priv->rx_next > get_mb_rx_low_last(priv) &&
	    quota > 0 && mb > get_mb_rx_last(priv)) {
		priv->rx_next = get_mb_rx_first(priv);
		goto again;
	}

	return received;
}
/*
 * Translate the bus-error bits of a (saved) status register value into
 * the corresponding fields of a CAN error frame and update the error
 * statistics. Several bits may be set at once.
 */
static void at91_poll_err_frame(struct net_device *dev,
		struct can_frame *cf, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);

	/* CRC error */
	if (reg_sr & AT91_IRQ_CERR) {
		netdev_dbg(dev, "CERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	}

	/* Stuffing Error */
	if (reg_sr & AT91_IRQ_SERR) {
		netdev_dbg(dev, "SERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_STUFF;
	}

	/* Acknowledgement Error */
	if (reg_sr & AT91_IRQ_AERR) {
		netdev_dbg(dev, "AERR irq\n");
		dev->stats.tx_errors++;
		cf->can_id |= CAN_ERR_ACK;
	}

	/* Form error */
	if (reg_sr & AT91_IRQ_FERR) {
		netdev_dbg(dev, "FERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* Bit Error */
	if (reg_sr & AT91_IRQ_BERR) {
		netdev_dbg(dev, "BERR irq\n");
		dev->stats.tx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_BIT;
	}
}
/*
 * Deliver one CAN error frame describing the bus errors recorded in
 * @reg_sr. Returns the number of frames handed to the stack (0 or 1);
 * does nothing when the napi quota is exhausted or no skb is available.
 */
static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
{
	struct can_frame *cf;
	struct sk_buff *skb;

	if (!quota)
		return 0;

	skb = alloc_can_err_skb(dev, &cf);
	if (!skb)
		return 0;

	at91_poll_err_frame(dev, cf, reg_sr);
	netif_receive_skb(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cf->can_dlc;

	return 1;
}
/*
 * napi poll handler: drain pending rx frames and report frame errors,
 * then re-enable the interrupts that were masked in at91_irq() once
 * there is budget left.
 */
static int at91_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_sr = at91_read(priv, AT91_SR);
	int work_done = 0;

	if (reg_sr & get_irq_mb_rx(priv))
		work_done += at91_poll_rx(dev, quota - work_done);

	/*
	 * The error bits are clear on read,
	 * so use saved value from irq handler.
	 */
	reg_sr |= priv->reg_sr;
	if (reg_sr & AT91_IRQ_ERR_FRAME)
		work_done += at91_poll_err(dev, quota - work_done, reg_sr);

	if (work_done < quota) {
		/* enable IRQs for frame errors and all mailboxes >= rx_next */
		u32 reg_ier = AT91_IRQ_ERR_FRAME;
		reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);

		napi_complete(napi);
		at91_write(priv, AT91_IER, reg_ier);
	}

	return work_done;
}
/*
* theory of operation:
*
* priv->tx_echo holds the number of the oldest can_frame put for
* transmission into the hardware, but not yet ACKed by the CAN tx
* complete IRQ.
*
* We iterate from priv->tx_echo to priv->tx_next and check if the
* packet has been transmitted, echo it back to the CAN framework. If
 * we discover a not yet transmitted packet, stop looking for more.
*
*/
/*
 * TX-complete interrupt handling: walk the in-flight tx mailboxes from
 * tx_echo to tx_next (see "theory of operation" comment above), echo
 * completed frames back to the stack and wake the queue when possible.
 */
static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr;
	unsigned int mb;

	/* masking of reg_sr not needed, already done by at91_irq */

	for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
		mb = get_tx_echo_mb(priv);

		/* no event in mailbox? */
		if (!(reg_sr & (1 << mb)))
			break;

		/* Disable irq for this TX mailbox */
		at91_write(priv, AT91_IDR, 1 << mb);

		/*
		 * only echo if mailbox signals us a transfer
		 * complete (MSR_MRDY). Otherwise it's a transfer
		 * abort. "can_bus_off()" takes care about the skbs
		 * parked in the echo queue.
		 */
		reg_msr = at91_read(priv, AT91_MSR(mb));
		if (likely(reg_msr & AT91_MSR_MRDY &&
			   ~reg_msr & AT91_MSR_MABT)) {
			/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
			can_get_echo_skb(dev, mb - get_mb_tx_first(priv));
			dev->stats.tx_packets++;
		}
	}

	/*
	 * restart queue if we don't have a wrap around but restart if
	 * we get a TX int for the last can frame directly before a
	 * wrap around.
	 */
	if ((priv->tx_next & get_next_mask(priv)) != 0 ||
	    (priv->tx_echo & get_next_mask(priv)) == 0)
		netif_wake_queue(dev);
}
/*
 * Handle a CAN error-state transition: fill the error frame @cf
 * according to old (priv->can.state) and new state, update statistics,
 * and reprogram the state-change interrupt mask so only the next
 * possible transition can fire. The first switch deliberately falls
 * through from ERROR_ACTIVE into ERROR_WARNING handling.
 */
static void at91_irq_err_state(struct net_device *dev,
		struct can_frame *cf, enum can_state new_state)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_idr = 0, reg_ier = 0;
	struct can_berr_counter bec;

	at91_get_berr_counter(dev, &bec);

	switch (priv->can.state) {
	case CAN_STATE_ERROR_ACTIVE:
		/*
		 * from: ERROR_ACTIVE
		 * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
		 * =>  : there was a warning int
		 */
		if (new_state >= CAN_STATE_ERROR_WARNING &&
		    new_state <= CAN_STATE_BUS_OFF) {
			netdev_dbg(dev, "Error Warning IRQ\n");
			priv->can.can_stats.error_warning++;

			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (bec.txerr > bec.rxerr) ?
				CAN_ERR_CRTL_TX_WARNING :
				CAN_ERR_CRTL_RX_WARNING;
		}
	case CAN_STATE_ERROR_WARNING:	/* fallthrough */
		/*
		 * from: ERROR_ACTIVE, ERROR_WARNING
		 * to  : ERROR_PASSIVE, BUS_OFF
		 * =>  : error passive int
		 */
		if (new_state >= CAN_STATE_ERROR_PASSIVE &&
		    new_state <= CAN_STATE_BUS_OFF) {
			netdev_dbg(dev, "Error Passive IRQ\n");
			priv->can.can_stats.error_passive++;

			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (bec.txerr > bec.rxerr) ?
				CAN_ERR_CRTL_TX_PASSIVE :
				CAN_ERR_CRTL_RX_PASSIVE;
		}
		break;
	case CAN_STATE_BUS_OFF:
		/*
		 * from: BUS_OFF
		 * to  : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
		 */
		if (new_state <= CAN_STATE_ERROR_PASSIVE) {
			cf->can_id |= CAN_ERR_RESTARTED;

			netdev_dbg(dev, "restarted\n");
			priv->can.can_stats.restarts++;

			netif_carrier_on(dev);
			netif_wake_queue(dev);
		}
		break;
	default:
		break;
	}

	/* process state changes depending on the new state */
	switch (new_state) {
	case CAN_STATE_ERROR_ACTIVE:
		/*
		 * actually we want to enable AT91_IRQ_WARN here, but
		 * it screws up the system under certain
		 * circumstances. so just enable AT91_IRQ_ERRP, thus
		 * the "fallthrough"
		 */
		netdev_dbg(dev, "Error Active\n");
		cf->can_id |= CAN_ERR_PROT;
		cf->data[2] = CAN_ERR_PROT_ACTIVE;
	case CAN_STATE_ERROR_WARNING:	/* fallthrough */
		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
		reg_ier = AT91_IRQ_ERRP;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
		reg_ier = AT91_IRQ_BOFF;
		break;
	case CAN_STATE_BUS_OFF:
		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
			AT91_IRQ_WARN | AT91_IRQ_BOFF;
		reg_ier = 0;

		cf->can_id |= CAN_ERR_BUSOFF;

		netdev_dbg(dev, "bus-off\n");
		netif_carrier_off(dev);
		priv->can.can_stats.bus_off++;

		/* turn off chip, if restart is disabled */
		if (!priv->can.restart_ms) {
			at91_chip_stop(dev, CAN_STATE_BUS_OFF);
			return;
		}
		break;
	default:
		break;
	}

	at91_write(priv, AT91_IDR, reg_idr);
	at91_write(priv, AT91_IER, reg_ier);
}
/*
 * Derive the CAN error state from the rx/tx error counters using the
 * standard thresholds (96: warning, 128: passive, 256: bus-off).
 * Returns 0 on success or the error from at91_get_berr_counter().
 */
static int at91_get_state_by_bec(const struct net_device *dev,
		enum can_state *state)
{
	struct can_berr_counter bec;
	int err = at91_get_berr_counter(dev, &bec);

	if (err)
		return err;

	if (bec.txerr >= 256 || bec.rxerr >= 256)
		*state = CAN_STATE_BUS_OFF;
	else if (bec.txerr >= 128 || bec.rxerr >= 128)
		*state = CAN_STATE_ERROR_PASSIVE;
	else if (bec.txerr >= 96 || bec.rxerr >= 96)
		*state = CAN_STATE_ERROR_WARNING;
	else
		*state = CAN_STATE_ERROR_ACTIVE;

	return 0;
}
/*
 * Detect CAN state changes and report them as error frames. On SAM9263
 * the state is taken from the unmasked status register bits; on other
 * variants it is derived from the error counters.
 */
static void at91_irq_err(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	struct can_frame *cf;
	enum can_state new_state;
	u32 reg_sr;
	int err;

	if (at91_is_sam9263(priv)) {
		reg_sr = at91_read(priv, AT91_SR);

		/* we need to look at the unmasked reg_sr */
		if (unlikely(reg_sr & AT91_IRQ_BOFF))
			new_state = CAN_STATE_BUS_OFF;
		else if (unlikely(reg_sr & AT91_IRQ_ERRP))
			new_state = CAN_STATE_ERROR_PASSIVE;
		else if (unlikely(reg_sr & AT91_IRQ_WARN))
			new_state = CAN_STATE_ERROR_WARNING;
		else if (likely(reg_sr & AT91_IRQ_ERRA))
			new_state = CAN_STATE_ERROR_ACTIVE;
		else {
			netdev_err(dev, "BUG! hardware in undefined state\n");
			return;
		}
	} else {
		err = at91_get_state_by_bec(dev, &new_state);
		if (err)
			return;
	}

	/* state hasn't changed */
	if (likely(new_state == priv->can.state))
		return;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return;

	at91_irq_err_state(dev, cf, new_state);
	netif_rx(skb);

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cf->can_dlc;

	priv->can.state = new_state;
}
/*
 * interrupt handler: dispatches rx/frame-error work to napi (masking
 * those irqs until the poll re-enables them), handles tx completions
 * directly and checks for error-state changes.
 */
static irqreturn_t at91_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct at91_priv *priv = netdev_priv(dev);
	irqreturn_t handled = IRQ_NONE;
	u32 reg_sr, reg_imr;

	reg_sr = at91_read(priv, AT91_SR);
	reg_imr = at91_read(priv, AT91_IMR);

	/* Ignore masked interrupts */
	reg_sr &= reg_imr;
	if (!reg_sr)
		goto exit;

	handled = IRQ_HANDLED;

	/* Receive or error interrupt? -> napi */
	if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) {
		/*
		 * The error bits are clear on read,
		 * save for later use.
		 */
		priv->reg_sr = reg_sr;
		at91_write(priv, AT91_IDR,
			   get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME);
		napi_schedule(&priv->napi);
	}

	/* Transmission complete interrupt */
	if (reg_sr & get_irq_mb_tx(priv))
		at91_irq_tx(dev, reg_sr);

	at91_irq_err(dev);

 exit:
	return handled;
}
/*
 * ndo_open: enable the clock, let the CAN framework validate/apply the
 * bit timing, install the (shared) interrupt handler and start the
 * chip, napi and the tx queue. Undoes everything on failure.
 */
static int at91_open(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);
	int err;

	clk_enable(priv->clk);

	/* check or determine and set bittime */
	err = open_candev(dev);
	if (err)
		goto out;

	/* register interrupt handler */
	if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
			dev->name, dev)) {
		err = -EAGAIN;
		goto out_close;
	}

	/* start chip and queuing */
	at91_chip_start(dev);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

 out_close:
	close_candev(dev);
 out:
	clk_disable(priv->clk);

	return err;
}
/*
 * stop CAN bus activity
 *
 * ndo_stop: tear down in reverse order of at91_open() — queue, napi,
 * chip, irq, clock, candev.
 */
static int at91_close(struct net_device *dev)
{
	struct at91_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	at91_chip_stop(dev, CAN_STATE_STOPPED);

	free_irq(dev->irq, dev);
	clk_disable(priv->clk);

	close_candev(dev);

	return 0;
}
/*
 * CAN framework mode callback; only CAN_MODE_START (restart after
 * bus-off) is supported.
 */
static int at91_set_mode(struct net_device *dev, enum can_mode mode)
{
	if (mode != CAN_MODE_START)
		return -EOPNOTSUPP;

	at91_chip_start(dev);
	netif_wake_queue(dev);

	return 0;
}
/* net_device callbacks; xmit path is at91_start_xmit() */
static const struct net_device_ops at91_netdev_ops = {
	.ndo_open	= at91_open,
	.ndo_stop	= at91_close,
	.ndo_start_xmit	= at91_start_xmit,
};
static ssize_t at91_sysfs_show_mb0_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct at91_priv *priv = netdev_priv(to_net_dev(dev));
if (priv->mb0_id & CAN_EFF_FLAG)
return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
else
return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
}
/*
 * sysfs "mb0_id" store: set the CAN id for mailbox 0. Only allowed
 * while the interface is down (guarded under the rtnl lock). The value
 * is masked to a valid standard or extended id.
 *
 * Use kstrtoul() for the conversion; strict_strtoul() is the
 * deprecated compatibility alias for it.
 */
static ssize_t at91_sysfs_set_mb0_id(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct net_device *ndev = to_net_dev(dev);
	struct at91_priv *priv = netdev_priv(ndev);
	unsigned long can_id;
	ssize_t ret;
	int err;

	rtnl_lock();

	if (ndev->flags & IFF_UP) {
		ret = -EBUSY;
		goto out;
	}

	err = kstrtoul(buf, 0, &can_id);
	if (err) {
		ret = err;
		goto out;
	}

	if (can_id & CAN_EFF_FLAG)
		can_id &= CAN_EFF_MASK | CAN_EFF_FLAG;
	else
		can_id &= CAN_SFF_MASK;

	priv->mb0_id = can_id;
	ret = count;

 out:
	rtnl_unlock();
	return ret;
}
/* "mb0_id" attribute, only registered on SAM9263 (see probe) */
static DEVICE_ATTR(mb0_id, S_IWUSR | S_IRUGO,
	at91_sysfs_show_mb0_id, at91_sysfs_set_mb0_id);

static struct attribute *at91_sysfs_attrs[] = {
	&dev_attr_mb0_id.attr,
	NULL,
};

static struct attribute_group at91_sysfs_attr_group = {
	.attrs = at91_sysfs_attrs,
};
/*
 * Probe: acquire clock, MMIO region and irq, allocate the candev with
 * one echo skb per tx mailbox, fill in the private data and register
 * the device. Error paths unwind in reverse acquisition order.
 */
static int __devinit at91_can_probe(struct platform_device *pdev)
{
	const struct at91_devtype_data *devtype_data;
	enum at91_devtype devtype;
	struct net_device *dev;
	struct at91_priv *priv;
	struct resource *res;
	struct clk *clk;
	void __iomem *addr;
	int err, irq;

	devtype = pdev->id_entry->driver_data;
	devtype_data = &at91_devtype_data[devtype];

	clk = clk_get(&pdev->dev, "can_clk");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "no clock defined\n");
		err = -ENODEV;
		goto exit;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq <= 0) {
		err = -ENODEV;
		goto exit_put;
	}

	if (!request_mem_region(res->start,
				resource_size(res),
				pdev->name)) {
		err = -EBUSY;
		goto exit_put;
	}

	addr = ioremap_nocache(res->start, resource_size(res));
	if (!addr) {
		err = -ENOMEM;
		goto exit_release;
	}

	/* one echo skb per tx mailbox */
	dev = alloc_candev(sizeof(struct at91_priv),
			   1 << devtype_data->tx_shift);
	if (!dev) {
		err = -ENOMEM;
		goto exit_iounmap;
	}

	dev->netdev_ops = &at91_netdev_ops;
	dev->irq = irq;
	dev->flags |= IFF_ECHO;

	priv = netdev_priv(dev);
	priv->can.clock.freq = clk_get_rate(clk);
	priv->can.bittiming_const = &at91_bittiming_const;
	priv->can.do_set_mode = at91_set_mode;
	priv->can.do_get_berr_counter = at91_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
	priv->dev = dev;
	priv->reg_base = addr;
	priv->devtype_data = *devtype_data;
	priv->devtype_data.type = devtype;
	priv->clk = clk;
	priv->pdata = pdev->dev.platform_data;
	priv->mb0_id = 0x7ff;	/* default id for the errata mailbox */

	netif_napi_add(dev, &priv->napi, at91_poll, get_mb_rx_num(priv));

	/* mb0_id is only configurable where mailbox 0 exists but is dead */
	if (at91_is_sam9263(priv))
		dev->sysfs_groups[0] = &at91_sysfs_attr_group;

	dev_set_drvdata(&pdev->dev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = register_candev(dev);
	if (err) {
		dev_err(&pdev->dev, "registering netdev failed\n");
		goto exit_free;
	}

	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
		 priv->reg_base, dev->irq);

	return 0;

 exit_free:
	free_candev(dev);
 exit_iounmap:
	iounmap(addr);
 exit_release:
	release_mem_region(res->start, resource_size(res));
 exit_put:
	clk_put(clk);
 exit:
	return err;
}
/* Remove: release everything acquired by at91_can_probe() */
static int __devexit at91_can_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct at91_priv *priv = netdev_priv(dev);
	struct resource *res;

	unregister_netdev(dev);

	platform_set_drvdata(pdev, NULL);

	iounmap(priv->reg_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	clk_put(priv->clk);

	free_candev(dev);

	return 0;
}
/* Matches the platform devices registered by the board code */
static const struct platform_device_id at91_can_id_table[] = {
	{
		.name = "at91_can",
		.driver_data = AT91_DEVTYPE_SAM9263,
	}, {
		.name = "at91sam9x5_can",
		.driver_data = AT91_DEVTYPE_SAM9X5,
	}, {
		/* sentinel */
	}
};

static struct platform_driver at91_can_driver = {
	.probe = at91_can_probe,
	.remove = __devexit_p(at91_can_remove),
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.id_table = at91_can_id_table,
};

module_platform_driver(at91_can_driver);

MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
| gpl-2.0 |
vinay94185vinay/Hybrid | drivers/staging/media/go7007/go7007-driver.c | 5089 | 17050 | /*
* Copyright (C) 2005-2006 Micronas USA Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/unistd.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/firmware.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/tuner.h>
#include <media/v4l2-common.h>
#include "go7007-priv.h"
#include "wis-i2c.h"
/*
* Wait for an interrupt to be delivered from the GO7007SB and return
* the associated value and data.
*
* Must be called with the hw_lock held.
*/
/*
 * Wait (up to 5 s) for an interrupt from the GO7007SB and return the
 * associated value and data.  Returns 0 on success, -1 on timeout or
 * if no interrupt arrived.  Must be called with the hw_lock held.
 */
int go7007_read_interrupt(struct go7007 *go, u16 *value, u16 *data)
{
	go->interrupt_available = 0;
	go->hpi_ops->read_interrupt(go);
	/*
	 * Fix: wait_event_timeout() returns the remaining jiffies (>= 0)
	 * and never a negative value, so the old "< 0" test could never
	 * fire and a timeout was only caught silently by the check below.
	 * Treat a zero return with the condition still false as timeout.
	 */
	if (!wait_event_timeout(go->interrupt_waitq,
				go->interrupt_available, 5*HZ) &&
	    !go->interrupt_available) {
		v4l2_err(&go->v4l2_dev, "timeout waiting for read interrupt\n");
		return -1;
	}
	if (!go->interrupt_available)
		return -1;
	go->interrupt_available = 0;
	/* Bit 0 of the value is part of the handshake, not payload. */
	*value = go->interrupt_value & 0xfffe;
	*data = go->interrupt_data;
	return 0;
}
EXPORT_SYMBOL(go7007_read_interrupt);
/*
* Read a register/address on the GO7007SB.
*
* Must be called with the hw_lock held.
*/
/*
 * Read a register/address on the GO7007SB: post a read request and
 * poll (up to 100 interrupt round-trips) for the 0xa000 completion.
 * Returns 0 on success, -EIO otherwise.
 *
 * Must be called with the hw_lock held.
 */
int go7007_read_addr(struct go7007 *go, u16 addr, u16 *data)
{
	u16 value;
	int tries;

	if (go7007_write_interrupt(go, 0x0010, addr) < 0)
		return -EIO;

	for (tries = 0; tries < 100; ++tries) {
		if (go7007_read_interrupt(go, &value, data) == 0 &&
		    value == 0xa000)
			return 0;
	}

	return -EIO;
}
EXPORT_SYMBOL(go7007_read_addr);
/*
* Send the boot firmware to the encoder, which just wakes it up and lets
* us talk to the GPIO pins and on-board I2C adapter.
*
* Must be called with the hw_lock held.
*/
static int go7007_load_encoder(struct go7007 *go)
{
const struct firmware *fw_entry;
char fw_name[] = "go7007fw.bin";
void *bounce;
int fw_len, rv = 0;
u16 intr_val, intr_data;
if (request_firmware(&fw_entry, fw_name, go->dev)) {
v4l2_err(go, "unable to load firmware from file "
"\"%s\"\n", fw_name);
return -1;
}
if (fw_entry->size < 16 || memcmp(fw_entry->data, "WISGO7007FW", 11)) {
v4l2_err(go, "file \"%s\" does not appear to be "
"go7007 firmware\n", fw_name);
release_firmware(fw_entry);
return -1;
}
fw_len = fw_entry->size - 16;
bounce = kmalloc(fw_len, GFP_KERNEL);
if (bounce == NULL) {
v4l2_err(go, "unable to allocate %d bytes for "
"firmware transfer\n", fw_len);
release_firmware(fw_entry);
return -1;
}
memcpy(bounce, fw_entry->data + 16, fw_len);
release_firmware(fw_entry);
if (go7007_interface_reset(go) < 0 ||
go7007_send_firmware(go, bounce, fw_len) < 0 ||
go7007_read_interrupt(go, &intr_val, &intr_data) < 0 ||
(intr_val & ~0x1) != 0x5a5a) {
v4l2_err(go, "error transferring firmware\n");
rv = -1;
}
kfree(bounce);
return rv;
}
MODULE_FIRMWARE("go7007fw.bin");
/*
* Boot the encoder and register the I2C adapter if requested. Do the
* minimum initialization necessary, since the board-specific code may
* still need to probe the board ID.
*
* Must NOT be called with the hw_lock held.
*/
/*
 * Boot the encoder and, if requested, bring up the on-board I2C
 * adapter.  Returns 0 on success, -1 on failure.
 *
 * Must NOT be called with the hw_lock held.
 */
int go7007_boot_encoder(struct go7007 *go, int init_i2c)
{
	int ret;

	mutex_lock(&go->hw_lock);
	ret = go7007_load_encoder(go);
	mutex_unlock(&go->hw_lock);

	if (ret < 0)
		return -1;

	if (init_i2c) {
		if (go7007_i2c_init(go) < 0)
			return -1;
		go->i2c_adapter_online = 1;
	}
	return 0;
}
EXPORT_SYMBOL(go7007_boot_encoder);
/*
* Configure any hardware-related registers in the GO7007, such as GPIO
* pins and bus parameters, which are board-specific. This assumes
* the boot firmware has already been downloaded.
*
* Must be called with the hw_lock held.
*/
/*
 * Configure board-specific hardware registers (GPIO pins and bus
 * parameters).  Assumes the boot firmware has already been downloaded.
 *
 * Must be called with the hw_lock held.
 */
static int go7007_init_encoder(struct go7007 *go)
{
	if (go->board_info->audio_flags & GO7007_AUDIO_I2S_MASTER) {
		go7007_write_addr(go, 0x1000, 0x0811);
		go7007_write_addr(go, 0x1000, 0x0c11);
	}

	switch (go->board_id) {
	case GO7007_BOARDID_MATRIX_REV:
		/* Set GPIO pin 0 to be an output (audio clock control) */
		go7007_write_addr(go, 0x3c82, 0x0001);
		go7007_write_addr(go, 0x3c80, 0x00fe);
		break;
	default:
		break;
	}
	return 0;
}
/*
* Send the boot firmware to the GO7007 and configure the registers. This
* is the only way to stop the encoder once it has started streaming video.
*
* Must be called with the hw_lock held.
*/
/*
 * Re-send the boot firmware and reconfigure the registers.  This is
 * the only way to stop the encoder once it has started streaming.
 *
 * Must be called with the hw_lock held.
 */
int go7007_reset_encoder(struct go7007 *go)
{
	int ret = go7007_load_encoder(go);

	if (ret < 0)
		return -1;
	return go7007_init_encoder(go);
}
/*
* Attempt to instantiate an I2C client by ID, probably loading a module.
*/
/*
 * Attempt to instantiate an I2C subdevice by type name, probably
 * loading a module.  Returns 0 if the subdevice came up, -1 otherwise.
 */
static int init_i2c_module(struct i2c_adapter *adapter, const char *type,
			   int addr)
{
	struct go7007 *go = i2c_get_adapdata(adapter);

	if (v4l2_i2c_new_subdev(&go->v4l2_dev, adapter, type, addr, NULL) != NULL)
		return 0;

	printk(KERN_INFO "go7007: probing for module i2c:%s failed\n", type);
	return -1;
}
/*
* Finalize the GO7007 hardware setup, register the on-board I2C adapter
* (if used on this board), load the I2C client driver for the sensor
* (SAA7115 or whatever) and other devices, and register the ALSA and V4L2
* interfaces.
*
* Must NOT be called with the hw_lock held.
*/
/*
 * Finalize hardware setup: init the encoder registers, register the
 * V4L2 device, bring up the on-board I2C adapter (if used), load the
 * subdevice drivers, and set up ALSA audio.  The ordering below is
 * deliberate -- V4L2 must exist before subdevices attach to it.
 *
 * Must NOT be called with the hw_lock held.
 */
int go7007_register_encoder(struct go7007 *go)
{
	int i, ret;

	printk(KERN_INFO "go7007: registering new %s\n", go->name);

	mutex_lock(&go->hw_lock);
	ret = go7007_init_encoder(go);
	mutex_unlock(&go->hw_lock);
	if (ret < 0)
		return -1;

	/* v4l2 init must happen before i2c subdevs */
	ret = go7007_v4l2_init(go);
	if (ret < 0)
		return ret;

	/* Bring up the on-board I2C bus if the board needs it and the
	 * boot path has not already done so. */
	if (!go->i2c_adapter_online &&
	    go->board_info->flags & GO7007_BOARD_USE_ONBOARD_I2C) {
		if (go7007_i2c_init(go) < 0)
			return -1;
		go->i2c_adapter_online = 1;
	}
	if (go->i2c_adapter_online) {
		/* Instantiate every subdevice listed in the board info. */
		for (i = 0; i < go->board_info->num_i2c_devs; ++i)
			init_i2c_module(&go->i2c_adapter,
					go->board_info->i2c_devs[i].type,
					go->board_info->i2c_devs[i].addr);
		if (go->board_id == GO7007_BOARDID_ADLINK_MPG24)
			i2c_clients_command(&go->i2c_adapter,
				DECODER_SET_CHANNEL, &go->channel_number);
	}
	if (go->board_info->flags & GO7007_BOARD_HAS_AUDIO) {
		go->audio_enabled = 1;
		go7007_snd_init(go);
	}
	return 0;
}
EXPORT_SYMBOL(go7007_register_encoder);
/*
* Send the encode firmware to the encoder, which will cause it
* to immediately start delivering the video and audio streams.
*
* Must be called with the hw_lock held.
*/
/*
 * Send the encode firmware to the encoder, which will cause it to
 * immediately start delivering the video and audio streams.  Returns
 * 0 on success, -1 on failure.
 *
 * Must be called with the hw_lock held.
 */
int go7007_start_encoder(struct go7007 *go)
{
	u8 *fw;
	int fw_len, rv = 0, i;
	u16 intr_val, intr_data;

	go->modet_enable = 0;
	/* Motion detection is unavailable in DVD mode.  Regions left
	 * disabled get their thresholds maxed out so they never fire. */
	if (!go->dvd_mode)
		for (i = 0; i < 4; ++i) {
			if (go->modet[i].enable) {
				go->modet_enable = 1;
				continue;
			}
			go->modet[i].pixel_threshold = 32767;
			go->modet[i].motion_threshold = 32767;
			go->modet[i].mb_threshold = 32767;
		}

	if (go7007_construct_fw_image(go, &fw, &fw_len) < 0)
		return -1;

	if (go7007_send_firmware(go, fw, fw_len) < 0 ||
	    go7007_read_interrupt(go, &intr_val, &intr_data) < 0) {
		v4l2_err(&go->v4l2_dev, "error transferring firmware\n");
		rv = -1;
		goto start_error;
	}

	/* Reset the stream parser before data starts flowing. */
	go->state = STATE_DATA;
	go->parse_length = 0;
	go->seen_frame = 0;
	if (go7007_stream_start(go) < 0) {
		v4l2_err(&go->v4l2_dev, "error starting stream transfer\n");
		rv = -1;
		goto start_error;
	}

start_error:
	/* Reached on success too: the firmware image is always freed. */
	kfree(fw);
	return rv;
}
/*
* Store a byte in the current video buffer, if there is one.
*/
/*
 * Store one byte in the current video buffer, silently dropping it if
 * there is no buffer or the buffer is full.  The buffer is a list of
 * pages, so the write offset is split into page index + page offset.
 */
static inline void store_byte(struct go7007_buffer *gobuf, u8 byte)
{
	unsigned int pgidx, pgoff;
	u8 *dst;

	if (gobuf == NULL || gobuf->bytesused >= GO7007_BUF_SIZE)
		return;

	pgidx = gobuf->offset >> PAGE_SHIFT;
	pgoff = gobuf->offset & ~PAGE_MASK;
	dst = (u8 *)page_address(gobuf->pages[pgidx]) + pgoff;
	*dst = byte;

	gobuf->offset++;
	gobuf->bytesused++;
}
/*
* Deliver the last video buffer and get a new one to start writing to.
*/
/*
 * Deliver the last video buffer and pick a new queued one to start
 * writing to (go->active_buf stays NULL if none is queued).
 */
static void frame_boundary(struct go7007 *go)
{
	struct go7007_buffer *gobuf;
	int i;

	if (go->active_buf) {
		/* Append the 216-byte motion-detection map past the end
		 * of the frame if it fits; bytesused is rolled back so
		 * the map is not counted as visible payload. */
		if (go->active_buf->modet_active) {
			if (go->active_buf->bytesused + 216 < GO7007_BUF_SIZE) {
				for (i = 0; i < 216; ++i)
					store_byte(go->active_buf,
							go->active_map[i]);
				go->active_buf->bytesused -= 216;
			} else
				go->active_buf->modet_active = 0;
		}
		go->active_buf->state = BUF_STATE_DONE;
		wake_up_interruptible(&go->frame_waitq);
		go->active_buf = NULL;
	}
	/* First queued buffer becomes the new destination, if any. */
	list_for_each_entry(gobuf, &go->stream, stream)
		if (gobuf->state == BUF_STATE_QUEUED) {
			gobuf->seq = go->next_seq;
			do_gettimeofday(&gobuf->timestamp);
			go->active_buf = gobuf;
			break;
		}
	/* The sequence number advances even if the frame was dropped. */
	++go->next_seq;
}
/*
 * Unpack the current 16-bit motion-detection word (go->modet_word)
 * into the per-macroblock bitmap go->active_map.  parse_length counts
 * bytes consumed so far, so (parse_length - 1) * 8 is the index of
 * the first macroblock covered by this word; x/y are macroblock
 * coordinates with (width >> 4) macroblocks per row.
 */
static void write_bitmap_word(struct go7007 *go)
{
	int x, y, i, stride = ((go->width >> 4) + 7) >> 3;

	for (i = 0; i < 16; ++i) {
		y = (((go->parse_length - 1) << 3) + i) / (go->width >> 4);
		x = (((go->parse_length - 1) << 3) + i) % (go->width >> 4);
		/* Bounds check keeps a corrupt stream from writing past
		 * the end of active_map. */
		if (stride * y + (x >> 3) < sizeof(go->active_map))
			go->active_map[stride * y + (x >> 3)] |=
					(go->modet_word & 1) << (x & 0x7);
		go->modet_word >>= 1;
	}
}
/*
* Parse a chunk of the video stream into frames. The frames are not
* delimited by the hardware, so we have to parse the frame boundaries
* based on the type of video stream we're receiving.
*/
/*
 * Parse a chunk of the video stream into frames.  The hardware does
 * not delimit frames, so boundaries are recovered from start codes;
 * go->state remembers the last few bytes seen across calls so that a
 * start code split over two chunks is still recognized.
 */
void go7007_parse_video_stream(struct go7007 *go, u8 *buf, int length)
{
	int i, seq_start_code = -1, frame_start_code = -1;

	spin_lock(&go->spinlock);

	/* Select the per-format sequence/frame start codes. */
	switch (go->format) {
	case GO7007_FORMAT_MPEG4:
		seq_start_code = 0xB0;
		frame_start_code = 0xB6;
		break;
	case GO7007_FORMAT_MPEG1:
	case GO7007_FORMAT_MPEG2:
		seq_start_code = 0xB3;
		frame_start_code = 0x00;
		break;
	}

	for (i = 0; i < length; ++i) {
		/* Drop frames that would overflow the capture buffer
		 * (3 bytes of headroom for a pending start code). */
		if (go->active_buf != NULL &&
		    go->active_buf->bytesused >= GO7007_BUF_SIZE - 3) {
			v4l2_info(&go->v4l2_dev, "dropping oversized frame\n");
			go->active_buf->offset -= go->active_buf->bytesused;
			go->active_buf->bytesused = 0;
			go->active_buf->modet_active = 0;
			go->active_buf = NULL;
		}
		switch (go->state) {
		case STATE_DATA:
			switch (buf[i]) {
			case 0x00:
				go->state = STATE_00;
				break;
			case 0xFF:
				go->state = STATE_FF;
				break;
			default:
				store_byte(go->active_buf, buf[i]);
				break;
			}
			break;
		case STATE_00:
			switch (buf[i]) {
			case 0x00:
				go->state = STATE_00_00;
				break;
			case 0xFF:
				store_byte(go->active_buf, 0x00);
				go->state = STATE_FF;
				break;
			default:
				/* Not a start code: flush the held 0x00. */
				store_byte(go->active_buf, 0x00);
				store_byte(go->active_buf, buf[i]);
				go->state = STATE_DATA;
				break;
			}
			break;
		case STATE_00_00:
			switch (buf[i]) {
			case 0x00:
				store_byte(go->active_buf, 0x00);
				/* go->state remains STATE_00_00 */
				break;
			case 0x01:
				go->state = STATE_00_00_01;
				break;
			case 0xFF:
				store_byte(go->active_buf, 0x00);
				store_byte(go->active_buf, 0x00);
				go->state = STATE_FF;
				break;
			default:
				store_byte(go->active_buf, 0x00);
				store_byte(go->active_buf, 0x00);
				store_byte(go->active_buf, buf[i]);
				go->state = STATE_DATA;
				break;
			}
			break;
		case STATE_00_00_01:
			/* We have seen 00 00 01; buf[i] is the code byte. */
			if (buf[i] == 0xF8 && go->modet_enable == 0) {
				/* MODET start code, but MODET not enabled */
				store_byte(go->active_buf, 0x00);
				store_byte(go->active_buf, 0x00);
				store_byte(go->active_buf, 0x01);
				store_byte(go->active_buf, 0xF8);
				go->state = STATE_DATA;
				break;
			}
			/* If this is the start of a new MPEG frame,
			 * get a new buffer */
			if ((go->format == GO7007_FORMAT_MPEG1 ||
			     go->format == GO7007_FORMAT_MPEG2 ||
			     go->format == GO7007_FORMAT_MPEG4) &&
			    (buf[i] == seq_start_code ||
			     buf[i] == 0xB8 || /* GOP code */
			     buf[i] == frame_start_code)) {
				if (go->active_buf == NULL || go->seen_frame)
					frame_boundary(go);
				if (buf[i] == frame_start_code) {
					if (go->active_buf != NULL)
						go->active_buf->frame_offset =
							go->active_buf->offset;
					go->seen_frame = 1;
				} else {
					go->seen_frame = 0;
				}
			}
			/* Handle any special chunk types, or just write the
			 * start code to the (potentially new) buffer */
			switch (buf[i]) {
			case 0xF5: /* timestamp */
				go->parse_length = 12;
				go->state = STATE_UNPARSED;
				break;
			case 0xF6: /* vbi */
				go->state = STATE_VBI_LEN_A;
				break;
			case 0xF8: /* MD map */
				go->parse_length = 0;
				memset(go->active_map, 0,
						sizeof(go->active_map));
				go->state = STATE_MODET_MAP;
				break;
			case 0xFF: /* Potential JPEG start code */
				store_byte(go->active_buf, 0x00);
				store_byte(go->active_buf, 0x00);
				store_byte(go->active_buf, 0x01);
				go->state = STATE_FF;
				break;
			default:
				store_byte(go->active_buf, 0x00);
				store_byte(go->active_buf, 0x00);
				store_byte(go->active_buf, 0x01);
				store_byte(go->active_buf, buf[i]);
				go->state = STATE_DATA;
				break;
			}
			break;
		case STATE_FF:
			switch (buf[i]) {
			case 0x00:
				store_byte(go->active_buf, 0xFF);
				go->state = STATE_00;
				break;
			case 0xFF:
				store_byte(go->active_buf, 0xFF);
				/* go->state remains STATE_FF */
				break;
			case 0xD8:
				/* FF D8 = JPEG SOI marker. */
				if (go->format == GO7007_FORMAT_MJPEG)
					frame_boundary(go);
				/* fall through */
			default:
				store_byte(go->active_buf, 0xFF);
				store_byte(go->active_buf, buf[i]);
				go->state = STATE_DATA;
				break;
			}
			break;
		case STATE_VBI_LEN_A:
			/* High byte of the VBI chunk length. */
			go->parse_length = buf[i] << 8;
			go->state = STATE_VBI_LEN_B;
			break;
		case STATE_VBI_LEN_B:
			/* Low byte; skip that many bytes unparsed. */
			go->parse_length |= buf[i];
			if (go->parse_length > 0)
				go->state = STATE_UNPARSED;
			else
				go->state = STATE_DATA;
			break;
		case STATE_MODET_MAP:
			/* 204 bytes of bitmap (16-bit big-endian words);
			 * byte 207 flags whether motion was detected. */
			if (go->parse_length < 204) {
				if (go->parse_length & 1) {
					go->modet_word |= buf[i];
					write_bitmap_word(go);
				} else
					go->modet_word = buf[i] << 8;
			} else if (go->parse_length == 207 && go->active_buf) {
				go->active_buf->modet_active = buf[i];
			}
			if (++go->parse_length == 208)
				go->state = STATE_DATA;
			break;
		case STATE_UNPARSED:
			if (--go->parse_length == 0)
				go->state = STATE_DATA;
			break;
		}
	}

	spin_unlock(&go->spinlock);
}
EXPORT_SYMBOL(go7007_parse_video_stream);
/*
* Allocate a new go7007 struct. Used by the hardware-specific probe.
*/
struct go7007 *go7007_alloc(struct go7007_board_info *board, struct device *dev)
{
struct go7007 *go;
int i;
go = kmalloc(sizeof(struct go7007), GFP_KERNEL);
if (go == NULL)
return NULL;
go->dev = dev;
go->board_info = board;
go->board_id = 0;
go->tuner_type = -1;
go->channel_number = 0;
go->name[0] = 0;
mutex_init(&go->hw_lock);
init_waitqueue_head(&go->frame_waitq);
spin_lock_init(&go->spinlock);
go->video_dev = NULL;
go->ref_count = 0;
go->status = STATUS_INIT;
memset(&go->i2c_adapter, 0, sizeof(go->i2c_adapter));
go->i2c_adapter_online = 0;
go->interrupt_available = 0;
init_waitqueue_head(&go->interrupt_waitq);
go->in_use = 0;
go->input = 0;
if (board->sensor_flags & GO7007_SENSOR_TV) {
go->standard = GO7007_STD_NTSC;
go->width = 720;
go->height = 480;
go->sensor_framerate = 30000;
} else {
go->standard = GO7007_STD_OTHER;
go->width = board->sensor_width;
go->height = board->sensor_height;
go->sensor_framerate = board->sensor_framerate;
}
go->encoder_v_offset = board->sensor_v_offset;
go->encoder_h_offset = board->sensor_h_offset;
go->encoder_h_halve = 0;
go->encoder_v_halve = 0;
go->encoder_subsample = 0;
go->streaming = 0;
go->format = GO7007_FORMAT_MJPEG;
go->bitrate = 1500000;
go->fps_scale = 1;
go->pali = 0;
go->aspect_ratio = GO7007_RATIO_1_1;
go->gop_size = 0;
go->ipb = 0;
go->closed_gop = 0;
go->repeat_seqhead = 0;
go->seq_header_enable = 0;
go->gop_header_enable = 0;
go->dvd_mode = 0;
go->interlace_coding = 0;
for (i = 0; i < 4; ++i)
go->modet[i].enable = 0;
for (i = 0; i < 1624; ++i)
go->modet_map[i] = 0;
go->audio_deliver = NULL;
go->audio_enabled = 0;
INIT_LIST_HEAD(&go->stream);
return go;
}
EXPORT_SYMBOL(go7007_alloc);
/*
* Detach and unregister the encoder. The go7007 struct won't be freed
* until v4l2 finishes releasing its resources and all associated fds are
* closed by applications.
*/
/*
 * Detach and unregister the encoder.  The go7007 struct itself is not
 * freed here; V4L2 releases it once all associated fds are closed.
 */
void go7007_remove(struct go7007 *go)
{
	if (go->i2c_adapter_online) {
		int err = i2c_del_adapter(&go->i2c_adapter);

		if (err == 0)
			go->i2c_adapter_online = 0;
		else
			v4l2_err(&go->v4l2_dev,
					"error removing I2C adapter!\n");
	}

	if (go->audio_enabled)
		go7007_snd_remove(go);

	go7007_v4l2_remove(go);
}
EXPORT_SYMBOL(go7007_remove);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
santod/android_kernel_htc_dlxwl | drivers/net/wireless/atmel_cs.c | 5089 | 10247 | /*** -*- linux-c -*- **********************************************************
Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
Copyright 2000-2001 ATMEL Corporation.
Copyright 2003 Simon Kelley.
This code was developed from version 2.1.1 of the Atmel drivers,
released by Atmel corp. under the GPL in December 2002. It also
includes code from the Linux aironet drivers (C) Benjamin Reed,
and the Linux PCMCIA package, (C) David Hinds.
For all queries about this code, please contact the current author,
Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Atmel wireless lan drivers; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
******************************************************************************/
#ifdef __IN_PCMCIA_PACKAGE__
#include <pcmcia/k_compat.h>
#endif
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
#include <pcmcia/ciscode.h>
#include <asm/io.h>
#include <linux/wireless.h>
#include "atmel.h"
/*====================================================================*/
MODULE_AUTHOR("Simon Kelley");
MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards");
/*====================================================================*/
static int atmel_config(struct pcmcia_device *link);
static void atmel_release(struct pcmcia_device *link);
static void atmel_detach(struct pcmcia_device *p_dev);
/* Per-socket private state: the net_device created by the atmel core. */
typedef struct local_info_t {
	struct net_device *eth_dev;
} local_info_t;
static int atmel_probe(struct pcmcia_device *p_dev)
{
local_info_t *local;
dev_dbg(&p_dev->dev, "atmel_attach()\n");
/* Allocate space for private device-specific data */
local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
if (!local) {
printk(KERN_ERR "atmel_cs: no memory for new device\n");
return -ENOMEM;
}
p_dev->priv = local;
return atmel_config(p_dev);
} /* atmel_attach */
/* PCMCIA remove: shut the card down, then free the private data. */
static void atmel_detach(struct pcmcia_device *link)
{
	dev_dbg(&link->dev, "atmel_detach\n");

	atmel_release(link);

	kfree(link->priv);
}
/* Call-back function to interrogate PCMCIA-specific information
about the current existence of the card */
/* Call-back function to interrogate PCMCIA-specific information
   about the current existence of the card */
static int card_present(void *arg)
{
	struct pcmcia_device *link = arg;

	return pcmcia_dev_present(link) ? 1 : 0;
}
/* pcmcia_loop_config() callback: configuration index 0 is never a
 * usable entry, otherwise try to claim the I/O resources. */
static int atmel_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
	return p_dev->config_index ? pcmcia_request_io(p_dev) : -EINVAL;
}
static int atmel_config(struct pcmcia_device *link)
{
local_info_t *dev;
int ret;
const struct pcmcia_device_id *did;
dev = link->priv;
did = dev_get_drvdata(&link->dev);
dev_dbg(&link->dev, "atmel_config\n");
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
CONF_AUTO_AUDIO | CONF_AUTO_SET_IO;
if (pcmcia_loop_config(link, atmel_config_check, NULL))
goto failed;
if (!link->irq) {
dev_err(&link->dev, "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
goto failed;
}
ret = pcmcia_enable_device(link);
if (ret)
goto failed;
((local_info_t*)link->priv)->eth_dev =
init_atmel_card(link->irq,
link->resource[0]->start,
did ? did->driver_info : ATMEL_FW_TYPE_NONE,
&link->dev,
card_present,
link);
if (!((local_info_t*)link->priv)->eth_dev)
goto failed;
return 0;
failed:
atmel_release(link);
return -ENODEV;
}
static void atmel_release(struct pcmcia_device *link)
{
struct net_device *dev = ((local_info_t*)link->priv)->eth_dev;
dev_dbg(&link->dev, "atmel_release\n");
if (dev)
stop_atmel_card(dev);
((local_info_t*)link->priv)->eth_dev = NULL;
pcmcia_disable_device(link);
}
static int atmel_suspend(struct pcmcia_device *link)
{
local_info_t *local = link->priv;
netif_device_detach(local->eth_dev);
return 0;
}
/* Re-open the hardware and mark the net device present again. */
static int atmel_resume(struct pcmcia_device *link)
{
	local_info_t *local = link->priv;

	atmel_open(local->eth_dev);

	netif_device_attach(local->eth_dev);

	return 0;
}
/*====================================================================*/
/* We use the driver_info field to store the correct firmware type for a card. */
/* Match on manufacturer/card ID; the firmware type for the card is
 * smuggled through the id table's driver_info field. */
#define PCMCIA_DEVICE_MANF_CARD_INFO(manf, card, info) { \
	.match_flags = PCMCIA_DEV_ID_MATCH_MANF_ID| \
			PCMCIA_DEV_ID_MATCH_CARD_ID, \
	.manf_id = (manf), \
	.card_id = (card), \
	.driver_info = (kernel_ulong_t)(info), }

/* Match on the first two product-ID strings (plus their hashes). */
#define PCMCIA_DEVICE_PROD_ID12_INFO(v1, v2, vh1, vh2, info) { \
	.match_flags = PCMCIA_DEV_ID_MATCH_PROD_ID1| \
			PCMCIA_DEV_ID_MATCH_PROD_ID2, \
	.prod_id = { (v1), (v2), NULL, NULL }, \
	.prod_id_hash = { (vh1), (vh2), 0, 0 }, \
	.driver_info = (kernel_ulong_t)(info), }
/* Known cards, each tagged with the firmware variant it needs. */
static const struct pcmcia_device_id atmel_ids[] = {
	PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0620, ATMEL_FW_TYPE_502_3COM),
	PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0696, ATMEL_FW_TYPE_502_3COM),
	PCMCIA_DEVICE_MANF_CARD_INFO(0x01bf, 0x3302, ATMEL_FW_TYPE_502E),
	PCMCIA_DEVICE_MANF_CARD_INFO(0xd601, 0x0007, ATMEL_FW_TYPE_502),
	PCMCIA_DEVICE_PROD_ID12_INFO("11WAVE", "11WP611AL-E", 0x9eb2da1f, 0xc9a0d3f9, ATMEL_FW_TYPE_502E),
	PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR", 0xabda4164, 0x41b37e1f, ATMEL_FW_TYPE_502),
	PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR_D", 0xabda4164, 0x3675d704, ATMEL_FW_TYPE_502D),
	PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR_E", 0xabda4164, 0x4172e792, ATMEL_FW_TYPE_502E),
	PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504_R", 0xabda4164, 0x917f3d72, ATMEL_FW_TYPE_504_2958),
	PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504", 0xabda4164, 0x5040670a, ATMEL_FW_TYPE_504),
	PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504A", 0xabda4164, 0xe15ed87f, ATMEL_FW_TYPE_504A_2958),
	PCMCIA_DEVICE_PROD_ID12_INFO("BT", "Voyager 1020 Laptop Adapter", 0xae49b86a, 0x1e957cd5, ATMEL_FW_TYPE_502),
	PCMCIA_DEVICE_PROD_ID12_INFO("CNet", "CNWLC 11Mbps Wireless PC Card V-5", 0xbc477dde, 0x502fae6b, ATMEL_FW_TYPE_502E),
	PCMCIA_DEVICE_PROD_ID12_INFO("IEEE 802.11b", "Wireless LAN PC Card", 0x5b878724, 0x122f1df6, ATMEL_FW_TYPE_502),
	PCMCIA_DEVICE_PROD_ID12_INFO("IEEE 802.11b", "Wireless LAN Card S", 0x5b878724, 0x5fba533a, ATMEL_FW_TYPE_504_2958),
	PCMCIA_DEVICE_PROD_ID12_INFO("OEM", "11Mbps Wireless LAN PC Card V-3", 0xfea54c90, 0x1c5b0f68, ATMEL_FW_TYPE_502),
	PCMCIA_DEVICE_PROD_ID12_INFO("SMC", "2632W", 0xc4f8b18b, 0x30f38774, ATMEL_FW_TYPE_502D),
	PCMCIA_DEVICE_PROD_ID12_INFO("SMC", "2632W-V2", 0xc4f8b18b, 0x172d1377, ATMEL_FW_TYPE_502),
	PCMCIA_DEVICE_PROD_ID12_INFO("Wireless", "PC_CARD", 0xa407ecdd, 0x119f6314, ATMEL_FW_TYPE_502D),
	PCMCIA_DEVICE_PROD_ID12_INFO("WLAN", "802.11b PC CARD", 0x575c516c, 0xb1f6dbc4, ATMEL_FW_TYPE_502D),
	PCMCIA_DEVICE_PROD_ID12_INFO("LG", "LW2100N", 0xb474d43a, 0x6b1fec94, ATMEL_FW_TYPE_502E),
	PCMCIA_DEVICE_NULL
};

MODULE_DEVICE_TABLE(pcmcia, atmel_ids);
/* PCMCIA driver glue, registered from atmel_cs_init() below. */
static struct pcmcia_driver atmel_driver = {
	.owner		= THIS_MODULE,
	.name		= "atmel_cs",
	.probe          = atmel_probe,
	.remove		= atmel_detach,
	.id_table	= atmel_ids,
	.suspend	= atmel_suspend,
	.resume		= atmel_resume,
};
/* Module init: hook into the PCMCIA core; probing happens per-socket. */
static int __init atmel_cs_init(void)
{
	return pcmcia_register_driver(&atmel_driver);
}
/* Module exit: unregister; the core detaches any remaining sockets. */
static void __exit atmel_cs_cleanup(void)
{
	pcmcia_unregister_driver(&atmel_driver);
}
/*
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
In addition:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
module_init(atmel_cs_init);
module_exit(atmel_cs_cleanup);
| gpl-2.0 |
dreikk91/android_kernel_motorola_omap4-common | drivers/infiniband/hw/qib/qib_mr.c | 8161 | 11915 | /*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include "qib.h"
/* Fast memory region */
/* Fast memory region: core ib_fmr plus the driver's mregion state. */
struct qib_fmr {
	struct ib_fmr ibfmr;
	struct qib_mregion mr;        /* must be last: has a flexible map[] tail */
};
/* Recover the driver-private FMR wrapper from the core ib_fmr. */
static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct qib_fmr, ibfmr);
}
/**
* qib_get_dma_mr - get a DMA memory region
* @pd: protection domain for this memory region
* @acc: access flags
*
* Returns the memory region on success, otherwise returns an errno.
* Note that all DMA addresses should be created via the
* struct ib_dma_mapping_ops functions (see qib_dma.c).
*/
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct qib_ibdev *dev = to_idev(pd->device);
struct qib_mr *mr;
struct ib_mr *ret;
unsigned long flags;
if (to_ipd(pd)->user) {
ret = ERR_PTR(-EPERM);
goto bail;
}
mr = kzalloc(sizeof *mr, GFP_KERNEL);
if (!mr) {
ret = ERR_PTR(-ENOMEM);
goto bail;
}
mr->mr.access_flags = acc;
atomic_set(&mr->mr.refcount, 0);
spin_lock_irqsave(&dev->lk_table.lock, flags);
if (!dev->dma_mr)
dev->dma_mr = &mr->mr;
spin_unlock_irqrestore(&dev->lk_table.lock, flags);
ret = &mr->ibmr;
bail:
return ret;
}
/*
 * Allocate a qib_mr with enough first-level map tables for @count
 * segments and register it in @lk_table.  Returns NULL on any failure;
 * partially allocated tables are unwound via the goto path.
 */
static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
{
	struct qib_mr *mr;
	int m, i = 0;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (!mr)
		goto done;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
		if (!mr->mr.map[i])
			goto bail;
	}
	mr->mr.mapsz = m;
	mr->mr.page_shift = 0;
	mr->mr.max_segs = count;

	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	if (!qib_alloc_lkey(lk_table, &mr->mr))
		goto bail;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
	atomic_set(&mr->mr.refcount, 0);

	goto done;

bail:
	/* i holds the number of map tables successfully allocated. */
	while (i)
		kfree(mr->mr.map[--i]);
	kfree(mr);
	mr = NULL;

done:
	return mr;
}
/**
* qib_reg_phys_mr - register a physical memory region
* @pd: protection domain for this memory region
* @buffer_list: pointer to the list of physical buffers to register
* @num_phys_buf: the number of physical buffers to register
* @iova_start: the starting address passed over IB which maps to this MR
*
* Returns the memory region on success, otherwise returns an errno.
*/
/**
 * qib_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start)
{
	struct qib_mr *mr;
	int n = 0, m = 0, i;

	mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	mr->mr.pd = pd;
	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = acc;
	mr->umem = NULL;

	/* Copy each physical segment into the two-level map. */
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	return &mr->ibmr;
}
/**
* qib_reg_user_mr - register a userspace memory region
* @pd: protection domain for this memory region
* @start: starting userspace address
* @length: length of region to register
* @virt_addr: virtual address to use (from HCA's point of view)
* @mr_access_flags: access flags for this memory region
* @udata: unused by the QLogic_IB driver
*
* Returns the memory region on success, otherwise returns an errno.
*/
/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct ib_umem_chunk *chunk;
	int n, m, i;

	if (length == 0)
		return ERR_PTR(-EINVAL);

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		n += chunk->nents;

	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ib_umem_release(umem);
		return ERR_PTR(-ENOMEM);
	}

	mr->mr.pd = pd;
	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = umem->offset;
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list) {
		for (i = 0; i < chunk->nents; i++) {
			void *vaddr;

			vaddr = page_address(sg_page(&chunk->page_list[i]));
			if (!vaddr)
				goto bail_free;
			mr->mr.map[m]->segs[n].vaddr = vaddr;
			mr->mr.map[m]->segs[n].length = umem->page_size;
			n++;
			if (n == QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	return &mr->ibmr;

bail_free:
	/*
	 * Bug fix: the old code simply returned ERR_PTR(-EINVAL) here,
	 * leaking the MR (its lkey and map tables) and the umem pin.
	 * Undo everything alloc_mr() and ib_umem_get() set up, mirroring
	 * qib_dereg_mr().
	 */
	qib_free_lkey(to_idev(pd->device), &mr->mr);
	while (mr->mr.mapsz)
		kfree(mr->mr.map[--mr->mr.mapsz]);
	kfree(mr);
	ib_umem_release(umem);
	return ERR_PTR(-EINVAL);
}
/**
* qib_dereg_mr - unregister and free a memory region
* @ibmr: the memory region to free
*
* Returns 0 on success.
*
* Note that this is called to free MRs created by qib_get_dma_mr()
* or qib_reg_user_mr().
*/
/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success, or the error from releasing the lkey.
 *
 * Note that this is called to free MRs created by qib_get_dma_mr()
 * or qib_reg_user_mr().
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	struct qib_ibdev *dev = to_idev(ibmr->device);
	int ret, i;

	ret = qib_free_lkey(dev, &mr->mr);
	if (ret)
		return ret;

	for (i = mr->mr.mapsz - 1; i >= 0; i--)
		kfree(mr->mr.map[i]);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}
/*
* Allocate a memory region usable with the
* IB_WR_FAST_REG_MR send work request.
*
* Return the memory region on success, otherwise return an errno.
*/
/*
 * Allocate a memory region usable with the IB_WR_FAST_REG_MR send
 * work request; the mapping itself is filled in later by the WR.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct qib_mr *mr = alloc_mr(max_page_list_len,
				     &to_idev(pd->device)->lk_table);

	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	mr->mr.pd = pd;
	mr->mr.user_base = 0;
	mr->mr.iova = 0;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = 0;
	mr->umem = NULL;

	return &mr->ibmr;
}
/* Allocate the page-list container used by fast-reg work requests. */
struct ib_fast_reg_page_list *
qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
	struct ib_fast_reg_page_list *pl;
	unsigned size = page_list_len * sizeof(u64);

	/* refuse lists whose u64 array would not fit in one page */
	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return ERR_PTR(-ENOMEM);

	pl->page_list = kmalloc(size, GFP_KERNEL);
	if (!pl->page_list) {
		kfree(pl);
		return ERR_PTR(-ENOMEM);
	}

	return pl;
}
void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
kfree(pl->page_list);
kfree(pl);
}
/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m, i = 0;
	struct ib_fmr *ret;

	/* Allocate struct plus pointers to first level page tables. */
	/* m = number of first-level tables, rounded up to cover max_pages */
	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;	/* i == 0, so bail only does kfree(NULL) */
	/* Allocate first level page tables. */
	for (; i < m; i++) {
		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
					 GFP_KERNEL);
		if (!fmr->mr.map[i])
			goto bail;	/* frees map[0..i-1], then fmr */
	}
	fmr->mr.mapsz = m;
	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
		goto bail;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.pd = pd;
	fmr->mr.user_base = 0;
	fmr->mr.iova = 0;
	fmr->mr.length = 0;
	fmr->mr.offset = 0;
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;
	atomic_set(&fmr->mr.refcount, 0);
	ret = &fmr->ibfmr;
	goto done;
bail:
	/* unwind whatever subset of the page tables was allocated */
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	ret = ERR_PTR(-ENOMEM);
done:
	return ret;
}
/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	/*
	 * Refuse to replace a mapping that is still referenced.
	 * NOTE(review): this check happens outside rkt->lock, so it looks
	 * racy against a concurrent lookup taking a reference -- confirm.
	 */
	if (atomic_read(&fmr->mr.refcount))
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}

	rkt = &to_idev(ibfmr->device)->lk_table;
	/* publish the new mapping atomically w.r.t. lkey-table readers */
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;	/* bytes per FMR page */
	fmr->mr.length = list_len * ps;
	m = 0;	/* first-level map index */
	n = 0;	/* segment index within map[m] */
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;
bail:
	return ret;
}
/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success (always succeeds).
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		struct qib_lkey_table *rkt =
			&to_idev(fmr->ibfmr.device)->lk_table;

		/* zero the mapping under the lkey table lock */
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}
/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success, or the error from qib_free_lkey().
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	int rval;
	int n;

	rval = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
	if (rval)
		return rval;

	/* free the first-level page tables, then the fmr itself */
	for (n = fmr->mr.mapsz - 1; n >= 0; n--)
		kfree(fmr->mr.map[n]);
	kfree(fmr);
	return 0;
}
| gpl-2.0 |
Andorreta/android_kernel_google_msm | drivers/infiniband/hw/qib/qib_mr.c | 8161 | 11915 | /*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include "qib.h"
/* Fast memory region */
struct qib_fmr {
	struct ib_fmr ibfmr;	/* core verbs FMR handed out to callers */
	struct qib_mregion mr;	/* must be last */
};

/* Recover the driver's qib_fmr from the embedded core ib_fmr. */
static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct qib_fmr, ibfmr);
}
/**
 * qib_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see qib_dma.c).
 */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct qib_ibdev *dev = to_idev(pd->device);
	struct qib_mr *mr;
	struct ib_mr *ret;
	unsigned long flags;

	/* only privileged (non-userspace) PDs may use the DMA region */
	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.access_flags = acc;
	atomic_set(&mr->mr.refcount, 0);

	/*
	 * Record the first DMA MR created on this device.
	 * NOTE(review): if dev->dma_mr is already set, this new mr is
	 * still returned but never becomes the device's dma_mr --
	 * confirm that this is the intended behavior.
	 */
	spin_lock_irqsave(&dev->lk_table.lock, flags);
	if (!dev->dma_mr)
		dev->dma_mr = &mr->mr;
	spin_unlock_irqrestore(&dev->lk_table.lock, flags);

	ret = &mr->ibmr;
bail:
	return ret;
}
/*
 * Allocate a qib_mr large enough to describe "count" segments, along
 * with its first-level segment tables, and reserve an lkey for it in
 * @lk_table.  Returns the new region or NULL on any failure.
 */
static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
{
	struct qib_mr *mr;
	int m, i = 0;

	/* Allocate struct plus pointers to first level page tables. */
	/* m = number of first-level tables, rounded up */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (!mr)
		goto done;
	/* Allocate first level page tables. */
	for (; i < m; i++) {
		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
		if (!mr->mr.map[i])
			goto bail;	/* frees map[0..i-1] and mr */
	}
	mr->mr.mapsz = m;
	mr->mr.page_shift = 0;
	mr->mr.max_segs = count;
	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	if (!qib_alloc_lkey(lk_table, &mr->mr))
		goto bail;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
	atomic_set(&mr->mr.refcount, 0);
	goto done;
bail:
	/* unwind the partially built page-table array */
	while (i)
		kfree(mr->mr.map[--i]);
	kfree(mr);
	mr = NULL;
done:
	return mr;
}
/**
 * qib_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start)
{
	struct qib_mr *mr;
	struct ib_mr *ret;
	int m, n, i;

	mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = acc;
	mr->umem = NULL;

	/* copy each physical buffer into the two-level segment table */
	m = 0;	/* first-level map index */
	n = 0;	/* segment index within map[m] */
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;
bail:
	return ret;
}
/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct ib_umem_chunk *chunk;
	int n, m, i;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* pin the user pages for the lifetime of the MR */
	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		n += chunk->nents;

	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = umem->offset;
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;	/* first-level map index */
	n = 0;	/* segment index within map[m] */
	list_for_each_entry(chunk, &umem->chunk_list, list) {
		for (i = 0; i < chunk->nents; i++) {
			void *vaddr;

			vaddr = page_address(sg_page(&chunk->page_list[i]));
			if (!vaddr) {
				/*
				 * Bug fix: the old code jumped straight to
				 * "bail" here, leaking the MR (lkey + map
				 * tables + struct) and leaving the umem
				 * pinned.  Undo everything built above,
				 * mirroring qib_dereg_mr().
				 */
				qib_free_lkey(to_idev(pd->device), &mr->mr);
				while (mr->mr.mapsz)
					kfree(mr->mr.map[--mr->mr.mapsz]);
				kfree(mr);
				ib_umem_release(umem);
				ret = ERR_PTR(-EINVAL);
				goto bail;
			}
			mr->mr.map[m]->segs[n].vaddr = vaddr;
			mr->mr.map[m]->segs[n].length = umem->page_size;
			n++;
			if (n == QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	ret = &mr->ibmr;
bail:
	return ret;
}
/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success, or the error from qib_free_lkey() if the
 * region's lkey could not be released (e.g. still referenced).
 *
 * Note that this is called to free MRs created by qib_get_dma_mr()
 * or qib_reg_user_mr().
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	struct qib_ibdev *dev = to_idev(ibmr->device);
	int rval;
	int n;

	rval = qib_free_lkey(dev, &mr->mr);
	if (rval)
		return rval;

	/* tear down the first-level segment tables, last entry first */
	for (n = mr->mr.mapsz - 1; n >= 0; n--)
		kfree(mr->mr.map[n]);

	/* user regions also hold a pinned umem that must be dropped */
	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);
	return 0;
}
/*
 * Allocate a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct qib_mr *mr = alloc_mr(max_page_list_len,
				     &to_idev(pd->device)->lk_table);

	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* fresh fast-reg MR: owned by this PD, no mapping established yet */
	mr->mr.pd = pd;
	mr->mr.user_base = 0;
	mr->mr.iova = 0;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = 0;
	mr->umem = NULL;

	return &mr->ibmr;
}
/* Allocate the page-list container used by fast-reg work requests. */
struct ib_fast_reg_page_list *
qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
	struct ib_fast_reg_page_list *pl;
	unsigned size = page_list_len * sizeof(u64);

	/* refuse lists whose u64 array would not fit in one page */
	if (size > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return ERR_PTR(-ENOMEM);

	pl->page_list = kmalloc(size, GFP_KERNEL);
	if (!pl->page_list) {
		kfree(pl);
		return ERR_PTR(-ENOMEM);
	}

	return pl;
}
void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
kfree(pl->page_list);
kfree(pl);
}
/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m, i = 0;
	struct ib_fmr *ret;

	/* Allocate struct plus pointers to first level page tables. */
	/* m = number of first-level tables, rounded up to cover max_pages */
	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;	/* i == 0, so bail only does kfree(NULL) */
	/* Allocate first level page tables. */
	for (; i < m; i++) {
		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
					 GFP_KERNEL);
		if (!fmr->mr.map[i])
			goto bail;	/* frees map[0..i-1], then fmr */
	}
	fmr->mr.mapsz = m;
	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
		goto bail;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.pd = pd;
	fmr->mr.user_base = 0;
	fmr->mr.iova = 0;
	fmr->mr.length = 0;
	fmr->mr.offset = 0;
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;
	atomic_set(&fmr->mr.refcount, 0);
	ret = &fmr->ibfmr;
	goto done;
bail:
	/* unwind whatever subset of the page tables was allocated */
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	ret = ERR_PTR(-ENOMEM);
done:
	return ret;
}
/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	/*
	 * Refuse to replace a mapping that is still referenced.
	 * NOTE(review): this check happens outside rkt->lock, so it looks
	 * racy against a concurrent lookup taking a reference -- confirm.
	 */
	if (atomic_read(&fmr->mr.refcount))
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}

	rkt = &to_idev(ibfmr->device)->lk_table;
	/* publish the new mapping atomically w.r.t. lkey-table readers */
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;	/* bytes per FMR page */
	fmr->mr.length = list_len * ps;
	m = 0;	/* first-level map index */
	n = 0;	/* segment index within map[m] */
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;
bail:
	return ret;
}
/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success (always succeeds).
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;
	unsigned long flags;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		struct qib_lkey_table *rkt =
			&to_idev(fmr->ibfmr.device)->lk_table;

		/* zero the mapping under the lkey table lock */
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}
/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success, or the error from qib_free_lkey().
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	int rval;
	int n;

	rval = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
	if (rval)
		return rval;

	/* free the first-level page tables, then the fmr itself */
	for (n = fmr->mr.mapsz - 1; n >= 0; n--)
		kfree(fmr->mr.map[n]);
	kfree(fmr);
	return 0;
}
| gpl-2.0 |
Metallice/GTab2-Kernel-TW | kernel/trace/trace_branch.c | 8929 | 9277 | /*
* unlikely profiler
*
* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
*/
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>
#include "trace.h"
#include "trace_stat.h"
#include "trace_output.h"
#ifdef CONFIG_BRANCH_TRACER

/* forward declaration; the tracer itself is defined later in this section */
static struct tracer branch_trace;

/* non-zero while at least one caller has enabled branch tracing */
static int branch_tracing_enabled __read_mostly;
/* serializes enable/disable of the branch tracer */
static DEFINE_MUTEX(branch_tracing_mutex);

/* trace array events are recorded into; set by enable_branch_tracing() */
static struct trace_array *branch_tracer;
/*
 * Record one TRACE_BRANCH event describing whether the annotated branch
 * at "f" went the way the likely()/unlikely() annotation predicted.
 * Runs with IRQs off and uses the per-cpu "disabled" count to bar
 * recursive entry.
 */
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct ftrace_event_call *call = &event_branch;
	struct trace_array *tr = branch_tracer;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	struct ring_buffer *buffer;
	unsigned long flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer. This is slower, but much safer.
	 */
	if (unlikely(!tr))
		return;

	/* keep this event atomic on this CPU and reject re-entry */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	pc = preempt_count();
	buffer = tr->buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	/* strncpy() does not guarantee termination; force it */
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);

out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(flags);
}
/* Record the branch outcome, but only while the branch tracer is on. */
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	if (branch_tracing_enabled)
		probe_likely_condition(f, val, expect);
}
/*
 * Point the branch probe at @tr and bump the enable count.  The count
 * lets nested enable/disable pairs balance.  Always returns 0.
 */
int enable_branch_tracing(struct trace_array *tr)
{
	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
	 * Must be seen before enabling. The reader is a condition
	 * where we do not need a matching rmb()
	 */
	smp_wmb();
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return 0;
}
/* Drop one enable reference; the count never goes negative. */
void disable_branch_tracing(void)
{
	mutex_lock(&branch_tracing_mutex);
	if (branch_tracing_enabled)
		branch_tracing_enabled--;
	mutex_unlock(&branch_tracing_mutex);
}
static void start_branch_trace(struct trace_array *tr)
{
enable_branch_tracing(tr);
}
static void stop_branch_trace(struct trace_array *tr)
{
disable_branch_tracing();
}
static int branch_trace_init(struct trace_array *tr)
{
start_branch_trace(tr);
return 0;
}
static void branch_trace_reset(struct trace_array *tr)
{
stop_branch_trace(tr);
}
/* Render one TRACE_BRANCH entry as "[ ok / MISS ] func:file:line". */
static enum print_line_t trace_branch_print(struct trace_iterator *iter,
					    int flags, struct trace_event *event)
{
	struct trace_branch *field;

	trace_assign_type(field, iter->ent);

	/*
	 * NOTE(review): trace_seq_printf() conventionally returns non-zero
	 * on success and 0 when the seq buffer is full, which makes this
	 * condition look inverted (success -> PARTIAL_LINE).  Verify the
	 * trace_seq_printf() semantics in this tree before changing.
	 */
	if (trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
			     field->correct ? " ok " : " MISS ",
			     field->func,
			     field->file,
			     field->line))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
/* Emit the column header shown at the top of the branch trace output. */
static void branch_print_header(struct seq_file *s)
{
	seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT"
		    " FUNC:FILE:LINE\n");
	seq_puts(s, "# | | | | | "
		    " |\n");
}
/* output callbacks for TRACE_BRANCH ring-buffer entries */
static struct trace_event_functions trace_branch_funcs = {
	.trace		= trace_branch_print,
};

/* event registration binding TRACE_BRANCH to its print function */
static struct trace_event trace_branch_event = {
	.type		= TRACE_BRANCH,
	.funcs		= &trace_branch_funcs,
};

/* the "branch" tracer registered with the ftrace core */
static struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif /* CONFIG_FTRACE_SELFTEST */
	.print_header	= branch_print_header,
};
/*
 * Register the TRACE_BRANCH event and then the branch tracer itself.
 * register_ftrace_event() returns 0 on failure, so a zero return here
 * means the event (and thus the tracer's output) is unusable.
 */
__init static int init_branch_tracer(void)
{
	if (!register_ftrace_event(&trace_branch_event)) {
		printk(KERN_WARNING "Warning: could not register "
		       "branch events\n");
		return 1;
	}
	return register_tracer(&branch_trace);
}
device_initcall(init_branch_tracer);
#else
/* CONFIG_BRANCH_TRACER disabled: recording a branch outcome is a no-op */
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */
/*
 * Hook called by the likely()/unlikely() profiling instrumentation for
 * every annotated branch.
 * @f: compile-time record describing this branch site
 * @val: the condition's actual value
 * @expect: what the annotation predicted
 */
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! */
	/* racy cross-CPU increments are tolerated; these are statistics */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);
/* linker-provided bounds of the annotated-branch profile section */
extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

/* Print the column headers for the branch_annotated stat file. */
static int annotated_branch_stat_headers(struct seq_file *m)
{
	seq_printf(m, " correct incorrect %% ");
	seq_printf(m, " Function "
		      " File Line\n"
		      " ------- --------- - "
		      " -------- "
		      " ---- ----\n");
	return 0;
}
/*
 * Percentage of mispredictions for one branch record.
 * Returns -1 when the branch was never taken correctly and never
 * missed either (no data), 100 when it only ever missed.
 */
static inline long get_incorrect_percent(struct ftrace_branch_data *p)
{
	if (!p->correct)
		return p->incorrect ? 100 : -1;

	return (p->incorrect * 100) / (p->correct + p->incorrect);
}
/* Print one branch-profile record: counts, miss percentage, location. */
static int branch_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_branch_data *p = v;
	const char *f;
	long percent;

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	f++;	/* lands on p->file itself when no '/' is present */

	/*
	 * The miss is overlayed on correct, and hit on incorrect.
	 */
	percent = get_incorrect_percent(p);

	seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
	if (percent < 0)
		seq_printf(m, " X ");	/* no data: percentage undefined */
	else
		seq_printf(m, "%3ld ", percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}
/* stat iterator: begin at the first annotated-branch record */
static void *annotated_branch_stat_start(struct tracer_stat *trace)
{
	return __start_annotated_branch_profile;
}

/* stat iterator: advance one record; NULL once past the section end */
static void *
annotated_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_annotated_branch_profile)
		return NULL;

	return p;
}
/*
 * Sort annotated-branch records: highest miss percentage first, then by
 * raw miss count, and among ties the best-predicted branches sort last.
 */
static int annotated_branch_stat_cmp(void *p1, void *p2)
{
	struct ftrace_branch_data *a = p1;
	struct ftrace_branch_data *b = p2;
	long pa = get_incorrect_percent(a);
	long pb = get_incorrect_percent(b);

	if (pa != pb)
		return pa < pb ? -1 : 1;

	if (a->incorrect != b->incorrect)
		return a->incorrect < b->incorrect ? -1 : 1;

	/*
	 * Since the above shows worse (incorrect) cases
	 * first, we continue that by showing best (correct)
	 * cases last.
	 */
	if (a->correct != b->correct)
		return a->correct > b->correct ? -1 : 1;

	return 0;
}
/* trace_stat registration for the likely()/unlikely() annotation profile */
static struct tracer_stat annotated_branch_stats = {
	.name = "branch_annotated",
	.stat_start = annotated_branch_stat_start,
	.stat_next = annotated_branch_stat_next,
	.stat_cmp = annotated_branch_stat_cmp,
	.stat_headers = annotated_branch_stat_headers,
	.stat_show = branch_stat_show
};
/*
 * Register the annotated-branch statistics with the trace_stat core.
 *
 * Bug fix: register_stat_tracer() follows the usual kernel convention
 * of returning 0 on success and a negative errno on failure, so the
 * old "if (!ret)" warned (and failed the initcall) on *success* while
 * silently accepting failure.  Warn on a non-zero return instead.
 */
__init static int init_annotated_branch_stats(void)
{
	int ret;

	ret = register_stat_tracer(&annotated_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register "
		       "annotated branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(init_annotated_branch_stats);
#ifdef CONFIG_PROFILE_ALL_BRANCHES

/* linker-provided bounds of the all-branches profile section */
extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

/* Print the column headers for the branch_all stat file. */
static int all_branch_stat_headers(struct seq_file *m)
{
	seq_printf(m, " miss hit %% ");
	seq_printf(m, " Function "
		      " File Line\n"
		      " ------- --------- - "
		      " -------- "
		      " ---- ----\n");
	return 0;
}
/* stat iterator: begin at the first all-branches record */
static void *all_branch_stat_start(struct tracer_stat *trace)
{
	return __start_branch_profile;
}

/* stat iterator: advance one record; NULL once past the section end */
static void *
all_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_branch_profile)
		return NULL;

	return p;
}
/* trace_stat registration for the every-if branch profile (unsorted) */
static struct tracer_stat all_branch_stats = {
	.name = "branch_all",
	.stat_start = all_branch_stat_start,
	.stat_next = all_branch_stat_next,
	.stat_headers = all_branch_stat_headers,
	.stat_show = branch_stat_show
};
/*
 * Register the all-branches statistics with the trace_stat core.
 *
 * Bug fix: register_stat_tracer() returns 0 on success and a negative
 * errno on failure, so the old "if (!ret)" warned (and failed the
 * initcall) on *success* while silently accepting failure.  Warn on a
 * non-zero return instead.
 */
__init static int all_annotated_branch_stats(void)
{
	int ret;

	ret = register_stat_tracer(&all_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register "
		       "all branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(all_annotated_branch_stats);
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
| gpl-2.0 |
Juansheng/android_kernel_htc_vision | arch/sparc/kernel/sparc_ksyms_32.c | 9185 | 1127 | /*
* arch/sparc/kernel/ksyms.c: Sparc specific ksyms support.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
*/
#include <linux/module.h>
#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/head.h>
#include <asm/dma.h>
/*
 * NOTE(review): "struct poll" appears unused within this file --
 * possibly a leftover; confirm before removing.
 */
struct poll {
	int fd;
	short events;
	short revents;
};

/* from entry.S */
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);

/* from head_32.S */
EXPORT_SYMBOL(__ret_efault);
EXPORT_SYMBOL(empty_zero_page);

/* Defined using magic */
#ifndef CONFIG_SMP
EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32));
#else
EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id));
#endif
/* MMU entry points resolved at boot through the btfixup mechanism */
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl));
EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one));
EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached));

/* Exporting a symbol from /init/main.c */
EXPORT_SYMBOL(saved_command_line);
| gpl-2.0 |
simex31/BC_Kernel_LP_STOCK_D802 | drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.c | 10977 | 3965 | /*
* Copyright (c) 2010 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <defs.h>
#include "types.h"
#include <ucode_loader.h>
/*
 * Tags naming the individual firmware pieces that are requested from
 * brcms_ucode_init_buf()/brcms_ucode_init_uint() below.
 */
enum {
	D11UCODE_NAMETAG_START = 0,
	D11LCN0BSINITVALS24,
	D11LCN0INITVALS24,
	D11LCN1BSINITVALS24,
	D11LCN1INITVALS24,
	D11LCN2BSINITVALS24,
	D11LCN2INITVALS24,
	D11N0ABSINITVALS16,
	D11N0BSINITVALS16,
	D11N0INITVALS16,
	D11UCODE_OVERSIGHT16_MIMO,
	D11UCODE_OVERSIGHT16_MIMOSZ,
	D11UCODE_OVERSIGHT24_LCN,
	D11UCODE_OVERSIGHT24_LCNSZ,
	D11UCODE_OVERSIGHT_BOMMAJOR,
	D11UCODE_OVERSIGHT_BOMMINOR
};
/*
 * Load every microcode piece into @ucode.  Returns 0 on success or the
 * first negative error; once rc goes negative all later loads are
 * skipped, exactly like the original ternary chain.
 */
int brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode)
{
	int rc;

	/* verify the firmware images before loading any of the pieces */
	rc = brcms_check_firmwares(wl);

	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11lcn0bsinitvals24,
					  D11LCN0BSINITVALS24);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11lcn0initvals24,
					  D11LCN0INITVALS24);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11lcn1bsinitvals24,
					  D11LCN1BSINITVALS24);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11lcn1initvals24,
					  D11LCN1INITVALS24);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11lcn2bsinitvals24,
					  D11LCN2BSINITVALS24);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11lcn2initvals24,
					  D11LCN2INITVALS24);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11n0absinitvals16,
					  D11N0ABSINITVALS16);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11n0bsinitvals16,
					  D11N0BSINITVALS16);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11n0initvals16,
					  D11N0INITVALS16);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->bcm43xx_16_mimo,
					  D11UCODE_OVERSIGHT16_MIMO);
	if (rc >= 0)
		rc = brcms_ucode_init_uint(wl, &ucode->bcm43xx_16_mimosz,
					   D11UCODE_OVERSIGHT16_MIMOSZ);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->bcm43xx_24_lcn,
					  D11UCODE_OVERSIGHT24_LCN);
	if (rc >= 0)
		rc = brcms_ucode_init_uint(wl, &ucode->bcm43xx_24_lcnsz,
					   D11UCODE_OVERSIGHT24_LCNSZ);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->bcm43xx_bommajor,
					  D11UCODE_OVERSIGHT_BOMMAJOR);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->bcm43xx_bomminor,
					  D11UCODE_OVERSIGHT_BOMMINOR);
	return rc;
}
/*
 * Release every firmware buffer obtained by brcms_ucode_data_init().
 * The *sz members were filled via brcms_ucode_init_uint() and are plain
 * integers, so they need no freeing.
 */
void brcms_ucode_data_free(struct brcms_ucode *ucode)
{
	brcms_ucode_free_buf((void *)ucode->d11lcn0bsinitvals24);
	brcms_ucode_free_buf((void *)ucode->d11lcn0initvals24);
	brcms_ucode_free_buf((void *)ucode->d11lcn1bsinitvals24);
	brcms_ucode_free_buf((void *)ucode->d11lcn1initvals24);
	brcms_ucode_free_buf((void *)ucode->d11lcn2bsinitvals24);
	brcms_ucode_free_buf((void *)ucode->d11lcn2initvals24);
	brcms_ucode_free_buf((void *)ucode->d11n0absinitvals16);
	brcms_ucode_free_buf((void *)ucode->d11n0bsinitvals16);
	brcms_ucode_free_buf((void *)ucode->d11n0initvals16);
	brcms_ucode_free_buf((void *)ucode->bcm43xx_16_mimo);
	brcms_ucode_free_buf((void *)ucode->bcm43xx_24_lcn);
	brcms_ucode_free_buf((void *)ucode->bcm43xx_bommajor);
	brcms_ucode_free_buf((void *)ucode->bcm43xx_bomminor);
}
| gpl-2.0 |
kirananto/REDMI2_RAZOR | drivers/net/wireless/brcm80211/brcmsmac/ucode_loader.c | 10977 | 3965 | /*
* Copyright (c) 2010 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <defs.h>
#include "types.h"
#include <ucode_loader.h>
/*
 * Tags naming the individual firmware pieces that are requested from
 * brcms_ucode_init_buf()/brcms_ucode_init_uint() below.
 */
enum {
	D11UCODE_NAMETAG_START = 0,
	D11LCN0BSINITVALS24,
	D11LCN0INITVALS24,
	D11LCN1BSINITVALS24,
	D11LCN1INITVALS24,
	D11LCN2BSINITVALS24,
	D11LCN2INITVALS24,
	D11N0ABSINITVALS16,
	D11N0BSINITVALS16,
	D11N0INITVALS16,
	D11UCODE_OVERSIGHT16_MIMO,
	D11UCODE_OVERSIGHT16_MIMOSZ,
	D11UCODE_OVERSIGHT24_LCN,
	D11UCODE_OVERSIGHT24_LCNSZ,
	D11UCODE_OVERSIGHT_BOMMAJOR,
	D11UCODE_OVERSIGHT_BOMMINOR
};
/*
 * Load every microcode piece into @ucode.  Returns 0 on success or the
 * first negative error; once rc goes negative all later loads are
 * skipped, exactly like the original ternary chain.
 */
int brcms_ucode_data_init(struct brcms_info *wl, struct brcms_ucode *ucode)
{
	int rc;

	/* verify the firmware images before loading any of the pieces */
	rc = brcms_check_firmwares(wl);

	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11lcn0bsinitvals24,
					  D11LCN0BSINITVALS24);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11lcn0initvals24,
					  D11LCN0INITVALS24);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11lcn1bsinitvals24,
					  D11LCN1BSINITVALS24);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11lcn1initvals24,
					  D11LCN1INITVALS24);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11lcn2bsinitvals24,
					  D11LCN2BSINITVALS24);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11lcn2initvals24,
					  D11LCN2INITVALS24);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11n0absinitvals16,
					  D11N0ABSINITVALS16);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11n0bsinitvals16,
					  D11N0BSINITVALS16);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->d11n0initvals16,
					  D11N0INITVALS16);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->bcm43xx_16_mimo,
					  D11UCODE_OVERSIGHT16_MIMO);
	if (rc >= 0)
		rc = brcms_ucode_init_uint(wl, &ucode->bcm43xx_16_mimosz,
					   D11UCODE_OVERSIGHT16_MIMOSZ);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->bcm43xx_24_lcn,
					  D11UCODE_OVERSIGHT24_LCN);
	if (rc >= 0)
		rc = brcms_ucode_init_uint(wl, &ucode->bcm43xx_24_lcnsz,
					   D11UCODE_OVERSIGHT24_LCNSZ);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->bcm43xx_bommajor,
					  D11UCODE_OVERSIGHT_BOMMAJOR);
	if (rc >= 0)
		rc = brcms_ucode_init_buf(wl,
					  (void **)&ucode->bcm43xx_bomminor,
					  D11UCODE_OVERSIGHT_BOMMINOR);
	return rc;
}
void brcms_ucode_data_free(struct brcms_ucode *ucode)
{
brcms_ucode_free_buf((void *)ucode->d11lcn0bsinitvals24);
brcms_ucode_free_buf((void *)ucode->d11lcn0initvals24);
brcms_ucode_free_buf((void *)ucode->d11lcn1bsinitvals24);
brcms_ucode_free_buf((void *)ucode->d11lcn1initvals24);
brcms_ucode_free_buf((void *)ucode->d11lcn2bsinitvals24);
brcms_ucode_free_buf((void *)ucode->d11lcn2initvals24);
brcms_ucode_free_buf((void *)ucode->d11n0absinitvals16);
brcms_ucode_free_buf((void *)ucode->d11n0bsinitvals16);
brcms_ucode_free_buf((void *)ucode->d11n0initvals16);
brcms_ucode_free_buf((void *)ucode->bcm43xx_16_mimo);
brcms_ucode_free_buf((void *)ucode->bcm43xx_24_lcn);
brcms_ucode_free_buf((void *)ucode->bcm43xx_bommajor);
brcms_ucode_free_buf((void *)ucode->bcm43xx_bomminor);
}
| gpl-2.0 |
omnirom/android_kernel_lge_x3 | drivers/sfi/sfi_core.c | 11489 | 13280 | /* sfi_core.c Simple Firmware Interface - core internals */
/*
This file is provided under a dual BSD/GPLv2 license. When using or
redistributing this file, you may do so under either license.
GPL LICENSE SUMMARY
Copyright(c) 2009 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution
in the file called LICENSE.GPL.
BSD LICENSE
Copyright(c) 2009 Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define KMSG_COMPONENT "SFI"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/sfi.h>
#include <linux/slab.h>
#include "sfi_core.h"
#define ON_SAME_PAGE(addr1, addr2) \
(((unsigned long)(addr1) & PAGE_MASK) == \
((unsigned long)(addr2) & PAGE_MASK))
#define TABLE_ON_PAGE(page, table, size) (ON_SAME_PAGE(page, table) && \
ON_SAME_PAGE(page, table + size))
int sfi_disabled __read_mostly;
EXPORT_SYMBOL(sfi_disabled);
static u64 syst_pa __read_mostly;
static struct sfi_table_simple *syst_va __read_mostly;
/*
* FW creates and saves the SFI tables in memory. When these tables get
* used, they may need to be mapped to virtual address space, and the mapping
* can happen before or after the ioremap() is ready, so a flag is needed
* to indicating this
*/
static u32 sfi_use_ioremap __read_mostly;
/*
 * Map @size bytes of physical memory at @phys.  Until the regular
 * ioremap engine is up (sfi_use_ioremap == 0) the early_ioremap()
 * fixmap path is used instead; __ref silences the resulting
 * section-mismatch warning (early_ioremap is __init).
 */
static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
{
	if (!phys || !size)
		return NULL;

	return sfi_use_ioremap ? ioremap_cache(phys, size)
			       : early_ioremap(phys, size);
}
/*
 * Undo sfi_map_memory(): choose iounmap() or early_iounmap() to match
 * whichever mapping routine is currently in use (see sfi_use_ioremap).
 */
static void __ref sfi_unmap_memory(void __iomem *virt, u32 size)
{
	if (!virt || !size)
		return;
	if (sfi_use_ioremap)
		iounmap(virt);
	else
		early_iounmap(virt, size);	/* early variant needs the size */
}
/*
 * Log one table header line: signature, physical address, length,
 * revision and the OEM identification strings.
 */
static void sfi_print_table_header(unsigned long long pa,
				struct sfi_table_header *header)
{
	pr_info("%4.4s %llX, %04X (v%d %6.6s %8.8s)\n",
		header->sig, pa,
		header->len, header->rev, header->oem_id,
		header->oem_table_id);
}
/*
* sfi_verify_table()
* Sanity check table lengh, calculate checksum
*/
static int sfi_verify_table(struct sfi_table_header *table)
{
u8 checksum = 0;
u8 *puchar = (u8 *)table;
u32 length = table->len;
/* Sanity check table length against arbitrary 1MB limit */
if (length > 0x100000) {
pr_err("Invalid table length 0x%x\n", length);
return -1;
}
while (length--)
checksum += *puchar++;
if (checksum) {
pr_err("Checksum %2.2X should be %2.2X\n",
table->csum, table->csum - checksum);
return -1;
}
return 0;
}
/*
 * sfi_map_table()
 *
 * Return address of mapped table, or NULL if the header could not be
 * mapped.
 * Check for common case that we can re-use mapping to SYST,
 * which requires syst_pa, syst_va to be initialized.
 */
struct sfi_table_header *sfi_map_table(u64 pa)
{
	struct sfi_table_header *th;
	u32 length;

	/* Map only the header first, unless it already lies on SYST's page */
	if (!TABLE_ON_PAGE(syst_pa, pa, sizeof(struct sfi_table_header)))
		th = sfi_map_memory(pa, sizeof(struct sfi_table_header));
	else
		th = (void *)syst_va + (pa - syst_pa);

	/*
	 * BUGFIX: sfi_map_memory() may fail; the original code went on to
	 * dereference th->len, oopsing on a NULL mapping.  Callers
	 * (sfi_check_table et al.) already handle a NULL return.
	 */
	if (!th)
		return NULL;

	/* If table fits on same page as its header, we are done */
	if (TABLE_ON_PAGE(th, th, th->len))
		return th;

	/* Entire table does not fit on same page as SYST */
	length = th->len;	/* save before the header mapping goes away */
	if (!TABLE_ON_PAGE(syst_pa, pa, sizeof(struct sfi_table_header)))
		sfi_unmap_memory(th, sizeof(struct sfi_table_header));

	return sfi_map_memory(pa, length);
}
/*
* sfi_unmap_table()
*
* Undoes effect of sfi_map_table() by unmapping table
* if it did not completely fit on same page as SYST.
*/
void sfi_unmap_table(struct sfi_table_header *th)
{
if (!TABLE_ON_PAGE(syst_va, th, th->len))
sfi_unmap_memory(th, TABLE_ON_PAGE(th, th, th->len) ?
sizeof(*th) : th->len);
}
/*
 * Match a table header against a lookup key.  The signature must always
 * match; the OEM id and OEM table id are only compared when the key
 * specifies them (NULL acts as a wildcard).
 *
 * Returns 0 on match, -1 otherwise.
 */
static int sfi_table_check_key(struct sfi_table_header *th,
				struct sfi_table_key *key)
{
	if (strncmp(th->sig, key->sig, SFI_SIGNATURE_SIZE))
		return -1;
	if (key->oem_id && strncmp(th->oem_id, key->oem_id, SFI_OEM_ID_SIZE))
		return -1;
	if (key->oem_table_id && strncmp(th->oem_table_id, key->oem_table_id,
					 SFI_OEM_TABLE_ID_SIZE))
		return -1;
	return 0;
}
/*
 * This function will be used in 2 cases:
 * 1. used to enumerate and verify the tables addressed by SYST/XSDT,
 *	thus no signature will be given (in kernel boot phase)
 * 2. used to parse one specific table, signature must exist, and
 *	the mapped virt address will be returned, and the virt space
 *	will be released by call sfi_put_table() later
 *
 * This two cases are from two different functions with two different
 * sections and causes section mismatch warning. So use __ref to tell
 * modpost not to make any noise.
 *
 * Return value:
 *	NULL: when can't find a table matching the key
 *	ERR_PTR(error): error value
 *	virt table address: when a matched table is found
 */
struct sfi_table_header *
 __ref sfi_check_table(u64 pa, struct sfi_table_key *key)
{
	struct sfi_table_header *th;
	void *ret = NULL;

	th = sfi_map_table(pa);
	if (!th)
		return ERR_PTR(-ENOMEM);

	if (!key->sig) {
		/* enumeration mode: log and checksum, always unmap below */
		sfi_print_table_header(pa, th);
		if (sfi_verify_table(th))
			ret = ERR_PTR(-EINVAL);
	} else {
		if (!sfi_table_check_key(th, key))
			return th;	/* Success: caller releases via sfi_put_table() */
	}

	/* miss or verification failure: drop the mapping before returning */
	sfi_unmap_table(th);
	return ret;
}
/*
 * sfi_get_table()
 *
 * Walk SYST's entry list looking for a table that matches @key and
 * return it mapped; NULL when nothing matches.  Lookup errors on
 * individual entries are skipped, not propagated.
 */
struct sfi_table_header *sfi_get_table(struct sfi_table_key *key)
{
	u32 i, num = SFI_GET_NUM_ENTRIES(syst_va, u64);

	for (i = 0; i < num; i++) {
		struct sfi_table_header *th;

		th = sfi_check_table(syst_va->pentry[i], key);
		if (th && !IS_ERR(th))
			return th;
	}

	return NULL;
}
/* Release a table previously obtained from sfi_get_table() */
void sfi_put_table(struct sfi_table_header *th)
{
	sfi_unmap_table(th);
}
/*
 * sfi_table_parse - find the table matching @signature (and optional
 * OEM ids) and run @handler on it.
 *
 * Returns the handler's result, or -EINVAL when SFI is disabled, the
 * arguments are invalid, or no matching table exists.
 */
int sfi_table_parse(char *signature, char *oem_id, char *oem_table_id,
			sfi_table_handler handler)
{
	struct sfi_table_header *table;
	struct sfi_table_key key;
	int ret;

	if (sfi_disabled || !handler || !signature)
		return -EINVAL;

	key.sig = signature;
	key.oem_id = oem_id;
	key.oem_table_id = oem_table_id;

	table = sfi_get_table(&key);
	if (!table)
		return -EINVAL;

	ret = handler(table);
	sfi_put_table(table);

	return ret;
}
EXPORT_SYMBOL_GPL(sfi_table_parse);
/*
 * sfi_parse_syst()
 * Checksum all the tables in SYST and print their headers
 *
 * success: set syst_va, return 0
 *
 * NOTE(review): on a bad table the SYST mapping is left in place;
 * the caller (sfi_init) reacts to a failure by disabling SFI outright.
 */
static int __init sfi_parse_syst(void)
{
	struct sfi_table_key key = SFI_ANY_KEY;	/* NULL sig => verify-only mode */
	int tbl_cnt, i;
	void *ret;

	syst_va = sfi_map_memory(syst_pa, sizeof(struct sfi_table_simple));
	if (!syst_va)
		return -ENOMEM;

	tbl_cnt = SFI_GET_NUM_ENTRIES(syst_va, u64);
	for (i = 0; i < tbl_cnt; i++) {
		ret = sfi_check_table(syst_va->pentry[i], &key);
		if (IS_ERR(ret))
			return PTR_ERR(ret);
	}

	return 0;
}
/*
* The OS finds the System Table by searching 16-byte boundaries between
* physical address 0x000E0000 and 0x000FFFFF. The OS shall search this region
* starting at the low address and shall stop searching when the 1st valid SFI
* System Table is found.
*
* success: set syst_pa, return 0
* fail: return -1
*/
static __init int sfi_find_syst(void)
{
unsigned long offset, len;
void *start;
len = SFI_SYST_SEARCH_END - SFI_SYST_SEARCH_BEGIN;
start = sfi_map_memory(SFI_SYST_SEARCH_BEGIN, len);
if (!start)
return -1;
for (offset = 0; offset < len; offset += 16) {
struct sfi_table_header *syst_hdr;
syst_hdr = start + offset;
if (strncmp(syst_hdr->sig, SFI_SIG_SYST,
SFI_SIGNATURE_SIZE))
continue;
if (syst_hdr->len > PAGE_SIZE)
continue;
sfi_print_table_header(SFI_SYST_SEARCH_BEGIN + offset,
syst_hdr);
if (sfi_verify_table(syst_hdr))
continue;
/*
* Enforce SFI spec mandate that SYST reside within a page.
*/
if (!ON_SAME_PAGE(syst_pa, syst_pa + syst_hdr->len)) {
pr_info("SYST 0x%llx + 0x%x crosses page\n",
syst_pa, syst_hdr->len);
continue;
}
/* Success */
syst_pa = SFI_SYST_SEARCH_BEGIN + offset;
sfi_unmap_memory(start, len);
return 0;
}
sfi_unmap_memory(start, len);
return -1;
}
static struct kobject *sfi_kobj;
static struct kobject *tables_kobj;
/*
 * sfi_table_show - sysfs ->read for a table's binary attribute.
 *
 * SYST itself stays permanently mapped (syst_va) and is copied
 * directly; any other table is looked up by signature, mapped for the
 * duration of the copy and released again.
 */
static ssize_t sfi_table_show(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       loff_t offset, size_t count)
{
	struct sfi_table_attr *tbl_attr =
	    container_of(bin_attr, struct sfi_table_attr, attr);
	struct sfi_table_header *th = NULL;
	struct sfi_table_key key;
	ssize_t cnt;

	key.sig = tbl_attr->name;
	key.oem_id = NULL;		/* wildcard: match any OEM */
	key.oem_table_id = NULL;

	if (strncmp(SFI_SIG_SYST, tbl_attr->name, SFI_SIGNATURE_SIZE)) {
		th = sfi_get_table(&key);
		if (!th)
			return 0;	/* table gone: report EOF */

		cnt = memory_read_from_buffer(buf, count, &offset,
						th, th->len);
		sfi_put_table(th);
	} else
		cnt = memory_read_from_buffer(buf, count, &offset,
					syst_va, syst_va->header.len);

	return cnt;
}
/*
 * sfi_sysfs_install_table - expose the table at @pa under
 * /sys/firmware/sfi/tables.
 *
 * The table is mapped only long enough to copy its signature into the
 * binary attribute's name; sfi_table_show() re-maps it on demand.
 * Returns the new attribute, or NULL on any failure.
 */
struct sfi_table_attr __init *sfi_sysfs_install_table(u64 pa)
{
	struct sfi_table_attr *tbl_attr;
	struct sfi_table_header *th;
	int ret;

	tbl_attr = kzalloc(sizeof(struct sfi_table_attr), GFP_KERNEL);
	if (!tbl_attr)
		return NULL;

	th = sfi_map_table(pa);
	if (!th) {
		kfree(tbl_attr);
		return NULL;
	}
	if (!th->sig[0]) {
		/*
		 * Empty signature: nothing to expose.  BUGFIX: unmap the
		 * table before bailing out -- the old code leaked the
		 * mapping on this path.
		 */
		sfi_unmap_table(th);
		kfree(tbl_attr);
		return NULL;
	}

	sysfs_attr_init(&tbl_attr->attr.attr);
	memcpy(tbl_attr->name, th->sig, SFI_SIGNATURE_SIZE);

	tbl_attr->attr.size = 0;
	tbl_attr->attr.read = sfi_table_show;
	tbl_attr->attr.attr.name = tbl_attr->name;
	tbl_attr->attr.attr.mode = 0400;

	ret = sysfs_create_bin_file(tables_kobj,
				&tbl_attr->attr);
	if (ret) {
		kfree(tbl_attr);
		tbl_attr = NULL;
	}

	sfi_unmap_table(th);
	return tbl_attr;
}
/*
 * sfi_sysfs_init - create /sys/firmware/sfi/tables and populate it with
 * one binary attribute per table referenced by SYST, plus SYST itself.
 * Runs as a core_initcall; failures are non-fatal, so 0 is always
 * returned.
 */
static int __init sfi_sysfs_init(void)
{
	int tbl_cnt, i;

	if (sfi_disabled)
		return 0;

	sfi_kobj = kobject_create_and_add("sfi", firmware_kobj);
	if (!sfi_kobj)
		return 0;

	tables_kobj = kobject_create_and_add("tables", sfi_kobj);
	if (!tables_kobj) {
		kobject_put(sfi_kobj);
		return 0;
	}

	/* SYST first, then every table it points at */
	sfi_sysfs_install_table(syst_pa);

	tbl_cnt = SFI_GET_NUM_ENTRIES(syst_va, u64);

	for (i = 0; i < tbl_cnt; i++)
		sfi_sysfs_install_table(syst_va->pentry[i]);

	sfi_acpi_sysfs_init();
	kobject_uevent(sfi_kobj, KOBJ_ADD);
	kobject_uevent(tables_kobj, KOBJ_ADD);
	pr_info("SFI sysfs interfaces init success\n");
	return 0;
}
/*
 * sfi_init - early boot entry point.  SFI and ACPI are mutually
 * exclusive: when ACPI is active, SFI disables itself.  Otherwise
 * locate SYST, checksum all tables and hand them to the platform code;
 * any failure disables SFI for the rest of boot.
 */
void __init sfi_init(void)
{
	if (!acpi_disabled)
		disable_sfi();

	if (sfi_disabled)
		return;

	pr_info("Simple Firmware Interface v0.81 http://simplefirmware.org\n");

	if (sfi_find_syst() || sfi_parse_syst() || sfi_platform_init())
		disable_sfi();

	return;
}
/*
 * sfi_init_late - called once the normal ioremap engine works: replace
 * the early fixmap mapping of SYST (which must not outlive early boot)
 * with a regular ioremap mapping, then start the SFI/ACPI glue.
 */
void __init sfi_init_late(void)
{
	int length;

	if (sfi_disabled)
		return;

	length = syst_va->header.len;	/* remember size before unmapping */
	sfi_unmap_memory(syst_va, sizeof(struct sfi_table_simple));

	/* Use ioremap now after it is ready */
	sfi_use_ioremap = 1;
	syst_va = sfi_map_memory(syst_pa, length);

	sfi_acpi_init();
}
/*
 * Register the sysfs interface via core_initcall: it must wait until
 * /sys/firmware exists so our entries can appear under
 * /sys/firmware/sfi.
 */
core_initcall(sfi_sysfs_init);
| gpl-2.0 |
emercs/BeagleBone-linux | drivers/uwb/drp-avail.c | 14817 | 8822 | /*
* Ultra Wide Band
* DRP availability management
*
* Copyright (C) 2005-2006 Intel Corporation
* Reinette Chatre <reinette.chatre@intel.com>
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* Manage DRP Availability (the MAS available for DRP
* reservations). Thus:
*
* - Handle DRP Availability Change notifications
*
* - Allow the reservation manager to indicate MAS reserved/released
* by local (owned by/targeted at the radio controller)
* reservations.
*
* - Based on the two sources above, generate a DRP Availability IE to
* be included in the beacon.
*
* See also the documentation for struct uwb_drp_avail.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitmap.h>
#include "uwb-internal.h"
/**
 * uwb_drp_avail_init - initialize an RC's MAS availability
 *
 * All MAS are available initially. The RC will inform us which
 * slots are used for the BP (it may change in size).
 */
void uwb_drp_avail_init(struct uwb_rc *rc)
{
	/* all three views start fully available (every bit set) */
	bitmap_fill(rc->drp_avail.global, UWB_NUM_MAS);
	bitmap_fill(rc->drp_avail.local, UWB_NUM_MAS);
	bitmap_fill(rc->drp_avail.pending, UWB_NUM_MAS);
}
/*
 * Determine MAS available for new local reservations.
 *
 * avail = global & local & pending
 */
void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail)
{
	/* avail = global & local ... */
	bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);
	/* ... then masked by the pending holds */
	bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS);
}
/**
* uwb_drp_avail_reserve_pending - reserve MAS for a new reservation
* @rc: the radio controller
* @mas: the MAS to reserve
*
* Returns 0 on success, or -EBUSY if the MAS requested aren't available.
*/
int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas)
{
struct uwb_mas_bm avail;
uwb_drp_available(rc, &avail);
if (!bitmap_subset(mas->bm, avail.bm, UWB_NUM_MAS))
return -EBUSY;
bitmap_andnot(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
return 0;
}
/**
 * uwb_drp_avail_reserve - reserve MAS for an established reservation
 * @rc: the radio controller
 * @mas: the MAS to reserve
 */
void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas)
{
	/* release the temporary hold taken when the request was made... */
	bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
	/* ...and account the MAS as locally reserved instead */
	bitmap_andnot(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
	/* the DRP Availability IE must be regenerated */
	rc->drp_avail.ie_valid = false;
}
/**
 * uwb_drp_avail_release - release MAS from a pending or established reservation
 * @rc: the radio controller
 * @mas: the MAS to release
 */
void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas)
{
	/* return the MAS to both the local and pending pools */
	bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS);
	bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS);
	/* invalidate the IE and let the reservation manager react */
	rc->drp_avail.ie_valid = false;
	uwb_rsv_handle_drp_avail_change(rc);
}
/**
 * uwb_drp_avail_ie_update - update the DRP Availability IE
 * @rc: the radio controller
 *
 * avail = global & local
 */
void uwb_drp_avail_ie_update(struct uwb_rc *rc)
{
	struct uwb_mas_bm avail;

	/* the beacon IE advertises global & local only (pending excluded) */
	bitmap_and(avail.bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS);

	rc->drp_avail.ie.hdr.element_id = UWB_IE_DRP_AVAILABILITY;
	rc->drp_avail.ie.hdr.length = UWB_NUM_MAS / 8;	/* bitmap length in octets */
	uwb_mas_bm_copy_le(rc->drp_avail.ie.bmp, &avail);
	rc->drp_avail.ie_valid = true;
}
/**
 * Assemble an unsigned long from a byte stream.
 *
 * @array: source buffer
 * @itr: index of the first byte to consume
 * @len: number of bytes to consume; must be <= sizeof(unsigned long)
 *	(BUG if not).  When @len is smaller, the assembled value is
 *	shifted up so the remaining low-order bytes of the result are
 *	zero padding.
 */
static
unsigned long get_val(u8 *array, size_t itr, size_t len)
{
	unsigned long val = 0;
	size_t i;

	BUG_ON(len > sizeof(val));

	/* consume bytes high-index first so array[itr] lands lowest */
	for (i = len; i > 0; i--) {
		val <<= 8;
		val |= array[itr + i - 1];
	}

	val <<= 8 * (sizeof(val) - len);	/* padding */
	return val;
}
/**
* Initialize bitmap from data buffer.
*
* The bitmap to be converted could come from a IE, for example a
* DRP Availability IE.
* From ECMA-368 1.0 [16.8.7]: "
* octets: 1 1 N * (0 to 32)
* Element ID Length (=N) DRP Availability Bitmap
*
* The DRP Availability Bitmap field is up to 256 bits long, one
* bit for each MAS in the superframe, where the least-significant
* bit of the field corresponds to the first MAS in the superframe
* and successive bits correspond to successive MASs."
*
* The DRP Availability bitmap is in octets from 0 to 32, so octet
* 32 contains bits for MAS 1-8, etc. If the bitmap is smaller than 32
* octets, the bits in octets not included at the end of the bitmap are
* treated as zero. In this case (when the bitmap is smaller than 32
* octets) the MAS represented range from MAS 1 to MAS (size of bitmap)
* with the last octet still containing bits for MAS 1-8, etc.
*
* For example:
* F00F0102 03040506 0708090A 0B0C0D0E 0F010203
* ^^^^
* ||||
* ||||
* |||\LSB of byte is MAS 9
* ||\MSB of byte is MAS 16
* |\LSB of first byte is MAS 1
* \ MSB of byte is MAS 8
*
* An example of this encoding can be found in ECMA-368 Annex-D [Table D.11]
*
* The resulting bitmap will have the following mapping:
* bit position 0 == MAS 1
* bit position 1 == MAS 2
* ...
* bit position (UWB_NUM_MAS - 1) == MAS UWB_NUM_MAS
*
* @bmp_itr: pointer to bitmap (can be declared with DECLARE_BITMAP)
* @buffer: pointer to buffer containing bitmap data in big endian
* format (MSB first)
* @buffer_size:number of bytes with which bitmap should be initialized
*/
static
void buffer_to_bmp(unsigned long *bmp_itr, void *_buffer,
			size_t buffer_size)
{
	u8 *buffer = _buffer;
	size_t itr, len;

	/* one bitmap word per sizeof(unsigned long) input bytes; the
	 * final chunk may be shorter and is padded by get_val() */
	for (itr = 0; itr < buffer_size; itr += sizeof(unsigned long)) {
		len = buffer_size - itr;
		if (len > sizeof(unsigned long))
			len = sizeof(unsigned long);
		bmp_itr[itr / sizeof(unsigned long)] = get_val(buffer, itr, len);
	}
}
/**
 * Extract the DRP Availability bitmap from a notification.
 *
 * The event carries a bitmap of (UWB_NUM_MAS / 8) bytes that is
 * converted into the kernel's internal bitmap representation.
 * Returns 0 on success, -EINVAL when the event is too short to decode.
 */
static
int uwbd_evt_get_drp_avail(struct uwb_event *evt, unsigned long *bmp)
{
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc_evt_drp_avail *drp_evt;

	/* Is there enough data to decode the event? */
	if (evt->notif.size < sizeof(*drp_evt)) {
		dev_err(dev, "DRP Availability Change: Not enough "
			"data to decode event [%zu bytes, %zu "
			"needed]\n", evt->notif.size, sizeof(*drp_evt));
		return -EINVAL;
	}

	drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp_avail, rceb);
	buffer_to_bmp(bmp, drp_evt->bmp, UWB_NUM_MAS/8);
	return 0;
}
/**
 * Process an incoming DRP Availability notification.
 *
 * @evt: Event information (packs the actual event data, which
 * radio controller it came to, etc).
 *
 * @returns: 0 on success (so uwbd() frees the event buffer), < 0
 * on error.
 *
 * According to ECMA-368 1.0 [16.8.7], bits set to ONE indicate that
 * the MAS slot is available, bits set to ZERO indicate that the slot
 * is busy.
 *
 * So we clear available slots, we set used slots :)
 *
 * The notification only marks non-availability based on the BP and
 * received DRP IEs that are not for this radio controller. A copy of
 * this bitmap is needed to generate the real availability (which
 * includes local and pending reservations).
 *
 * The DRP Availability IE that this radio controller emits will need
 * to be updated.
 */
int uwbd_evt_handle_rc_drp_avail(struct uwb_event *evt)
{
	int result;
	struct uwb_rc *rc = evt->rc;
	DECLARE_BITMAP(bmp, UWB_NUM_MAS);

	/* decode first, outside the lock; bail out on malformed events */
	result = uwbd_evt_get_drp_avail(evt, bmp);
	if (result < 0)
		return result;

	/* rsvs_mutex guards the availability bitmaps and the IE flag */
	mutex_lock(&rc->rsvs_mutex);
	bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS);
	rc->drp_avail.ie_valid = false;	/* IE must be rebuilt from new data */
	uwb_rsv_handle_drp_avail_change(rc);
	mutex_unlock(&rc->rsvs_mutex);

	/* schedule the reservation update outside the lock */
	uwb_rsv_sched_update(rc);

	return 0;
}
| gpl-2.0 |
diverger/uboot-lpc32xx | board/esd/hh405/logo_320_240_4bpp.c | 226 | 18826 | 0x1f,0x8b,0x08,0x08,0x9c,0x03,0x94,0x3f,0x00,0x03,0x48,0x6f,0x6c,0x7a,0x48,0x65,
0x72,0x5f,0x4c,0x6f,0x67,0x6f,0x5f,0x33,0x32,0x30,0x78,0x32,0x34,0x30,0x5f,0x6d,
0x69,0x74,0x74,0x65,0x5f,0x31,0x36,0x67,0x2e,0x62,0x6d,0x70,0x00,0xed,0x9c,0xfd,
0x6b,0x1b,0x47,0x1a,0xc7,0x9f,0xae,0xa2,0xe8,0x8c,0x4b,0x42,0xff,0x03,0xb1,0x26,
0x2f,0x26,0xa5,0x67,0x2c,0x52,0xd7,0xf8,0xb8,0x1e,0xb1,0xfb,0x62,0xc2,0x81,0xa9,
0xa3,0xe6,0x42,0x43,0x02,0x21,0xf1,0x35,0x57,0x38,0x4c,0x8a,0xbd,0x71,0x1d,0xe2,
0xa3,0x70,0x89,0x4c,0x8f,0x62,0xdc,0x4b,0xed,0x8d,0x84,0xfd,0xd3,0x71,0x77,0x0e,
0x94,0x90,0x50,0x7c,0x3a,0x09,0x29,0xb4,0xe4,0x87,0x9c,0x85,0xfc,0x17,0x14,0x42,
0x8a,0x31,0xc5,0x62,0xd7,0xb4,0x04,0x63,0x24,0x76,0xee,0x79,0x66,0x66,0x57,0x2b,
0xeb,0xd5,0x24,0x75,0x0b,0x9d,0x2f,0x28,0xfb,0x36,0x3b,0xf3,0xd9,0x67,0xe6,0x79,
0x66,0x76,0x66,0x9d,0x13,0xbf,0x1f,0xff,0x1c,0xb8,0xc6,0xf1,0x77,0x14,0x7f,0xbf,
0x7b,0x01,0x60,0x13,0xb7,0x2f,0xc0,0x3e,0x70,0x15,0x6b,0x17,0x3f,0xbf,0x5e,0x7a,
0xe9,0x25,0xd0,0x75,0x1d,0x22,0x91,0x08,0x0c,0x0c,0x0c,0x40,0x34,0x1a,0x85,0x91,
0x91,0x11,0x30,0x0c,0x03,0x62,0xb1,0x18,0x98,0xa6,0x09,0x4b,0x4b,0x4b,0x90,0x4a,
0xa5,0x20,0x97,0xcb,0xc1,0x37,0xdf,0x7c,0x03,0x96,0x65,0x01,0x63,0x8c,0x7e,0x4a,
0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,
0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,
0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0x4a,0xbf,0x6c,0x95,0x72,
0xf9,0x9f,0x1a,0xa1,0xa1,0xbe,0x87,0xd0,0x4f,0x8d,0xd0,0x50,0x9b,0x8a,0xef,0x99,
0xa4,0xf8,0xb8,0x9c,0xdd,0xdf,0xe1,0xd8,0xb4,0x21,0x3e,0xb1,0x87,0xb2,0xaa,0x53,
0xd9,0x55,0xa7,0x76,0xc8,0xb2,0x5b,0x48,0xe9,0xd8,0x56,0xd3,0x8c,0x2a,0xf5,0x34,
0x0c,0x10,0xc8,0x72,0xbe,0xfb,0x00,0xbf,0xa1,0x3c,0x70,0x0b,0x93,0xec,0x07,0x9d,
0x6b,0x75,0x5b,0x3f,0xc6,0x9c,0xaf,0x00,0x12,0xf7,0xf4,0x29,0xa7,0x5b,0xb7,0xd9,
0x6c,0x47,0x96,0x6d,0x77,0x77,0x32,0xf6,0xad,0x7e,0x8e,0x15,0xaf,0x80,0xb6,0x3a,
0x7b,0x88,0x3d,0x04,0x71,0xf3,0xc6,0x10,0x40,0x90,0xe9,0x3a,0xe6,0xf8,0x8e,0x3e,
0xc5,0x66,0x71,0xe7,0x89,0xfe,0x3a,0x63,0x9f,0xe9,0xbf,0xc6,0x47,0xc0,0x8b,0x81,
0x38,0x2b,0xe9,0x47,0xb0,0x14,0xbd,0xa3,0x25,0x52,0xfa,0x93,0x1d,0x02,0xdc,0x84,
0x20,0xee,0x68,0x98,0xef,0x53,0xfe,0x67,0x3a,0x18,0x70,0xb8,0xec,0x2d,0x68,0x67,
0x5b,0xf4,0x10,0x33,0x30,0xe5,0xe0,0x31,0x9b,0xd1,0x16,0xd9,0x76,0xb8,0x8d,0xb1,
0x27,0x70,0x0e,0x7f,0x00,0x47,0x46,0x83,0xec,0x01,0xdd,0x7c,0x9d,0xb1,0x2e,0x20,
0x3e,0x00,0xcc,0x67,0x08,0xce,0xb3,0x19,0x58,0xc4,0x9c,0x11,0x6d,0x16,0x0e,0x32,
0xe7,0x0a,0x2f,0x8b,0x95,0x20,0x68,0x33,0x07,0xb4,0x16,0xf9,0x0e,0x75,0x03,0x1c,
0x20,0x4e,0x2d,0x1a,0xc6,0xbc,0xb1,0x88,0x4e,0x73,0x8c,0xf3,0x91,0xfd,0xec,0x6d,
0xe4,0xdb,0x84,0x80,0x89,0x25,0x2f,0x56,0xf1,0x39,0x43,0xd0,0x33,0x07,0xd8,0x34,
0x1e,0xf7,0x24,0x47,0xf1,0x66,0x7c,0xb6,0xd0,0x87,0x15,0x7c,0x59,0xf6,0xd8,0xe5,
0x2b,0x02,0x4c,0xcc,0x0d,0x69,0xc4,0xc7,0x5a,0xe7,0xeb,0x4d,0x27,0xc3,0xf0,0x22,
0xf1,0xf5,0x59,0x33,0xa0,0x61,0x2e,0x58,0x7e,0x61,0x88,0xfd,0x80,0xb9,0x99,0xa6,
0xc9,0xf9,0x1e,0x40,0x1f,0xbb,0x5f,0x93,0xaf,0x0b,0x32,0xf8,0x0b,0xb1,0xb5,0x3c,
0xde,0xa0,0xb1,0xaf,0x20,0x30,0x55,0x34,0xed,0xda,0x7c,0x6b,0x64,0xb7,0xf5,0x8e,
0xdd,0xf1,0x15,0x31,0xd5,0xd7,0x10,0xb2,0x91,0x8f,0x8a,0x00,0xcc,0xa5,0x1d,0xef,
0x2e,0xb0,0x62,0x17,0xbc,0x4c,0x09,0x88,0xef,0x2e,0x16,0x83,0x6c,0x8b,0xac,0x8a,
0x2f,0x0c,0x96,0xbd,0x4e,0xae,0x45,0x09,0xc1,0x19,0xc5,0x27,0x45,0x27,0xa8,0xcd,
0x77,0x17,0xae,0x3b,0xcc,0xd9,0xd8,0x1d,0x9f,0x40,0x08,0xac,0x6e,0xe2,0xd3,0xb3,
0x52,0x17,0xac,0x8e,0x52,0x76,0xa4,0xef,0x09,0x46,0xf0,0x3d,0x80,0x29,0xaa,0xf7,
0x6c,0x2d,0xfb,0x61,0x53,0xea,0xe2,0xa1,0x89,0x1e,0x2e,0x8c,0x4c,0xa8,0x1d,0x7c,
0x07,0x05,0xdf,0x90,0xfc,0xfb,0xb1,0x12,0x85,0xb2,0xdd,0xf0,0x6d,0x71,0xbe,0x00,
0xde,0x34,0x04,0xd9,0x51,0x78,0xdd,0xe2,0x61,0x60,0x9b,0x0c,0x26,0xf8,0x1e,0x63,
0xfd,0x62,0xf6,0xd5,0x7c,0x74,0x12,0xc9,0x89,0xaf,0x70,0x17,0x9f,0x10,0xe8,0x41,
0xea,0xf0,0x61,0x85,0x60,0xbe,0x8e,0x6d,0xef,0x9a,0x0f,0x5b,0xb6,0x2d,0xe2,0x33,
0xfa,0xdb,0x90,0xf0,0xdb,0x73,0x0c,0x63,0x86,0x26,0xf9,0xc8,0x7f,0x23,0xc0,0xf9,
0x58,0x25,0xdf,0x63,0x74,0xab,0x48,0x18,0xeb,0xf7,0x1d,0xbc,0xe7,0x98,0x00,0xab,
0xe2,0x13,0xf5,0xbb,0x1d,0x16,0x39,0x4f,0xb9,0xf5,0xdb,0x2a,0xde,0x32,0x70,0x17,
0xad,0xe2,0x73,0x78,0x31,0xc4,0xe7,0xdc,0xe5,0xe7,0x6a,0xd8,0xaf,0xc8,0x93,0x87,
0xc8,0xd8,0x10,0xca,0xd4,0xe1,0x93,0xf6,0x93,0x7c,0x8b,0xbb,0x6b,0x7f,0x5b,0x14,
0xb2,0xce,0x33,0x5e,0xbf,0x16,0xaf,0xdf,0xdf,0x5a,0xd6,0x77,0xbc,0x9e,0xbe,0x86,
0xe0,0x2a,0xe7,0x63,0x8e,0x95,0x5b,0xa1,0xaa,0xe4,0x7c,0x78,0x6d,0x1b,0x2d,0x6e,
0x7d,0x4b,0x0f,0xc1,0xac,0x95,0xd4,0x50,0x08,0xdb,0x7e,0xe0,0xba,0xcd,0xca,0xf5,
0x3b,0x45,0xed,0xd5,0xc7,0x37,0x03,0x07,0x4b,0x43,0xf4,0x77,0x8c,0x8f,0x21,0x5b,
0x22,0xb4,0x96,0xf9,0xee,0x8a,0xb0,0xbc,0x49,0x55,0x87,0x4d,0x64,0x75,0x06,0x5e,
0xa1,0x16,0xb9,0xc8,0xab,0x96,0xa3,0xb4,0x8b,0x84,0x5e,0xfb,0x43,0x2c,0xf2,0x28,
0x6e,0x3f,0x92,0xc3,0xf9,0x8e,0x51,0x71,0x5d,0xf0,0xba,0xe0,0x3b,0xcf,0xb8,0xaf,
0x08,0xbe,0x03,0xd4,0x84,0x0e,0xa2,0x73,0xe3,0xa5,0x07,0x80,0xed,0x0f,0x56,0x5b,
0x8f,0x2f,0x64,0xf5,0x04,0xef,0x47,0x6c,0x8c,0x2d,0x60,0x3f,0xa4,0xec,0xee,0x63,
0x0e,0xbc,0x9c,0x03,0x82,0x8f,0xb2,0x0a,0xc3,0xaa,0xe4,0x7b,0x19,0xf9,0xb1,0x92,
0xc9,0xab,0xa9,0xb7,0x75,0xb0,0xb2,0x67,0x84,0xe1,0x46,0xe9,0x66,0x7e,0x9f,0xcd,
0x8d,0x48,0x7c,0x9b,0xd0,0x46,0x0f,0x47,0xf1,0x25,0xc3,0x9c,0x19,0x40,0xff,0xc5,
0x2b,0x25,0x08,0xb4,0xda,0xbf,0xf5,0xd8,0x62,0x1b,0x9c,0x18,0xc2,0x46,0xeb,0x74,
0x69,0xd7,0x1e,0xe1,0x76,0x3b,0x1a,0x1d,0xee,0xc2,0x6a,0xe4,0x7c,0x57,0x4e,0xe7,
0x6e,0xe1,0xbe,0xe4,0x03,0xea,0x4c,0x42,0x26,0xc0,0x02,0xdb,0xd2,0x26,0xb1,0xe2,
0x0f,0x60,0x60,0x0e,0x26,0x96,0xcf,0xb2,0x6f,0x01,0x7a,0xe7,0xce,0x22,0x9a,0x76,
0x66,0x8c,0xac,0x24,0xec,0x07,0x3d,0xb7,0x00,0x0d,0xbb,0x05,0xc1,0xdc,0x3d,0xde,
0xbf,0x61,0x51,0x5d,0x44,0xdd,0x82,0x1e,0x43,0x20,0x23,0x38,0x35,0xd1,0xd2,0xc9,
0x9b,0xbb,0xb1,0x05,0x6f,0xf1,0xc6,0x1c,0x14,0xf6,0x1b,0x85,0xe3,0x80,0x19,0x4a,
0xbe,0x30,0x26,0xd5,0x00,0x0d,0x9f,0x21,0x43,0x0e,0x03,0xda,0x06,0x83,0xdf,0xa1,
0xae,0x17,0xb9,0x9b,0x84,0xdb,0x91,0x8f,0x92,0x04,0x6c,0x97,0x8f,0xbb,0x2d,0x71,
0x1d,0xc7,0xce,0x13,0xb7,0x1a,0x15,0xd6,0xd7,0x12,0xdf,0x03,0x19,0x8e,0x37,0xa1,
0x13,0x8d,0x82,0x1d,0x10,0x0f,0x26,0x98,0xad,0xe4,0xe3,0xf6,0xc3,0x5a,0x41,0xa2,
0xf3,0x92,0x2f,0xb4,0x8e,0x07,0x0b,0x33,0x3c,0x35,0x0f,0x1a,0x21,0x6c,0x0c,0x14,
0x5f,0xf0,0x41,0x3e,0x03,0x7a,0x0e,0x00,0x1a,0x03,0x65,0xe8,0xb9,0x16,0x91,0xef,
0x74,0x58,0xd0,0x7c,0x49,0x69,0x2d,0xe4,0x6b,0xc3,0x9b,0x5b,0x74,0x0f,0x97,0x6f,
0xcb,0xb8,0xf6,0xdd,0x3b,0x87,0xa6,0x70,0xcf,0x59,0xeb,0xee,0x98,0xb4,0xd8,0xb6,
0x41,0x9a,0x60,0x45,0x63,0x12,0x87,0x4d,0x63,0xdd,0x91,0x04,0xf7,0x4f,0x9b,0x2d,
0x4f,0xd8,0x63,0x1d,0x7d,0xf6,0xf6,0x70,0x07,0x02,0x38,0x1b,0x51,0xfd,0x38,0x8e,
0x5b,0x58,0x61,0x58,0xef,0xa1,0xed,0x97,0xdd,0xc7,0x27,0x31,0x59,0xe6,0xbe,0x7e,
0x1a,0x8f,0x96,0x8d,0x55,0xb6,0x6e,0x2c,0x2c,0x77,0x1f,0xa7,0x21,0xa5,0x3d,0xa6,
0x1f,0x5f,0xc0,0xad,0x31,0x59,0xfc,0x50,0xbf,0xd6,0x1a,0x1f,0xf7,0x7e,0xa9,0x62,
0x5a,0x6c,0x93,0xa9,0xea,0x74,0xfc,0x9c,0x23,0xba,0x3c,0x56,0x8a,0x67,0x91,0x59,
0xa4,0x2e,0xa5,0x56,0xc5,0xcd,0xa9,0x2c,0x3f,0x4c,0x66,0xc9,0x35,0xb2,0xee,0x69,
0xc6,0x47,0xac,0xa9,0x2c,0x1f,0xb6,0xf2,0xfb,0x48,0x76,0xb1,0x46,0x11,0x75,0xf8,
0xda,0x1c,0x7b,0x25,0xdb,0x5a,0x62,0xde,0x7f,0xb4,0x20,0x19,0xa6,0x9f,0x87,0x70,
0x14,0xd0,0x31,0x2c,0x7a,0xf5,0xda,0x2a,0x9a,0x09,0x6f,0xdf,0xb5,0x5f,0x33,0x79,
0x7c,0x49,0x73,0xb5,0x61,0xc2,0xe6,0xc2,0xa8,0x09,0x20,0xa2,0x6a,0x6d,0x79,0xf1,
0x59,0x14,0xbc,0x3b,0xbe,0x51,0x1e,0xe7,0x9f,0x49,0x3f,0x70,0xbe,0xfa,0xc5,0x6e,
0xf9,0xf8,0x76,0x6d,0xbf,0x99,0x67,0xe7,0x73,0x96,0x87,0x8f,0x4f,0x34,0xa8,0x05,
0xbf,0xfd,0x70,0x38,0xba,0xe7,0xf6,0x6b,0x2a,0x9f,0xf3,0x38,0x63,0xd1,0x96,0xf8,
0xa2,0x51,0xf9,0xc0,0x85,0xdc,0x2e,0x5f,0x27,0x9f,0x51,0xcd,0x5f,0x83,0xb9,0xac,
0xea,0x17,0x68,0x25,0x25,0x25,0xa5,0x5f,0x9e,0xf6,0x36,0x42,0x3f,0x77,0xd5,0x9e,
0x51,0x75,0x0a,0x49,0xe3,0x47,0xd7,0x55,0xb3,0xb9,0xe2,0x59,0xab,0xba,0x53,0x2a,
0xde,0x82,0x3d,0x92,0x3e,0x10,0x69,0xa6,0xee,0x23,0xf1,0x1d,0x80,0xc5,0xa1,0xbd,
0xc2,0x23,0xc0,0xa6,0x8a,0x44,0xae,0x55,0xe0,0xf1,0x19,0x8d,0x3d,0x93,0xf6,0xe6,
0x48,0x63,0x5d,0x42,0xc2,0x41,0xff,0xb4,0xf9,0x95,0xbd,0xc4,0x03,0x38,0x6c,0x18,
0x7f,0x6c,0xd0,0x48,0x47,0x46,0x2e,0x0e,0xf4,0xeb,0x9f,0x96,0xf9,0xd6,0xf7,0x16,
0x0f,0xb4,0xf7,0x63,0x8d,0x74,0x13,0x09,0x07,0x22,0x9d,0xde,0x88,0xac,0xa8,0xef,
0x31,0x1f,0xec,0x6f,0xc8,0x87,0x32,0x46,0x06,0xfa,0x5f,0x76,0x0d,0xf8,0xf5,0x9e,
0xf3,0x35,0x31,0x20,0x6a,0xfc,0x52,0xff,0xab,0x72,0xc4,0xed,0xe8,0xe1,0xbd,0xe6,
0x6b,0x6e,0xc0,0x98,0x71,0xb1,0x5f,0xce,0x7a,0xaf,0x77,0xef,0x3d,0x9f,0x76,0xb9,
0x19,0xdf,0xcd,0xf1,0x8b,0xaf,0xf1,0x16,0xb8,0x1d,0xe9,0x6e,0x94,0x53,0xcf,0x84,
0x11,0xfe,0x31,0x00,0xe7,0x9b,0x03,0xf6,0x0f,0x12,0xdf,0x53,0x1f,0x1f,0xfd,0x2f,
0x73,0x74,0x33,0x6e,0x25,0x54,0x67,0x96,0xb1,0xe5,0x2e,0xef,0xaa,0xce,0xaf,0x68,
0xba,0xee,0xed,0xbb,0xe5,0xc9,0x03,0xff,0xd6,0x4d,0xe1,0x65,0xe7,0xbf,0xa1,0x69,
0x05,0xc7,0xc6,0xfb,0x5f,0x25,0x0f,0x59,0x1b,0xf6,0xf8,0xda,0x32,0x16,0x2d,0xd8,
0x41,0x5b,0x9a,0x66,0x85,0x49,0x21,0xee,0x43,0xeb,0x3c,0xd7,0x5e,0xbc,0x4a,0x7a,
0x02,0x67,0xf2,0x96,0xd4,0x9a,0x7b,0x6b,0x2f,0x9e,0x7a,0x40,0xcf,0xb3,0x60,0x59,
0x5b,0x88,0x34,0xe9,0xa6,0x98,0x11,0xd9,0xd2,0x74,0x5c,0x27,0x1e,0x6e,0xba,0x37,
0x34,0xad,0xe0,0xd8,0x8d,0x8b,0x27,0xc8,0x3b,0x6e,0x95,0xf9,0xda,0xb3,0xf4,0xfe,
0x4c,0x19,0xca,0x55,0xc2,0x3e,0xe9,0xe2,0x7c,0xca,0xb7,0x37,0x2b,0x0e,0xbe,0x87,
0x5e,0x2f,0x74,0x3e,0x2d,0xf3,0x31,0x3e,0xed,0x18,0x5a,0xa0,0xf9,0x05,0xd0,0xbc,
0xb9,0x32,0xe2,0xcb,0x8a,0x39,0xed,0x84,0x38,0x14,0x6a,0xee,0x21,0xb1,0xf1,0x81,
0x4f,0xb1,0xe8,0xb1,0x53,0x7a,0x5d,0xbe,0x45,0x46,0x6f,0xae,0xb6,0xc8,0xb6,0x37,
0xe3,0xf1,0x49,0xc7,0xb7,0xec,0x2d,0xf7,0xd6,0xd7,0x7c,0x7c,0xdb,0x64,0xbf,0x6a,
0x3e,0x2d,0xed,0xce,0xed,0x90,0xf6,0xb5,0xc0,0x77,0xea,0x24,0x3e,0xec,0x78,0x99,
0x8f,0x32,0x9a,0xf1,0xf1,0x75,0x62,0x21,0x85,0x54,0x3c,0xed,0xf0,0x49,0x51,0xce,
0xb7,0x92,0x4a,0xdd,0x13,0x7b,0x56,0x0a,0xf5,0xa5,0x7b,0xeb,0x91,0x4a,0x3e,0x78,
0x8f,0x66,0xc9,0x31,0x71,0xaa,0xcc,0x17,0xca,0x33,0xe6,0x3d,0x4f,0x2b,0x0d,0xf0,
0xc6,0xc8,0x80,0x5d,0x9b,0x2f,0xcd,0x67,0xd9,0x00,0xcb,0x72,0xb6,0x71,0x44,0x96,
0xe1,0xcb,0x31,0x3d,0x44,0x95,0x34,0xcd,0x59,0xe8,0xa1,0x79,0x48,0x8b,0xc6,0x6a,
0xb7,0x1a,0xf1,0x95,0x30,0xb1,0x39,0x2a,0xb2,0xc5,0xb6,0xd9,0x6e,0x33,0x77,0x09,
0xb9,0x35,0xbe,0x9b,0x23,0xfd,0xd8,0xf6,0x8d,0x53,0xe1,0x7a,0x7c,0x18,0x7f,0x9c,
0xa4,0x11,0x33,0x13,0xb6,0xb3,0x56,0xc1,0xc7,0x87,0x67,0x54,0xfa,0x5f,0x77,0xf2,
0xed,0xf7,0xf1,0x15,0x2a,0xf9,0x5e,0xb4,0x65,0xab,0x11,0x6a,0xee,0x20,0xe8,0xc1,
0x36,0x4b,0xce,0xf9,0xf8,0x32,0xb4,0xec,0x84,0x46,0x48,0xf3,0x49,0xb6,0x10,0xf2,
0xad,0x63,0x6c,0x39,0x1d,0xe7,0x6b,0xa9,0x47,0xf0,0xaa,0xf0,0xd7,0x80,0x99,0xb1,
0x7d,0x35,0xc5,0x9b,0x7b,0x46,0xf0,0x05,0x91,0xaf,0x88,0xdb,0x93,0x7c,0x6a,0xba,
0xfc,0xd8,0x78,0xed,0x15,0xc6,0x31,0xbd,0x27,0x6a,0xc1,0x80,0x17,0x17,0x1c,0x73,
0x07,0x1f,0x4d,0xb3,0xff,0x4a,0xf0,0xd1,0xfa,0x0f,0x95,0x19,0x98,0xc8,0xd0,0x5a,
0x0c,0xf1,0x49,0x7f,0x9d,0x88,0x5b,0x4e,0xe5,0x98,0x2c,0x98,0x17,0xc6,0x09,0x66,
0x04,0x5f,0x5f,0x35,0xdf,0x39,0xbf,0xfb,0xe2,0x13,0x4d,0xb7,0xc0,0xf7,0xe7,0x9a,
0x7c,0xed,0x16,0x27,0xc1,0x31,0xac,0xc3,0x43,0x73,0x5b,0x82,0xec,0x7a,0xe4,0x0b,
0xaf,0x7d,0x07,0xd1,0x80,0x4e,0x05,0x5f,0x48,0x56,0x1e,0xd9,0xbc,0x24,0xf9,0xdc,
0x7c,0x89,0xef,0x09,0x2d,0xdc,0x95,0x4f,0xa1,0xb4,0xe6,0xf6,0x8b,0x8d,0x9c,0x2c,
0x99,0x73,0xd1,0xb2,0x7f,0x64,0x68,0x05,0x50,0xb4,0xe4,0xa7,0x10,0x48,0x08,0x53,
0x60,0xa1,0x09,0x9b,0x3d,0xe4,0x55,0x27,0xf9,0x34,0x03,0xcf,0x54,0xbc,0x12,0xd0,
0x52,0xe7,0xa6,0xe4,0x74,0x6a,0xf2,0xb5,0xd9,0x6e,0x76,0xbb,0xe0,0xeb,0x2b,0xc5,
0xe7,0x2e,0xf9,0xfa,0x0f,0xc6,0x36,0xb0,0x4d,0xd3,0x8a,0xc4,0x16,0x04,0xd3,0x72,
0xaa,0x17,0x02,0x9f,0xd0,0xda,0x74,0x50,0x36,0x7d,0x52,0x0f,0x56,0xf9,0xb6,0x57,
0xd2,0x19,0x93,0xcf,0x98,0x93,0xab,0xc7,0x25,0xdf,0x31,0x4b,0x38,0x90,0xf9,0x19,
0x5f,0x65,0x65,0x6b,0x3a,0x35,0xbf,0xfb,0xfe,0x47,0x6a,0x81,0x6f,0x7c,0xb0,0x94,
0xfa,0xcf,0x58,0x05,0x9f,0xf7,0x4a,0xc2,0xbd,0x44,0xc4,0x03,0x8a,0xb6,0xa5,0x0a,
0x3e,0x6d,0x22,0x61,0x39,0xae,0x7d,0xb4,0x1d,0x0b,0x2b,0x2e,0x1f,0xd7,0x9a,0xe0,
0x73,0x28,0xca,0x3b,0xbb,0xe7,0x2b,0xa6,0x92,0x8d,0xf8,0x64,0x3c,0x78,0x8f,0x56,
0x67,0xfd,0x7c,0xd0,0x1b,0x77,0xec,0x2b,0xbb,0xe0,0x13,0x17,0x36,0x86,0x76,0xc9,
0x77,0x79,0x23,0x95,0x34,0x3c,0xbe,0xd0,0x17,0x8c,0x7f,0x70,0xe4,0x58,0x44,0x12,
0x8a,0x7b,0x7c,0x7d,0xc4,0x17,0xf0,0xf3,0x05,0x27,0xd2,0x9e,0x87,0x68,0x93,0xf2,
0x23,0x25,0xc7,0xe2,0x9d,0x21,0x9d,0x6f,0xcb,0xcb,0xe3,0x2d,0x11,0x73,0x28,0x5b,
0xca,0xb5,0x6b,0x57,0x7c,0xc6,0xe5,0x52,0x2a,0x79,0x33,0xe2,0xf1,0x51,0x46,0x05,
0xec,0x94,0xf2,0x3b,0xf8,0xce,0x52,0xb4,0x08,0x2c,0xd8,0xbe,0x06,0xde,0x63,0x5a,
0x6c,0xd4,0xe5,0x13,0xb7,0xf0,0x2e,0x2f,0x63,0x7b,0x7c,0x4e,0x11,0x8f,0x1f,0x4a,
0x3e,0xca,0xd6,0x12,0xe3,0x84,0xd6,0xf9,0x6e,0x5e,0x2e,0xa5,0x57,0xe6,0x87,0xc3,
0x7e,0xbe,0x47,0xa2,0x95,0x17,0xc3,0xc1,0xdb,0xb6,0x1b,0xef,0xae,0x93,0xeb,0x55,
0xf2,0x05,0xe7,0xd2,0x76,0x49,0x56,0xd7,0x1f,0xa8,0x0f,0x24,0xf7,0x94,0x0e,0xe2,
0xf2,0x3d,0x12,0x3d,0x20,0xe7,0x23,0xbf,0x4b,0xf8,0x06,0x3c,0xad,0xf1,0x7d,0xe0,
0xa4,0x57,0xcc,0x61,0xdd,0xc7,0xe7,0xd0,0x2c,0x0c,0xf1,0x75,0x05,0x4d,0x5b,0xc6,
0x13,0x8d,0x77,0x0a,0x5a,0x05,0x9f,0x46,0x89,0xa4,0x35,0x4e,0xe3,0x2d,0x69,0x5b,
0xdc,0xfa,0x49,0x99,0xaf,0xb0,0x8c,0xc7,0x63,0x82,0xcf,0x79,0x64,0x18,0x57,0xe3,
0xbe,0x06,0xd2,0x52,0x7c,0xe1,0x7c,0xc9,0x31,0x3f,0x1f,0x11,0x04,0x38,0x5f,0xc0,
0xcc,0xb3,0x12,0x37,0x2d,0x85,0xd6,0x2d,0xd0,0x12,0x36,0x0f,0xbd,0x52,0x87,0xcc,
0xbc,0x0c,0x3f,0xbc,0xb4,0xb4,0x7c,0x98,0xf7,0xca,0x7c,0xb3,0xf2,0xda,0x3e,0x6a,
0xb8,0x98,0x51,0x47,0xc2,0x1f,0x01,0x91,0x6f,0x7a,0xde,0x6c,0x02,0xf8,0x01,0x4b,
0x17,0x52,0x63,0x11,0x59,0xc1,0x6e,0xe7,0xa9,0x21,0x1f,0xd6,0xdd,0x60,0x5a,0xb6,
0x17,0xfa,0x82,0x60,0x96,0xf3,0xf9,0x3b,0x8d,0x78,0xde,0xdf,0x5f,0x25,0xaa,0xf9,
0xdc,0x8b,0x01,0xc9,0x07,0x79,0xbe,0xde,0xe4,0x52,0xc7,0x3e,0xd7,0xbb,0xb5,0xcb,
0x8d,0xdf,0x43,0xae,0xb3,0x5c,0x21,0x35,0xe7,0xe7,0x2b,0x49,0x3e,0x67,0x08,0x63,
0x08,0xe3,0x5f,0xcb,0xd1,0x7a,0x35,0x1e,0xc2,0x0e,0xbe,0xb3,0x16,0x73,0x57,0xea,
0x7d,0x7c,0xb2,0xe3,0xa5,0x50,0xe5,0xfa,0x02,0x6f,0xb8,0x54,0x84,0xed,0xe7,0xdb,
0x3f,0x3d,0x7b,0x24,0x76,0xe1,0x68,0x63,0xfb,0x7d,0xca,0x0a,0x56,0xce,0x8c,0xca,
0x37,0x4c,0xde,0x50,0xb8,0x69,0x6c,0x1a,0xe9,0x06,0xb1,0x06,0x69,0xe0,0x77,0x86,
0x5c,0x13,0xcf,0x5e,0xf3,0xf5,0xf9,0x20,0x96,0xe3,0x4b,0x5e,0x40,0x4b,0xd8,0xa2,
0x6d,0xc9,0x8e,0x8d,0xf8,0xee,0xf9,0xf9,0x30,0xb0,0x1c,0x62,0xcc,0xd7,0x40,0xde,
0xbe,0x81,0x6c,0x23,0x17,0x1a,0xc0,0x99,0xf3,0xb1,0x3c,0xf2,0x15,0x92,0x6e,0x05,
0x7b,0x7c,0xe8,0xb9,0x14,0x3c,0xae,0x62,0xa3,0x2a,0x1a,0xa7,0xe9,0xb1,0xbf,0xab,
0xe0,0x8b,0xe7,0x50,0x2b,0x14,0x72,0x3d,0x03,0xde,0xa9,0xe6,0x2b,0xe0,0x3b,0x53,
0x6e,0x05,0xb8,0x63,0x61,0x28,0xcc,0x53,0xc8,0x2e,0xfb,0xc7,0xd5,0x8f,0x8f,0xbe,
0x39,0xaf,0xff,0xbd,0xfe,0x30,0x70,0xda,0x34,0x63,0xd8,0xce,0x2c,0x2b,0x65,0x48,
0x0f,0xa6,0x11,0x01,0xe7,0xfb,0x48,0xf2,0xc5,0x99,0x5c,0x60,0x73,0x1e,0x56,0xf0,
0xa5,0xbd,0xbe,0xc2,0xe3,0x8b,0x57,0xf3,0xb9,0x9d,0x09,0xf1,0xb9,0xf2,0x5e,0xf8,
0x20,0x76,0xe1,0xed,0x37,0xcd,0x7d,0x9f,0xbf,0x5d,0xdf,0x7c,0xa6,0x79,0x13,0x0d,
0x9e,0xb7,0x56,0x4c,0x69,0xc0,0x80,0xdb,0xc2,0x3e,0x12,0xaf,0x44,0xc1,0xe9,0x84,
0xc8,0xd9,0x79,0x44,0x71,0x7f,0xd2,0xe3,0x4b,0xd4,0xe4,0xe3,0xbe,0x79,0x6c,0x95,
0x7f,0xd2,0xd9,0x96,0x2e,0xf3,0x41,0xc2,0xe5,0x73,0x0a,0x5e,0x7c,0xd6,0x62,0x27,
0x62,0x03,0x17,0xde,0x32,0x0f,0xd7,0x73,0x10,0xd3,0xbc,0xbd,0x34,0x8d,0xb7,0xe4,
0xb1,0x82,0x0d,0xf1,0x8e,0x19,0xc0,0x61,0xa7,0x4d,0x3b,0x13,0x79,0xc7,0x26,0xef,
0x9b,0x30,0x33,0x54,0x29,0x4e,0x91,0x37,0xa5,0xc9,0xbc,0xb8,0x4a,0x30,0xf4,0xc1,
0x1d,0xfd,0xbc,0x01,0x7b,0x7c,0xd5,0xda,0xa0,0x6d,0x27,0x26,0x22,0xbe,0x94,0x4d,
0x9d,0x9d,0xc8,0xef,0x6f,0x16,0xef,0x34,0x2d,0xab,0x94,0xf4,0xda,0xeb,0xfe,0xf9,
0xfe,0xd8,0x89,0xee,0xf0,0x74,0x3d,0x3e,0xb4,0xde,0xd2,0x12,0x05,0x76,0xac,0x60,
0x34,0x20,0x07,0xd4,0xe8,0xf3,0x4d,0xce,0x87,0xfd,0x00,0x75,0x5e,0x01,0xc3,0xa4,
0xa6,0x96,0xfa,0x07,0x37,0xef,0x19,0x79,0x15,0xa0,0x3c,0x99,0xed,0x05,0x18,0x53,
0x5e,0x6c,0xc3,0x2d,0xff,0x96,0xc8,0x95,0xc8,0x4e,0xea,0x2f,0x5e,0xf5,0x0e,0xc6,
0x2e,0xbc,0xff,0x96,0xf9,0x71,0xec,0xe8,0x7c,0x6c,0xc7,0xfc,0xb8,0x68,0x7a,0xe6,
0xed,0x3b,0xa9,0xa4,0x2d,0xf8,0x72,0xc9,0xb9,0xe1,0x88,0xee,0xe3,0x3b,0x23,0x5e,
0x6c,0x00,0x8e,0x4e,0x9b,0xd8,0x6f,0x9a,0x62,0xa4,0xf2,0xec,0x7c,0x63,0xe5,0xe1,
0xc1,0xc4,0xf4,0xc7,0x83,0x51,0xe3,0xc4,0x8d,0xc1,0xf9,0xe9,0xda,0xd3,0xf8,0x4b,
0xa9,0x54,0x92,0x37,0x0a,0xb4,0xfb,0x4a,0x72,0x2e,0x1a,0xd1,0xc3,0xf4,0x81,0x12,
0xff,0xa6,0x09,0xff,0x91,0x7d,0x4a,0x10,0xf3,0x36,0x64,0xae,0x9a,0xbc,0x0a,0x10,
0xf6,0x8a,0x09,0x97,0x77,0x34,0x37,0x91,0xd8,0x96,0xd3,0xf8,0x92,0x7b,0xda,0x67,
0x9a,0xff,0xc4,0xf8,0x32,0x7d,0xe1,0xdf,0x75,0xe9,0x52,0xff,0xcd,0xb8,0x7c,0x85,
0x95,0x79,0x23,0x1a,0xe9,0xf6,0xa6,0x7d,0x08,0x4e,0xcc,0xea,0x84,0x03,0x11,0xf4,
0x1d,0xbd,0x4a,0x6e,0x91,0xd5,0x57,0xe4,0x75,0x7f,0x0a,0x77,0xb7,0x3c,0xaf,0x04,
0x97,0x6f,0x9b,0x4b,0x57,0xfe,0x74,0xe7,0x5f,0x87,0x97,0xb0,0x99,0xd5,0x10,0xbd,
0xfc,0xa7,0xa4,0x73,0x0a,0x03,0x8e,0x9d,0x8a,0x44,0xea,0x95,0xf6,0xdc,0xd5,0x11,
0x5f,0x4a,0xdd,0xb9,0x7f,0xd8,0x38,0x31,0x78,0xa7,0x1e,0x5c,0x7a,0x25,0x23,0xdd,
0x9e,0xa6,0x99,0x56,0x52,0xa6,0x71,0x29,0x1a,0x8d,0x9e,0xda,0xb1,0x12,0xf1,0xc6,
0x40,0x74,0x20,0xfa,0x46,0xf3,0x15,0x8b,0x5d,0xea,0x24,0x47,0x48,0xc6,0x52,0x75,
0x94,0xcb,0x61,0x64,0x77,0x3f,0x21,0x71,0x78,0x0d,0x23,0xa0,0x61,0x8c,0x10,0x23,
0xca,0x5b,0x8a,0x88,0xbe,0x1b,0x8d,0x5e,0x92,0x27,0xbd,0x2b,0xb8,0x79,0x57,0xae,
0x54,0xb8,0x2b,0x16,0xd1,0x2a,0xbd,0xeb,0xe6,0x50,0xb1,0x2b,0xee,0x7f,0x3f,0x4d,
0x1d,0xd0,0xff,0x72,0xf5,0x85,0xdd,0x5a,0xda,0x8b,0xea,0x18,0xa8,0x1c,0x22,0x4c,
0xd6,0x6e,0xab,0xcf,0x5f,0x19,0xab,0xb9,0x7c,0xdf,0xa1,0x38,0xc4,0x67,0xe5,0xea,
0x19,0xfb,0xf9,0xab,0x05,0xbc,0x42,0xa6,0xcc,0xc7,0x24,0x31,0x75,0xfa,0x3f,0x3e,
0x5c,0x2e,0x97,0x6f,0x48,0x56,0x65,0x3e,0xd9,0x04,0x7f,0x4e,0x2a,0xec,0xfc,0xd3,
0xb6,0x9f,0x1b,0x60,0xf5,0x12,0x3b,0x75,0xf8,0x96,0xe8,0xc8,0x6d,0xb7,0x3f,0x77,
0xc4,0x38,0xc0,0xf2,0x6f,0x7c,0xe7,0x9d,0xca,0xf3,0x4d,0x2f,0x57,0xca,0xa9,0x95,
0xab,0xd8,0x54,0xc1,0xb9,0x88,0x75,0x2e,0x3c,0x2f,0x59,0xf4,0xed,0x01,0xfe,0x53,
0x63,0x81,0x5c,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,
0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,
0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,0x49,
0xe9,0x67,0xac,0xff,0x03,0xdc,0x41,0xd4,0x19,0x76,0x96,0x00,0x00,
| gpl-2.0 |
backup-kb/pnotify-linux-4.1.6 | drivers/staging/lustre/lustre/mdc/mdc_request.c | 226 | 68839 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*/
#define DEBUG_SUBSYSTEM S_MDC
# include <linux/module.h>
# include <linux/pagemap.h>
# include <linux/miscdevice.h>
# include <linux/init.h>
# include <linux/utsname.h>
#include "../include/lustre_acl.h"
#include "../include/obd_class.h"
#include "../include/lustre_fid.h"
#include "../include/lprocfs_status.h"
#include "../include/lustre_param.h"
#include "../include/lustre_log.h"
#include "mdc_internal.h"
#define REQUEST_MINOR 244
/* Argument bundle for an asynchronous capability-renewal RPC; presumably
 * ra_cb is invoked on completion with ra_oc — TODO confirm against the
 * renewal interpret callback elsewhere in this file. */
struct mdc_renew_capa_args {
	struct obd_capa *ra_oc;   /* capability object to renew */
	renew_capa_cb_t ra_cb;    /* completion callback */
};
static int mdc_cleanup(struct obd_device *obd);
/*
 * Pull a lustre_capa out of the reply capsule's @field and wrap it in a
 * freshly allocated client-side obd_capa, returned through @oc.
 *
 * Returns 0 on success, -EPROTO if the reply lacks the capability field,
 * or the error from alloc_capa() on allocation failure.
 */
static int mdc_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
			   const struct req_msg_field *field, struct obd_capa **oc)
{
	struct lustre_capa *capa;
	struct obd_capa *c;

	/* swabbed already in mdc_enqueue */
	capa = req_capsule_server_get(&req->rq_pill, field);
	if (capa == NULL)
		return -EPROTO;

	c = alloc_capa(CAPA_SITE_CLIENT);
	if (IS_ERR(c)) {
		CDEBUG(D_INFO, "alloc capa failed!\n");
		return PTR_ERR(c);
	}

	c->c_capa = *capa;
	*oc = c;
	return 0;
}
/*
 * Send @req and wait for its reply, throttled by the per-client RPC limit.
 */
static inline int mdc_queue_wait(struct ptlrpc_request *req)
{
	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
	int rc;

	/* mdc_enter_request() ensures that this client has no more
	 * than cl_max_rpcs_in_flight RPCs simultaneously in flight
	 * against an MDT. */
	rc = mdc_enter_request(cli);
	if (rc == 0) {
		rc = ptlrpc_queue_wait(req);
		mdc_exit_request(cli);
	}
	return rc;
}
/* Helper that implements most of mdc_getstatus and signal_completed_replay. */
/* XXX this should become mdc_get_info("key"), sending MDS_GET_INFO RPC */
/*
 * Issue an MDS_GETSTATUS RPC on @imp and return the filesystem root fid
 * through @rootfid (and the root MDS capability through @pc, when the
 * server grants one).
 *
 * @level:     import state the request may be sent in (rq_send_state);
 *             mdc_getstatus() passes LUSTRE_IMP_FULL.
 * @msg_flags: extra lustre_msg flags to set on the request.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EPROTO on a
 * malformed reply, or the RPC error.
 */
static int send_getstatus(struct obd_import *imp, struct lu_fid *rootfid,
			  struct obd_capa **pc, int level, int msg_flags)
{
	struct ptlrpc_request *req;
	struct mdt_body *body;
	int rc;

	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_GETSTATUS,
					LUSTRE_MDS_VERSION, MDS_GETSTATUS);
	if (req == NULL)
		return -ENOMEM;

	/* GETSTATUS targets no specific object: NULL fid/capa, suppgid -1. */
	mdc_pack_body(req, NULL, NULL, 0, 0, -1, 0);
	lustre_msg_add_flags(req->rq_reqmsg, msg_flags);
	req->rq_send_state = level;
	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		goto out;

	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (body == NULL) {
		rc = -EPROTO;
		goto out;
	}

	/* Unpack the root MDS capability only if the server says it sent one. */
	if (body->valid & OBD_MD_FLMDSCAPA) {
		rc = mdc_unpack_capa(NULL, req, &RMF_CAPA1, pc);
		if (rc)
			goto out;
	}

	*rootfid = body->fid1;
	CDEBUG(D_NET,
	       "root fid="DFID", last_committed=%llu\n",
	       PFID(rootfid),
	       lustre_msg_get_last_committed(req->rq_repmsg));
out:
	/* Request is consumed on every path; the caller only gets the fid. */
	ptlrpc_req_finished(req);
	return rc;
}
/* This should be mdc_get_info("rootfid") */
/* Fetch the filesystem root fid from the MDS (see send_getstatus()). */
static int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid,
			 struct obd_capa **pc)
{
	struct obd_import *imp = class_exp2cliimp(exp);

	/* Require a fully connected import; no extra message flags. */
	return send_getstatus(imp, rootfid, pc, LUSTRE_IMP_FULL, 0);
}
/*
* This function now is known to always saying that it will receive 4 buffers
* from server. Even for cases when acl_size and md_size is zero, RPC header
* will contain 4 fields and RPC itself will contain zero size fields. This is
* because mdt_getattr*() _always_ returns 4 fields, but if acl is not needed
* and thus zero, it shrinks it, making zero size. The same story about
* md_size. And this is course of problem when client waits for smaller number
* of fields. This issue will be fixed later when client gets aware of RPC
* layouts. --umka
*/
/*
 * Send an already-packed getattr-style request and sanity-check the reply:
 * the mdt_body must be present, and any EA data, remote permission block,
 * or MDS capability the body advertises must actually be in the reply.
 *
 * Returns 0 on success, -EPROTO on a malformed reply, or the RPC error.
 */
static int mdc_getattr_common(struct obd_export *exp,
			      struct ptlrpc_request *req)
{
	struct req_capsule *reply_pill = &req->rq_pill;
	struct mdt_body *repbody;
	void *ea;
	int rc;

	/* Request message already built by the caller. */
	rc = ptlrpc_queue_wait(req);
	if (rc != 0)
		return rc;

	repbody = req_capsule_server_get(reply_pill, &RMF_MDT_BODY);
	if (repbody == NULL)
		return -EPROTO;

	CDEBUG(D_NET, "mode: %o\n", repbody->mode);

	if (repbody->eadatasize != 0) {
		/* Track the largest EA the server has sent us. */
		mdc_update_max_ea_from_body(exp, repbody);

		ea = req_capsule_server_sized_get(reply_pill, &RMF_MDT_MD,
						  repbody->eadatasize);
		if (ea == NULL)
			return -EPROTO;
	}

	if (repbody->valid & OBD_MD_FLRMTPERM) {
		struct mdt_remote_perm *perm;

		/* Remote permissions only exist for remote clients. */
		LASSERT(client_is_remote(exp));
		perm = req_capsule_server_swab_get(reply_pill, &RMF_ACL,
						   lustre_swab_mdt_remote_perm);
		if (perm == NULL)
			return -EPROTO;
	}

	if (repbody->valid & OBD_MD_FLMDSCAPA) {
		struct lustre_capa *capa;

		capa = req_capsule_server_get(reply_pill, &RMF_CAPA1);
		if (capa == NULL)
			return -EPROTO;
	}

	return 0;
}
/*
 * Get attributes of the object named by op_data->op_fid1 via MDS_GETATTR.
 * On success the reply request is handed to the caller through @request
 * (caller must ptlrpc_req_finished() it); on failure it is freed here.
 */
static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
		       struct ptlrpc_request **request)
{
	struct ptlrpc_request *req;
	int rc;

	/* Single MDS without an LMV case: the target index is always 0. */
	if (op_data->op_flags & MF_GET_MDT_IDX) {
		op_data->op_mds = 0;
		return 0;
	}

	*request = NULL;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
	if (req == NULL)
		return -ENOMEM;

	mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
	if (rc != 0) {
		ptlrpc_request_free(req);
		return rc;
	}

	mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
		      op_data->op_valid, op_data->op_mode, -1, 0);

	/* op_mode doubles as the expected EA buffer size here. */
	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
			     op_data->op_mode);
	if (op_data->op_valid & OBD_MD_FLRMTPERM) {
		LASSERT(client_is_remote(exp));
		req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
				     sizeof(struct mdt_remote_perm));
	}
	ptlrpc_request_set_replen(req);

	rc = mdc_getattr_common(exp, req);
	if (rc != 0) {
		ptlrpc_req_finished(req);
		return rc;
	}

	*request = req;
	return 0;
}
/*
 * Get attributes of the entry named op_data->op_name under the directory
 * op_data->op_fid1 via MDS_GETATTR_NAME.  On success the reply request is
 * returned through @request (caller frees); on failure it is freed here.
 */
static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
			    struct ptlrpc_request **request)
{
	struct ptlrpc_request *req;
	int rc;

	*request = NULL;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_GETATTR_NAME);
	if (req == NULL)
		return -ENOMEM;

	mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
	/* +1 for the NUL terminator of the name field. */
	req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
			     op_data->op_namelen + 1);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR_NAME);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
		      op_data->op_valid, op_data->op_mode,
		      op_data->op_suppgids[0], 0);

	if (op_data->op_name) {
		char *name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
		/* op_namelen must exactly cover op_name (no embedded NUL). */
		LASSERT(strnlen(op_data->op_name, op_data->op_namelen) ==
			op_data->op_namelen);
		memcpy(name, op_data->op_name, op_data->op_namelen);
	}

	/* op_mode doubles as the expected EA reply buffer size here. */
	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
			     op_data->op_mode);
	ptlrpc_request_set_replen(req);

	rc = mdc_getattr_common(exp, req);
	if (rc)
		ptlrpc_req_finished(req);
	else
		*request = req;
	return rc;
}
/*
 * Ask the MDS whether @cfid lies under @pfid via MDS_IS_SUBDIR.
 * The reply request is handed back through @request on success and on
 * -EREMOTE (which still carries a useful reply); otherwise it is freed.
 */
static int mdc_is_subdir(struct obd_export *exp,
			 const struct lu_fid *pfid,
			 const struct lu_fid *cfid,
			 struct ptlrpc_request **request)
{
	struct ptlrpc_request *req;
	int rc;

	*request = NULL;
	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
					&RQF_MDS_IS_SUBDIR, LUSTRE_MDS_VERSION,
					MDS_IS_SUBDIR);
	if (req == NULL)
		return -ENOMEM;

	mdc_is_subdir_pack(req, pfid, cfid, 0);
	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc == 0 || rc == -EREMOTE)
		*request = req;
	else
		ptlrpc_req_finished(req);
	return rc;
}
/*
 * Common machinery for xattr RPCs.  Two modes, selected by @opcode:
 *   - MDS_REINT:    setxattr, packed as a mdt_rec_setxattr reint record,
 *                   serialized under the client's reint RPC lock;
 *   - anything else (MDS_GETXATTR): packed as a plain mdt_body request.
 *
 * @fmt:         request format (RQF_MDS_REINT_SETXATTR / RQF_MDS_GETXATTR).
 * @valid:       OBD_MD_* flags describing the operation.
 * @input/@input_size:   xattr value to set (setxattr only).
 * @output_size: expected reply EA buffer size (and sx_size on setxattr).
 * @suppgid:     supplementary group id to send; -1 for none.
 * @request:     on success receives the reply request (caller frees);
 *               on failure the request is freed here.
 */
static int mdc_xattr_common(struct obd_export *exp,
			    const struct req_format *fmt,
			    const struct lu_fid *fid,
			    struct obd_capa *oc, int opcode, u64 valid,
			    const char *xattr_name, const char *input,
			    int input_size, int output_size, int flags,
			    __u32 suppgid, struct ptlrpc_request **request)
{
	struct ptlrpc_request *req;
	int xattr_namelen = 0;
	char *tmp;
	int rc;

	*request = NULL;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt);
	if (req == NULL)
		return -ENOMEM;

	mdc_set_capa_size(req, &RMF_CAPA1, oc);
	if (xattr_name) {
		/* +1: the name is sent with its NUL terminator. */
		xattr_namelen = strlen(xattr_name) + 1;
		req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
				     xattr_namelen);
	}
	if (input_size) {
		LASSERT(input);
		req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
				     input_size);
	}

	/* Flush local XATTR locks to get rid of a possible cancel RPC */
	if (opcode == MDS_REINT && fid_is_sane(fid) &&
	    exp->exp_connect_data.ocd_ibits_known & MDS_INODELOCK_XATTR) {
		LIST_HEAD(cancels);
		int count;

		/* Without that packing would fail */
		if (input_size == 0)
			req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
					     RCL_CLIENT, 0);

		/* Piggyback cancels for our own XATTR locks on this fid. */
		count = mdc_resource_get_unused(exp, fid,
						&cancels, LCK_EX,
						MDS_INODELOCK_XATTR);
		rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
		if (rc) {
			ptlrpc_request_free(req);
			return rc;
		}
	} else {
		rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
		if (rc) {
			ptlrpc_request_free(req);
			return rc;
		}
	}

	if (opcode == MDS_REINT) {
		struct mdt_rec_setxattr *rec;

		/* The setxattr record must overlay the generic reint record. */
		CLASSERT(sizeof(struct mdt_rec_setxattr) ==
			 sizeof(struct mdt_rec_reint));
		rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
		rec->sx_opcode = REINT_SETXATTR;
		rec->sx_fsuid  = from_kuid(&init_user_ns, current_fsuid());
		rec->sx_fsgid  = from_kgid(&init_user_ns, current_fsgid());
		rec->sx_cap    = cfs_curproc_cap_pack();
		rec->sx_suppgid1 = suppgid;
		rec->sx_suppgid2 = -1;
		rec->sx_fid    = *fid;
		/* Setting an xattr also updates ctime. */
		rec->sx_valid  = valid | OBD_MD_FLCTIME;
		rec->sx_time   = get_seconds();
		rec->sx_size   = output_size;
		rec->sx_flags  = flags;

		mdc_pack_capa(req, &RMF_CAPA1, oc);
	} else {
		mdc_pack_body(req, fid, oc, valid, output_size, suppgid, flags);
	}

	if (xattr_name) {
		tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
		memcpy(tmp, xattr_name, xattr_namelen);
	}
	if (input_size) {
		tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
		memcpy(tmp, input, input_size);
	}

	if (req_capsule_has_field(&req->rq_pill, &RMF_EADATA, RCL_SERVER))
		req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
				     RCL_SERVER, output_size);
	ptlrpc_request_set_replen(req);

	/* make rpc */
	if (opcode == MDS_REINT)
		/* Reint RPCs are serialized per client via the rpc lock. */
		mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);

	rc = ptlrpc_queue_wait(req);

	if (opcode == MDS_REINT)
		mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);

	if (rc)
		ptlrpc_req_finished(req);
	else
		*request = req;
	return rc;
}
/*
 * Set an extended attribute on @fid: thin wrapper dispatching to
 * mdc_xattr_common() in its MDS_REINT (setxattr reint record) mode.
 */
static int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
			struct obd_capa *oc, u64 valid, const char *xattr_name,
			const char *input, int input_size, int output_size,
			int flags, __u32 suppgid, struct ptlrpc_request **request)
{
	return mdc_xattr_common(exp, &RQF_MDS_REINT_SETXATTR,
				fid, oc, MDS_REINT, valid, xattr_name,
				input, input_size, output_size, flags,
				suppgid, request);
}
/*
 * Read an extended attribute from @fid: thin wrapper dispatching to
 * mdc_xattr_common() in MDS_GETXATTR mode (suppgid fixed at -1: none).
 */
static int mdc_getxattr(struct obd_export *exp, const struct lu_fid *fid,
			struct obd_capa *oc, u64 valid, const char *xattr_name,
			const char *input, int input_size, int output_size,
			int flags, struct ptlrpc_request **request)
{
	return mdc_xattr_common(exp, &RQF_MDS_GETXATTR,
				fid, oc, MDS_GETXATTR, valid, xattr_name,
				input, input_size, output_size, flags,
				-1, request);
}
#ifdef CONFIG_FS_POSIX_ACL
/*
 * Extract the POSIX ACL from the reply's RMF_ACL field and store it in
 * md->posix_acl.  A zero aclsize or an xattr decoding to no ACL is not an
 * error.  Returns 0 on success, -EPROTO if the advertised ACL segment is
 * missing, or the conversion/validation error.
 */
static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md)
{
	struct req_capsule *pill = &req->rq_pill;
	struct mdt_body *body = md->body;
	struct posix_acl *acl;
	void *buf;
	int rc;

	if (body->aclsize == 0)
		return 0;

	buf = req_capsule_server_sized_get(pill, &RMF_ACL, body->aclsize);
	if (buf == NULL)
		return -EPROTO;

	/* May return a valid ACL, an ERR_PTR, or NULL (no ACL present). */
	acl = posix_acl_from_xattr(&init_user_ns, buf, body->aclsize);
	if (IS_ERR(acl)) {
		rc = PTR_ERR(acl);
		CERROR("convert xattr to acl: %d\n", rc);
		return rc;
	}
	if (acl == NULL)
		return 0;

	rc = posix_acl_valid(acl);
	if (rc != 0) {
		CERROR("validate acl: %d\n", rc);
		posix_acl_release(acl);
		return rc;
	}

	md->posix_acl = acl;
	return 0;
}
#else
#define mdc_unpack_acl(req, md) 0
#endif
/*
 * Unpack the MDT reply held in @req into @md: the mdt_body, the striping
 * EA (LOV EA for regular files, LMV EA for striped directories), remote
 * permissions or a POSIX ACL, and the MDS/OSS capabilities.
 *
 * @dt_exp is used to unpack the LOV EA, @md_exp the LMV EA.
 *
 * Returns 0 on success.  On failure, everything already attached to @md
 * (capas, ACL, lsm) is released before the negative errno is returned,
 * so the caller need not clean up.
 *
 * Fix: the FLDIREA/eadatasize==0 branch used a bare "return -EPROTO",
 * bypassing the common cleanup at "out:" and inconsistent with every
 * other error branch; it now routes through the common exit path.
 */
int mdc_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
		      struct obd_export *dt_exp, struct obd_export *md_exp,
		      struct lustre_md *md)
{
	struct req_capsule *pill = &req->rq_pill;
	int rc;

	LASSERT(md);
	memset(md, 0, sizeof(*md));

	md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
	LASSERT(md->body != NULL);

	if (md->body->valid & OBD_MD_FLEASIZE) {
		int lmmsize;
		struct lov_mds_md *lmm;

		/* A LOV EA is only valid on a regular file. */
		if (!S_ISREG(md->body->mode)) {
			CDEBUG(D_INFO,
			       "OBD_MD_FLEASIZE set, should be a regular file, but is not\n");
			rc = -EPROTO;
			goto out;
		}
		if (md->body->eadatasize == 0) {
			CDEBUG(D_INFO,
			       "OBD_MD_FLEASIZE set, but eadatasize 0\n");
			rc = -EPROTO;
			goto out;
		}
		lmmsize = md->body->eadatasize;
		lmm = req_capsule_server_sized_get(pill, &RMF_MDT_MD, lmmsize);
		if (!lmm) {
			rc = -EPROTO;
			goto out;
		}
		/* obd_unpackmd() returns the unpacked size on success. */
		rc = obd_unpackmd(dt_exp, &md->lsm, lmm, lmmsize);
		if (rc < 0)
			goto out;
		if (rc < sizeof(*md->lsm)) {
			CDEBUG(D_INFO,
			       "lsm size too small: rc < sizeof (*md->lsm) (%d < %d)\n",
			       rc, (int)sizeof(*md->lsm));
			rc = -EPROTO;
			goto out;
		}
	} else if (md->body->valid & OBD_MD_FLDIREA) {
		int lmvsize;
		struct lov_mds_md *lmv;

		/* An LMV EA is only valid on a directory. */
		if (!S_ISDIR(md->body->mode)) {
			CDEBUG(D_INFO,
			       "OBD_MD_FLDIREA set, should be a directory, but is not\n");
			rc = -EPROTO;
			goto out;
		}
		if (md->body->eadatasize == 0) {
			CDEBUG(D_INFO,
			       "OBD_MD_FLDIREA is set, but eadatasize 0\n");
			/* was "return -EPROTO"; use the common exit path
			 * like every other error branch */
			rc = -EPROTO;
			goto out;
		}
		if (md->body->valid & OBD_MD_MEA) {
			lmvsize = md->body->eadatasize;
			lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
							   lmvsize);
			if (!lmv) {
				rc = -EPROTO;
				goto out;
			}
			rc = obd_unpackmd(md_exp, (void *)&md->mea, lmv,
					  lmvsize);
			if (rc < 0)
				goto out;
			if (rc < sizeof(*md->mea)) {
				CDEBUG(D_INFO,
				       "size too small: rc < sizeof(*md->mea) (%d < %d)\n",
				       rc, (int)sizeof(*md->mea));
				rc = -EPROTO;
				goto out;
			}
		}
	}
	rc = 0;

	if (md->body->valid & OBD_MD_FLRMTPERM) {
		/* remote permission */
		LASSERT(client_is_remote(exp));
		md->remote_perm = req_capsule_server_swab_get(pill, &RMF_ACL,
						lustre_swab_mdt_remote_perm);
		if (!md->remote_perm) {
			rc = -EPROTO;
			goto out;
		}
	} else if (md->body->valid & OBD_MD_FLACL) {
		/* for ACL, it's possible that FLACL is set but aclsize is zero.
		 * only when aclsize != 0 there's an actual segment for ACL
		 * in reply buffer.
		 */
		if (md->body->aclsize) {
			rc = mdc_unpack_acl(req, md);
			if (rc)
				goto out;
#ifdef CONFIG_FS_POSIX_ACL
		} else {
			md->posix_acl = NULL;
#endif
		}
	}
	if (md->body->valid & OBD_MD_FLMDSCAPA) {
		struct obd_capa *oc = NULL;

		rc = mdc_unpack_capa(NULL, req, &RMF_CAPA1, &oc);
		if (rc)
			goto out;
		md->mds_capa = oc;
	}
	if (md->body->valid & OBD_MD_FLOSSCAPA) {
		struct obd_capa *oc = NULL;

		rc = mdc_unpack_capa(NULL, req, &RMF_CAPA2, &oc);
		if (rc)
			goto out;
		md->oss_capa = oc;
	}
out:
	if (rc) {
		/* Undo everything attached so far, in reverse order. */
		if (md->oss_capa) {
			capa_put(md->oss_capa);
			md->oss_capa = NULL;
		}
		if (md->mds_capa) {
			capa_put(md->mds_capa);
			md->mds_capa = NULL;
		}
#ifdef CONFIG_FS_POSIX_ACL
		posix_acl_release(md->posix_acl);
#endif
		if (md->lsm)
			obd_free_memmd(dt_exp, &md->lsm);
	}
	return rc;
}
/*
 * Counterpart of mdc_get_lustre_md() in the md_ops vector.  Currently a
 * no-op on this client: the contents of @md are released elsewhere (see
 * the error path of mdc_get_lustre_md() for what cleanup looks like).
 */
int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
{
	return 0;
}
/**
 * Handles both OPEN and SETATTR RPCs for OPEN-CLOSE and SETATTR-DONE_WRITING
 * RPC chains.
 *
 * Called at replay time: the server has issued a new open handle in the
 * replayed reply, so propagate it into the cached client handle and into
 * any pending CLOSE/DONE_WRITING request so that it matches on resend.
 */
void mdc_replay_open(struct ptlrpc_request *req)
{
	struct md_open_data *mod = req->rq_cb_data;
	struct ptlrpc_request *close_req;
	struct obd_client_handle *och;
	struct lustre_handle old;
	struct mdt_body *body;
	if (mod == NULL) {
		DEBUG_REQ(D_ERROR, req,
			  "Can't properly replay without open data.");
		return;
	}
	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	LASSERT(body != NULL);
	och = mod->mod_och;
	if (och != NULL) {
		struct lustre_handle *file_fh;
		LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC);
		file_fh = &och->och_fh;
		CDEBUG(D_HA, "updating handle from %#llx to %#llx\n",
		       file_fh->cookie, body->handle.cookie);
		/* Remember the old cookie so we can sanity-check the close
		 * request below before rewriting it. */
		old = *file_fh;
		*file_fh = body->handle;
	}
	close_req = mod->mod_close_req;
	if (close_req != NULL) {
		__u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
		struct mdt_ioepoch *epoch;
		LASSERT(opc == MDS_CLOSE || opc == MDS_DONE_WRITING);
		epoch = req_capsule_client_get(&close_req->rq_pill,
					       &RMF_MDT_EPOCH);
		LASSERT(epoch);
		/* The close must still reference the pre-replay handle. */
		if (och != NULL)
			LASSERT(!memcmp(&old, &epoch->handle, sizeof(old)));
		DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
		epoch->handle = body->handle;
	}
}
/*
 * Commit callback for an open request: once the server has committed the
 * open transaction, mark the request committed (but keep it alive for a
 * possible close, see below) and drop the reference that rq_cb_data held
 * on the md_open_data.
 */
void mdc_commit_open(struct ptlrpc_request *req)
{
	struct md_open_data *mod = req->rq_cb_data;
	if (mod == NULL)
		return;
	/**
	 * No need to touch md_open_data::mod_och, it holds a reference on
	 * \var mod and will zero references to each other, \var mod will be
	 * freed after that when md_open_data::mod_och will put the reference.
	 */
	/**
	 * Do not let open request to disappear as it still may be needed
	 * for close rpc to happen (it may happen on evict only, otherwise
	 * ptlrpc_request::rq_replay does not let mdc_commit_open() to be
	 * called), just mark this rpc as committed to distinguish these 2
	 * cases, see mdc_close() for details. The open request reference will
	 * be put along with freeing \var mod.
	 */
	ptlrpc_request_addref(req);
	spin_lock(&req->rq_lock);
	req->rq_committed = 1;
	spin_unlock(&req->rq_lock);
	req->rq_cb_data = NULL;
	obd_mod_put(mod);
}
/*
 * Attach replay data to an open request so that, after recovery, the
 * open can be replayed and its (possibly new) file handle propagated to
 * @och and to a pending close (see mdc_replay_open()/mdc_commit_open()).
 *
 * Also fixes up the request's reint record (cr_fid2, cr_ioepoch,
 * cr_old_handle) from the server reply so the saved request is
 * self-consistent for replay.
 *
 * Returns 0; allocation failure of md_open_data is tolerated (replay
 * will simply lack open data).
 */
int mdc_set_open_replay_data(struct obd_export *exp,
			     struct obd_client_handle *och,
			     struct lookup_intent *it)
{
	struct md_open_data *mod;
	struct mdt_rec_create *rec;
	struct mdt_body *body;
	struct ptlrpc_request *open_req = it->d.lustre.it_data;
	struct obd_import *imp = open_req->rq_import;
	if (!open_req->rq_replay)
		return 0;
	rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT);
	body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
	LASSERT(rec != NULL);
	/* Incoming message in my byte order (it's been swabbed). */
	/* Outgoing messages always in my byte order. */
	LASSERT(body != NULL);
	/* Only if the import is replayable, we set replay_open data */
	if (och && imp->imp_replayable) {
		mod = obd_mod_alloc();
		if (mod == NULL) {
			DEBUG_REQ(D_ERROR, open_req,
				  "Can't allocate md_open_data");
			return 0;
		}
		/**
		 * Take a reference on \var mod, to be freed on mdc_close().
		 * It protects \var mod from being freed on eviction (commit
		 * callback is called despite rq_replay flag).
		 * Another reference for \var och.
		 */
		obd_mod_get(mod);
		obd_mod_get(mod);
		/* Publish mod/och cross-links under rq_lock so the commit
		 * callback sees a consistent state. */
		spin_lock(&open_req->rq_lock);
		och->och_mod = mod;
		mod->mod_och = och;
		mod->mod_is_create = it_disposition(it, DISP_OPEN_CREATE) ||
				     it_disposition(it, DISP_OPEN_STRIPE);
		mod->mod_open_req = open_req;
		open_req->rq_cb_data = mod;
		open_req->rq_commit_cb = mdc_commit_open;
		spin_unlock(&open_req->rq_lock);
	}
	rec->cr_fid2 = body->fid1;
	rec->cr_ioepoch = body->ioepoch;
	rec->cr_old_handle.cookie = body->handle.cookie;
	open_req->rq_replay_cb = mdc_replay_open;
	if (!fid_is_sane(&body->fid1)) {
		DEBUG_REQ(D_ERROR, open_req,
			  "Saving replay request with insane fid");
		LBUG();
	}
	DEBUG_REQ(D_RPCTRACE, open_req, "Set up open replay data");
	return 0;
}
/*
 * Release replay interest in the open (and close) requests tracked by
 * @mod.  A non-create open against a server that supports open-by-fid
 * replay (imp_connect_disp_stripe) can be committed immediately.
 */
static void mdc_free_open(struct md_open_data *mod)
{
	int committed = 0;
	if (mod->mod_is_create == 0 &&
	    imp_connect_disp_stripe(mod->mod_open_req->rq_import))
		committed = 1;
	/* The caller must already have cleared rq_replay. */
	LASSERT(mod->mod_open_req->rq_replay == 0);
	DEBUG_REQ(D_RPCTRACE, mod->mod_open_req, "free open request\n");
	ptlrpc_request_committed(mod->mod_open_req, committed);
	if (mod->mod_close_req)
		ptlrpc_request_committed(mod->mod_close_req, committed);
}
/*
 * Detach the open-replay data from @och and drop och's reference on it
 * (the counterpart of the second obd_mod_get() in
 * mdc_set_open_replay_data()).  Always returns 0.
 */
int mdc_clear_open_replay_data(struct obd_export *exp,
			       struct obd_client_handle *och)
{
	struct md_open_data *mod = och->och_mod;
	/**
	 * It is possible to not have \var mod in a case of eviction between
	 * lookup and ll_file_open().
	 **/
	if (mod == NULL)
		return 0;
	LASSERT(mod != LP_POISON);
	LASSERT(mod->mod_open_req != NULL);
	mdc_free_open(mod);
	mod->mod_och = NULL;
	och->och_mod = NULL;
	obd_mod_put(mod);
	return 0;
}
/*
 * Prepare the close request for replay based on the server reply.
 *
 * When the server answered -EAGAIN it wants a SOM attribute update:
 * mark the request epoch with MF_SOM_AU so the resend carries it, and
 * propagate the server's GETATTR-lock hint into @op_data.
 *
 * Style fix: the opening brace was on the signature line, which
 * violates kernel coding style for function definitions.
 */
static void mdc_close_handle_reply(struct ptlrpc_request *req,
				   struct md_op_data *op_data, int rc)
{
	struct mdt_body *repbody;
	struct mdt_ioepoch *epoch;

	if (req && rc == -EAGAIN) {
		repbody = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
		epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH);

		epoch->flags |= MF_SOM_AU;
		if (repbody->valid & OBD_MD_FLGETATTRLOCK)
			op_data->op_flags |= MF_GETATTR_LOCK;
	}
}
/*
 * Send an MDS_CLOSE RPC for the file described by @op_data.
 *
 * @mod, when present, links this close to its matching open so replay
 * keeps the two consistent.  On return *request holds the (possibly
 * failed) close request; the caller owns the reference.  Returns the
 * RPC result, or a saved FID-allocation error for HSM release closes.
 */
static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
		     struct md_open_data *mod, struct ptlrpc_request **request)
{
	struct obd_device *obd = class_exp2obd(exp);
	struct ptlrpc_request *req;
	struct req_format *req_fmt;
	int rc;
	int saved_rc = 0;
	req_fmt = &RQF_MDS_CLOSE;
	if (op_data->op_bias & MDS_HSM_RELEASE) {
		req_fmt = &RQF_MDS_RELEASE_CLOSE;
		/* allocate a FID for volatile file */
		rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
		if (rc < 0) {
			CERROR("%s: "DFID" failed to allocate FID: %d\n",
			       obd->obd_name, PFID(&op_data->op_fid1), rc);
			/* save the errcode and proceed to close */
			saved_rc = rc;
		}
	}
	*request = NULL;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);
	if (req == NULL)
		return -ENOMEM;
	mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}
	/* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
	 * portal whose threads are not taking any DLM locks and are therefore
	 * always progressing */
	req->rq_request_portal = MDS_READPAGE_PORTAL;
	ptlrpc_at_set_req_timeout(req);
	/* Ensure that this close's handle is fixed up during replay. */
	if (likely(mod != NULL)) {
		LASSERTF(mod->mod_open_req != NULL &&
			 mod->mod_open_req->rq_type != LI_POISON,
			 "POISONED open %p!\n", mod->mod_open_req);
		mod->mod_close_req = req;
		DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
		/* We no longer want to preserve this open for replay even
		 * though the open was committed. b=3632, b=3633 */
		spin_lock(&mod->mod_open_req->rq_lock);
		mod->mod_open_req->rq_replay = 0;
		spin_unlock(&mod->mod_open_req->rq_lock);
	} else {
		CDEBUG(D_HA,
		       "couldn't find open req; expecting close error\n");
	}
	mdc_close_pack(req, op_data);
	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
			     obd->u.cli.cl_default_mds_easize);
	req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER,
			     obd->u.cli.cl_default_mds_cookiesize);
	ptlrpc_request_set_replen(req);
	/* Serialize closes through the close rpc lock. */
	mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
	rc = ptlrpc_queue_wait(req);
	mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
	if (req->rq_repmsg == NULL) {
		/* No reply at all: report the send/queue status. */
		CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req,
		       req->rq_status);
		if (rc == 0)
			rc = req->rq_status ?: -EIO;
	} else if (rc == 0 || rc == -EAGAIN) {
		struct mdt_body *body;
		rc = lustre_msg_get_status(req->rq_repmsg);
		if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
			DEBUG_REQ(D_ERROR, req,
				  "type == PTL_RPC_MSG_ERR, err = %d", rc);
			if (rc > 0)
				rc = -rc;
		}
		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
		if (body == NULL)
			rc = -EPROTO;
	} else if (rc == -ESTALE) {
		/**
		 * it can be allowed error after 3633 if open was committed and
		 * server failed before close was sent. Let's check if mod
		 * exists and return no error in that case
		 */
		if (mod) {
			DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc);
			LASSERT(mod->mod_open_req != NULL);
			if (mod->mod_open_req->rq_committed)
				rc = 0;
		}
	}
	if (mod) {
		if (rc != 0)
			mod->mod_close_req = NULL;
		/* Since now, mod is accessed through open_req only,
		 * thus close req does not keep a reference on mod anymore. */
		obd_mod_put(mod);
	}
	*request = req;
	mdc_close_handle_reply(req, op_data, rc);
	return rc < 0 ? rc : saved_rc;
}
/*
 * Send an MDS_DONE_WRITING RPC, closing the write epoch described by
 * @op_data.  Mirrors mdc_close() for the SETATTR-DONE_WRITING chain:
 * @mod links this request to the setattr it completes for replay
 * purposes.  The request is finished locally; nothing is returned to
 * the caller except the RPC result.
 */
static int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
			    struct md_open_data *mod)
{
	struct obd_device *obd = class_exp2obd(exp);
	struct ptlrpc_request *req;
	int rc;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_DONE_WRITING);
	if (req == NULL)
		return -ENOMEM;
	mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}
	if (mod != NULL) {
		LASSERTF(mod->mod_open_req != NULL &&
			 mod->mod_open_req->rq_type != LI_POISON,
			 "POISONED setattr %p!\n", mod->mod_open_req);
		mod->mod_close_req = req;
		DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
		/* We no longer want to preserve this setattr for replay even
		 * though the open was committed. b=3632, b=3633 */
		spin_lock(&mod->mod_open_req->rq_lock);
		mod->mod_open_req->rq_replay = 0;
		spin_unlock(&mod->mod_open_req->rq_lock);
	}
	mdc_close_pack(req, op_data);
	ptlrpc_request_set_replen(req);
	/* Serialized with closes via the close rpc lock. */
	mdc_get_rpc_lock(obd->u.cli.cl_close_lock, NULL);
	rc = ptlrpc_queue_wait(req);
	mdc_put_rpc_lock(obd->u.cli.cl_close_lock, NULL);
	if (rc == -ESTALE) {
		/**
		 * it can be allowed error after 3633 if open or setattr were
		 * committed and server failed before close was sent.
		 * Let's check if mod exists and return no error in that case
		 */
		if (mod) {
			LASSERT(mod->mod_open_req != NULL);
			if (mod->mod_open_req->rq_committed)
				rc = 0;
		}
	}
	if (mod) {
		if (rc != 0)
			mod->mod_close_req = NULL;
		LASSERT(mod->mod_open_req != NULL);
		mdc_free_open(mod);
		/* Since now, mod is accessed through setattr req only,
		 * thus DW req does not keep a reference on mod anymore. */
		obd_mod_put(mod);
	}
	mdc_close_handle_reply(req, op_data, rc);
	ptlrpc_req_finished(req);
	return rc;
}
/*
 * Fetch directory pages from the MDT with an MDS_READPAGE bulk RPC.
 *
 * op_data->op_npages pages from @pages are registered as the bulk sink;
 * on success *request holds the finished request (caller must release
 * it).  Timeouts are retried with a growing backoff until
 * client_should_resend() says stop.
 */
static int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
			struct page **pages, struct ptlrpc_request **request)
{
	struct ptlrpc_request *req;
	struct ptlrpc_bulk_desc *desc;
	int i;
	wait_queue_head_t waitq;
	int resends = 0;
	struct l_wait_info lwi;
	int rc;
	*request = NULL;
	init_waitqueue_head(&waitq);
restart_bulk:
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
	if (req == NULL)
		return -ENOMEM;
	mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}
	req->rq_request_portal = MDS_READPAGE_PORTAL;
	ptlrpc_at_set_req_timeout(req);
	desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, 1, BULK_PUT_SINK,
				    MDS_BULK_PORTAL);
	if (desc == NULL) {
		ptlrpc_request_free(req);
		return -ENOMEM;
	}
	/* NB req now owns desc and will free it when it gets freed */
	for (i = 0; i < op_data->op_npages; i++)
		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
	mdc_readdir_pack(req, op_data->op_offset,
			 PAGE_CACHE_SIZE * op_data->op_npages,
			 &op_data->op_fid1, op_data->op_capa1);
	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);
	if (rc) {
		ptlrpc_req_finished(req);
		if (rc != -ETIMEDOUT)
			return rc;
		/* Timed out: wait "resends" seconds, then rebuild and
		 * resend the whole request. */
		resends++;
		if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
			CERROR("too many resend retries, returning error\n");
			return -EIO;
		}
		lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends),
				       NULL, NULL, NULL);
		l_wait_event(waitq, 0, &lwi);
		goto restart_bulk;
	}
	rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
					  req->rq_bulk->bd_nob_transferred);
	if (rc < 0) {
		ptlrpc_req_finished(req);
		return rc;
	}
	/* A partial (non page-multiple) transfer is a protocol error. */
	if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
		CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
		       req->rq_bulk->bd_nob_transferred,
		       PAGE_CACHE_SIZE * op_data->op_npages);
		ptlrpc_req_finished(req);
		return -EPROTO;
	}
	*request = req;
	return 0;
}
/*
 * Query filesystem statistics from the MDT (MDS_STATFS) into @osfs.
 *
 * @max_age is unused here; OBD_STATFS_NODELAY in @flags makes the RPC
 * non-delaying/non-resending so procfs readers cannot block.  Returns 0
 * on success or a negative errno (import connect errors take priority).
 */
static int mdc_statfs(const struct lu_env *env,
		      struct obd_export *exp, struct obd_statfs *osfs,
		      __u64 max_age, __u32 flags)
{
	struct obd_device *obd = class_exp2obd(exp);
	struct ptlrpc_request *req;
	struct obd_statfs *msfs;
	struct obd_import *imp = NULL;
	int rc;
	/*
	 * Since the request might also come from lprocfs, so we need
	 * sync this with client_disconnect_export Bug15684
	 */
	down_read(&obd->u.cli.cl_sem);
	if (obd->u.cli.cl_import)
		imp = class_import_get(obd->u.cli.cl_import);
	up_read(&obd->u.cli.cl_sem);
	if (!imp)
		return -ENODEV;
	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS,
					LUSTRE_MDS_VERSION, MDS_STATFS);
	if (req == NULL) {
		rc = -ENOMEM;
		goto output;
	}
	ptlrpc_request_set_replen(req);
	if (flags & OBD_STATFS_NODELAY) {
		/* procfs requests not want stay in wait for avoid deadlock */
		req->rq_no_resend = 1;
		req->rq_no_delay = 1;
	}
	rc = ptlrpc_queue_wait(req);
	if (rc) {
		/* check connection error first */
		if (imp->imp_connect_error)
			rc = imp->imp_connect_error;
		goto out;
	}
	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
	if (msfs == NULL) {
		rc = -EPROTO;
		goto out;
	}
	*osfs = *msfs;
out:
	ptlrpc_req_finished(req);
output:
	class_import_put(imp);
	return rc;
}
/*
 * Resolve a FID to a path via obd_get_info(KEY_FID2PATH).
 *
 * @gf is both input (fid, recno, linkno, pathlen) and output (path is
 * written in place).  The key buffer is KEY_FID2PATH followed by a copy
 * of *gf at the next size-rounded offset.  Returns 0, -EREMOTE (partial
 * resolution on a remote MDT), or a negative errno.
 */
static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
{
	__u32 keylen, vallen;
	void *key;
	int rc;
	if (gf->gf_pathlen > PATH_MAX)
		return -ENAMETOOLONG;
	if (gf->gf_pathlen < 2)
		return -EOVERFLOW;
	/* Key is KEY_FID2PATH + getinfo_fid2path description */
	keylen = cfs_size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf);
	OBD_ALLOC(key, keylen);
	if (key == NULL)
		return -ENOMEM;
	memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
	memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
	CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n",
	       PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno);
	if (!fid_is_sane(&gf->gf_fid)) {
		rc = -EINVAL;
		goto out;
	}
	/* Val is struct getinfo_fid2path result plus path */
	vallen = sizeof(*gf) + gf->gf_pathlen;
	rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf, NULL);
	if (rc != 0 && rc != -EREMOTE)
		goto out;
	/* Sanity-check the returned length before trusting gf_path. */
	if (vallen <= sizeof(*gf)) {
		rc = -EPROTO;
		goto out;
	} else if (vallen > sizeof(*gf) + gf->gf_pathlen) {
		rc = -EOVERFLOW;
		goto out;
	}
	CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n%s\n",
	       PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno, gf->gf_path);
out:
	OBD_FREE(key, keylen);
	return rc;
}
/*
 * Forward an HSM progress report from the copytool to the MDT via an
 * MDS_HSM_PROGRESS RPC.  hpk_errval is converted to wire (network)
 * representation before sending.
 *
 * Fix: dropped the redundant "goto out" that immediately preceded the
 * "out:" label — control falls through anyway.
 */
static int mdc_ioc_hsm_progress(struct obd_export *exp,
				struct hsm_progress_kernel *hpk)
{
	struct obd_import *imp = class_exp2cliimp(exp);
	struct hsm_progress_kernel *req_hpk;
	struct ptlrpc_request *req;
	int rc;

	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS,
					LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS);
	if (req == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);

	/* Copy hsm_progress struct */
	req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
	if (req_hpk == NULL) {
		rc = -EPROTO;
		goto out;
	}

	*req_hpk = *hpk;
	req_hpk->hpk_errval = lustre_errno_hton(hpk->hpk_errval);

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);
out:
	/* ptlrpc_req_finished() is a no-op on NULL, so the alloc-failure
	 * path is safe. */
	ptlrpc_req_finished(req);
	return rc;
}
/*
 * Register this client as an HSM copytool for the archive IDs in the
 * @archives bitmask (MDS_HSM_CT_REGISTER RPC).
 *
 * Fix: dropped the redundant "goto out" that immediately preceded the
 * "out:" label.
 */
static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
{
	__u32 *archive_mask;
	struct ptlrpc_request *req;
	int rc;

	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER,
					LUSTRE_MDS_VERSION,
					MDS_HSM_CT_REGISTER);
	if (req == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);

	/* Copy the archive mask into the request buffer. */
	archive_mask = req_capsule_client_get(&req->rq_pill,
					      &RMF_MDS_HSM_ARCHIVE);
	if (archive_mask == NULL) {
		rc = -EPROTO;
		goto out;
	}

	*archive_mask = archives;

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);
out:
	/* ptlrpc_req_finished() tolerates NULL on the alloc-failure path. */
	ptlrpc_req_finished(req);
	return rc;
}
/*
 * Query the current HSM action on the file op_data->op_fid1
 * (MDS_HSM_ACTION RPC) and copy the reply into the hsm_current_action
 * buffer supplied through op_data->op_data.
 */
static int mdc_ioc_hsm_current_action(struct obd_export *exp,
				      struct md_op_data *op_data)
{
	struct hsm_current_action *hca = op_data->op_data;
	struct hsm_current_action *req_hca;
	struct ptlrpc_request *req;
	int rc;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_HSM_ACTION);
	if (req == NULL)
		return -ENOMEM;
	mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}
	mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
		      OBD_MD_FLRMTPERM, 0, op_data->op_suppgids[0], 0);
	ptlrpc_request_set_replen(req);
	rc = mdc_queue_wait(req);
	if (rc)
		goto out;
	req_hca = req_capsule_server_get(&req->rq_pill,
					 &RMF_MDS_HSM_CURRENT_ACTION);
	if (req_hca == NULL) {
		rc = -EPROTO;
		goto out;
	}
	*hca = *req_hca;
out:
	ptlrpc_req_finished(req);
	return rc;
}
/*
 * Deregister this client as an HSM copytool (MDS_HSM_CT_UNREGISTER RPC).
 *
 * Fix: dropped the redundant "goto out" that immediately preceded the
 * "out:" label.
 */
static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
{
	struct ptlrpc_request *req;
	int rc;

	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER,
					LUSTRE_MDS_VERSION,
					MDS_HSM_CT_UNREGISTER);
	if (req == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);
out:
	/* ptlrpc_req_finished() tolerates NULL on the alloc-failure path. */
	ptlrpc_req_finished(req);
	return rc;
}
/*
 * Fetch the HSM state of op_data->op_fid1 (MDS_HSM_STATE_GET RPC) and
 * copy the reply into the hsm_user_state buffer supplied through
 * op_data->op_data.
 */
static int mdc_ioc_hsm_state_get(struct obd_export *exp,
				 struct md_op_data *op_data)
{
	struct hsm_user_state *hus = op_data->op_data;
	struct hsm_user_state *req_hus;
	struct ptlrpc_request *req;
	int rc;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_HSM_STATE_GET);
	if (req == NULL)
		return -ENOMEM;
	mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET);
	if (rc != 0) {
		ptlrpc_request_free(req);
		return rc;
	}
	mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
		      OBD_MD_FLRMTPERM, 0, op_data->op_suppgids[0], 0);
	ptlrpc_request_set_replen(req);
	rc = mdc_queue_wait(req);
	if (rc)
		goto out;
	req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE);
	if (req_hus == NULL) {
		rc = -EPROTO;
		goto out;
	}
	*hus = *req_hus;
out:
	ptlrpc_req_finished(req);
	return rc;
}
/*
 * Set the HSM state flags of op_data->op_fid1 (MDS_HSM_STATE_SET RPC)
 * from the hsm_state_set buffer supplied through op_data->op_data.
 *
 * Fix: dropped the redundant "goto out" that immediately preceded the
 * "out:" label.
 */
static int mdc_ioc_hsm_state_set(struct obd_export *exp,
				 struct md_op_data *op_data)
{
	struct hsm_state_set *hss = op_data->op_data;
	struct hsm_state_set *req_hss;
	struct ptlrpc_request *req;
	int rc;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_HSM_STATE_SET);
	if (req == NULL)
		return -ENOMEM;

	mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
		      OBD_MD_FLRMTPERM, 0, op_data->op_suppgids[0], 0);

	/* Copy states */
	req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET);
	if (req_hss == NULL) {
		rc = -EPROTO;
		goto out;
	}
	*req_hss = *hss;

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);
out:
	ptlrpc_req_finished(req);
	return rc;
}
/*
 * Submit a user HSM request (archive/restore/...) to the coordinator
 * via an MDS_HSM_REQUEST RPC.  The wire message carries three parts of
 * @hur: the hsm_request header, the hsm_user_item array, and the opaque
 * data blob.
 *
 * Fix: dropped the redundant "goto out" that immediately preceded the
 * "out:" label.
 */
static int mdc_ioc_hsm_request(struct obd_export *exp,
			       struct hsm_user_request *hur)
{
	struct obd_import *imp = class_exp2cliimp(exp);
	struct ptlrpc_request *req;
	struct hsm_request *req_hr;
	struct hsm_user_item *req_hui;
	char *req_opaque;
	int rc;

	req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST);
	if (req == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	/* Variable-sized fields must be sized before packing. */
	req_capsule_set_size(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM, RCL_CLIENT,
			     hur->hur_request.hr_itemcount
			     * sizeof(struct hsm_user_item));
	req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA, RCL_CLIENT,
			     hur->hur_request.hr_data_len);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_REQUEST);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);

	/* Copy hsm_request struct */
	req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
	if (req_hr == NULL) {
		rc = -EPROTO;
		goto out;
	}
	*req_hr = hur->hur_request;

	/* Copy hsm_user_item structs */
	req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM);
	if (req_hui == NULL) {
		rc = -EPROTO;
		goto out;
	}
	memcpy(req_hui, hur->hur_user_item,
	       hur->hur_request.hr_itemcount * sizeof(struct hsm_user_item));

	/* Copy opaque field */
	req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA);
	if (req_opaque == NULL) {
		rc = -EPROTO;
		goto out;
	}
	memcpy(req_opaque, hur_data(hur), hur->hur_request.hr_data_len);

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);
out:
	/* ptlrpc_req_finished() tolerates NULL on the alloc-failure path. */
	ptlrpc_req_finished(req);
	return rc;
}
/*
 * Initialize a kernel-user-comm header at the start of @buf for a
 * changelog message of @len bytes with the given kuc @flags.  Returns
 * @buf reinterpreted as the header.
 */
static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
{
	struct kuc_hdr *hdr = (struct kuc_hdr *)buf;

	/* Messages are staged in a fixed-size buffer; never exceed it. */
	LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE);

	hdr->kuc_magic = KUC_MAGIC;
	hdr->kuc_transport = KUC_TRANSPORT_CHANGELOG;
	hdr->kuc_flags = flags;
	hdr->kuc_msgtype = CL_RECORD;
	hdr->kuc_msglen = len;

	return hdr;
}
/* Per-record changelog debugging is noisy; compiled out by default. */
#define D_CHANGELOG 0
/*
 * State for streaming changelog records to userspace.  Allocated in
 * mdc_ioc_changelog_send() and freed by mdc_changelog_send_thread().
 */
struct changelog_show {
	__u64 cs_startrec;	/* first record index of interest */
	__u32 cs_flags;		/* kuc flags stamped on each message */
	struct file *cs_fp;	/* userspace pipe; fput by the thread */
	char *cs_buf;		/* staging buffer (KUC_CHANGELOG_MSG_MAXSIZE) */
	struct obd_device *cs_obd;	/* device whose changelog we read */
};
/*
 * llog_cat_process() callback: convert one changelog llog record into a
 * kuc message and push it down the userspace pipe (cs->cs_fp).
 *
 * Returns 0 to continue (record skipped or delivered), negative errno
 * to abort processing.
 */
static int changelog_kkuc_cb(const struct lu_env *env, struct llog_handle *llh,
			     struct llog_rec_hdr *hdr, void *data)
{
	struct changelog_show *cs = data;
	struct llog_changelog_rec *rec = (struct llog_changelog_rec *)hdr;
	struct kuc_hdr *lh;
	int len, rc;
	if (rec->cr_hdr.lrh_type != CHANGELOG_REC) {
		rc = -EINVAL;
		CERROR("%s: not a changelog rec %x/%d: rc = %d\n",
		       cs->cs_obd->obd_name, rec->cr_hdr.lrh_type,
		       rec->cr.cr_type, rc);
		return rc;
	}
	if (rec->cr.cr_index < cs->cs_startrec) {
		/* Skip entries earlier than what we are interested in */
		CDEBUG(D_CHANGELOG, "rec=%llu start=%llu\n",
		       rec->cr.cr_index, cs->cs_startrec);
		return 0;
	}
	CDEBUG(D_CHANGELOG, "%llu %02d%-5s %llu 0x%x t="DFID" p="DFID
	       " %.*s\n", rec->cr.cr_index, rec->cr.cr_type,
	       changelog_type2str(rec->cr.cr_type), rec->cr.cr_time,
	       rec->cr.cr_flags & CLF_FLAGMASK,
	       PFID(&rec->cr.cr_tfid), PFID(&rec->cr.cr_pfid),
	       rec->cr.cr_namelen, changelog_rec_name(&rec->cr));
	/* kuc header + changelog record + trailing name. */
	len = sizeof(*lh) + changelog_rec_size(&rec->cr) + rec->cr.cr_namelen;
	/* Set up the message */
	lh = changelog_kuc_hdr(cs->cs_buf, len, cs->cs_flags);
	memcpy(lh + 1, &rec->cr, len - sizeof(*lh));
	rc = libcfs_kkuc_msg_put(cs->cs_fp, lh);
	CDEBUG(D_CHANGELOG, "kucmsg fp %p len %d rc %d\n", cs->cs_fp, len, rc);
	return rc;
}
/*
 * Kernel thread body started by mdc_ioc_changelog_send(): open the
 * replicated changelog catalog, stream every record from cs_startrec
 * onward through the userspace pipe via changelog_kkuc_cb(), then send
 * a CL_EOF marker.  Owns and frees @csdata (the changelog_show) and
 * puts the pipe's file reference on exit.
 */
static int mdc_changelog_send_thread(void *csdata)
{
	struct changelog_show *cs = csdata;
	struct llog_ctxt *ctxt = NULL;
	struct llog_handle *llh = NULL;
	struct kuc_hdr *kuch;
	int rc;
	CDEBUG(D_CHANGELOG, "changelog to fp=%p start %llu\n",
	       cs->cs_fp, cs->cs_startrec);
	OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
	if (cs->cs_buf == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	/* Set up the remote catalog handle */
	ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT);
	if (ctxt == NULL) {
		rc = -ENOENT;
		goto out;
	}
	rc = llog_open(NULL, ctxt, &llh, NULL, CHANGELOG_CATALOG,
		       LLOG_OPEN_EXISTS);
	if (rc) {
		CERROR("%s: fail to open changelog catalog: rc = %d\n",
		       cs->cs_obd->obd_name, rc);
		goto out;
	}
	rc = llog_init_handle(NULL, llh, LLOG_F_IS_CAT, NULL);
	if (rc) {
		CERROR("llog_init_handle failed %d\n", rc);
		goto out;
	}
	rc = llog_cat_process(NULL, llh, changelog_kkuc_cb, cs, 0, 0);
	/* Send EOF no matter what our result */
	kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags);
	if (kuch) {
		kuch->kuc_msgtype = CL_EOF;
		libcfs_kkuc_msg_put(cs->cs_fp, kuch);
	}
out:
	/* matches the fget() in mdc_ioc_changelog_send() */
	fput(cs->cs_fp);
	if (llh)
		llog_cat_close(NULL, llh);
	if (ctxt)
		llog_ctxt_put(ctxt);
	if (cs->cs_buf)
		OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
	OBD_FREE_PTR(cs);
	return rc;
}
static int mdc_ioc_changelog_send(struct obd_device *obd,
struct ioc_changelog *icc)
{
struct changelog_show *cs;
int rc;
/* Freed in mdc_changelog_send_thread */
OBD_ALLOC_PTR(cs);
if (!cs)
return -ENOMEM;
cs->cs_obd = obd;
cs->cs_startrec = icc->icc_recno;
/* matching fput in mdc_changelog_send_thread */
cs->cs_fp = fget(icc->icc_id);
cs->cs_flags = icc->icc_flags;
/*
* New thread because we should return to user app before
* writing into our pipe
*/
rc = PTR_ERR(kthread_run(mdc_changelog_send_thread, cs,
"mdc_clg_send_thread"));
if (!IS_ERR_VALUE(rc)) {
CDEBUG(D_CHANGELOG, "start changelog thread\n");
return 0;
}
CERROR("Failed to start changelog thread: %d\n", rc);
OBD_FREE_PTR(cs);
return rc;
}
/* Forward declaration; the definition appears later in this file. */
static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
				struct lustre_kernelcomm *lk);
/*
 * Start a quotacheck on the MDT (MDS_QUOTACHECK RPC) and cache its
 * status in cl_qchk_stat: -ENODATA while running, the RPC error on
 * failure.  Progress is later polled via mdc_quota_poll_check().
 */
static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp,
			  struct obd_quotactl *oqctl)
{
	struct client_obd *cli = &exp->exp_obd->u.cli;
	struct ptlrpc_request *req;
	struct obd_quotactl *body;
	int rc;
	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
					&RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION,
					MDS_QUOTACHECK);
	if (req == NULL)
		return -ENOMEM;
	body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
	*body = *oqctl;
	ptlrpc_request_set_replen(req);
	/* the next poll will find -ENODATA, that means quotacheck is
	 * going on */
	cli->cl_qchk_stat = -ENODATA;
	rc = ptlrpc_queue_wait(req);
	if (rc)
		cli->cl_qchk_stat = rc;
	ptlrpc_req_finished(req);
	return rc;
}
/*
 * Report the status of a previously started quotacheck to userspace.
 * Fills @qchk with this target's uuid and type, then returns the cached
 * cl_qchk_stat (-ENODATA while the check is still in progress, -EINTR
 * when no quotacheck was started by this client).
 */
static int mdc_quota_poll_check(struct obd_export *exp,
				struct if_quotacheck *qchk)
{
	struct client_obd *cli = &exp->exp_obd->u.cli;
	int stat;

	qchk->obd_uuid = cli->cl_target_uuid;
	memcpy(qchk->obd_type, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME));

	stat = cli->cl_qchk_stat;
	/* the client is not the previous one */
	if (stat == CL_NOT_QUOTACHECKED)
		stat = -EINTR;
	return stat;
}
/*
 * Issue an MDS_QUOTACTL RPC carrying *oqctl and copy the server's
 * answer back into *oqctl.  A missing/unpackable reply body turns a
 * successful RPC into -EPROTO.
 */
static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
			struct obd_quotactl *oqctl)
{
	struct ptlrpc_request *req;
	struct obd_quotactl *oqc;
	int rc;
	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
					&RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION,
					MDS_QUOTACTL);
	if (req == NULL)
		return -ENOMEM;
	oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
	*oqc = *oqctl;
	ptlrpc_request_set_replen(req);
	ptlrpc_at_set_req_timeout(req);
	/* quotactls must not be resent silently */
	req->rq_no_resend = 1;
	rc = ptlrpc_queue_wait(req);
	if (rc)
		CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);
	if (req->rq_repmsg) {
		oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
		if (oqc) {
			*oqctl = *oqc;
		} else if (!rc) {
			CERROR("Can't unpack obd_quotactl\n");
			rc = -EPROTO;
		}
	} else if (!rc) {
		CERROR("Can't unpack obd_quotactl\n");
		rc = -EPROTO;
	}
	ptlrpc_req_finished(req);
	return rc;
}
/*
 * Swap the layouts of op_data->op_fid1 and op_data->op_fid2 via an
 * MDS_SWAP_LAYOUTS RPC.  Unused local layout locks on both FIDs are
 * cancelled and their handles piggy-backed on the request to save the
 * server a round trip.
 *
 * Fix: dropped the no-op "if (rc) goto out;" immediately preceding the
 * "out:" label — both branches reached the same code.
 */
static int mdc_ioc_swap_layouts(struct obd_export *exp,
				struct md_op_data *op_data)
{
	LIST_HEAD(cancels);
	struct ptlrpc_request *req;
	int rc, count;
	struct mdc_swap_layouts *msl, *payload;

	msl = op_data->op_data;

	/* When the MDT will get the MDS_SWAP_LAYOUTS RPC the
	 * first thing it will do is to cancel the 2 layout
	 * locks hold by this client.
	 * So the client must cancel its layout locks on the 2 fids
	 * with the request RPC to avoid extra RPC round trips
	 */
	count = mdc_resource_get_unused(exp, &op_data->op_fid1, &cancels,
					LCK_CR, MDS_INODELOCK_LAYOUT);
	count += mdc_resource_get_unused(exp, &op_data->op_fid2, &cancels,
					 LCK_CR, MDS_INODELOCK_LAYOUT);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_SWAP_LAYOUTS);
	if (req == NULL) {
		ldlm_lock_list_put(&cancels, l_bl_ast, count);
		return -ENOMEM;
	}

	mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
	mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa2);

	rc = mdc_prep_elc_req(exp, req, MDS_SWAP_LAYOUTS, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	mdc_swap_layouts_pack(req, op_data);

	payload = req_capsule_client_get(&req->rq_pill, &RMF_SWAP_LAYOUTS);
	LASSERT(payload);
	*payload = *msl;

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);

	ptlrpc_req_finished(req);
	return rc;
}
/*
 * ioctl dispatcher for the MDC obd device.  @karg is the in-kernel copy
 * of the ioctl argument, @uarg the original user pointer (used only by
 * the commands that copy straight back to userspace).  The module
 * reference taken at entry pins this module for the duration of any
 * command (e.g. the changelog sender thread startup).
 */
static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
			 void *karg, void *uarg)
{
	struct obd_device *obd = exp->exp_obd;
	struct obd_ioctl_data *data = karg;
	struct obd_import *imp = obd->u.cli.cl_import;
	int rc;
	if (!try_module_get(THIS_MODULE)) {
		CERROR("Can't get module. Is it alive?");
		return -EINVAL;
	}
	switch (cmd) {
	case OBD_IOC_CHANGELOG_SEND:
		rc = mdc_ioc_changelog_send(obd, karg);
		goto out;
	case OBD_IOC_CHANGELOG_CLEAR: {
		struct ioc_changelog *icc = karg;
		struct changelog_setinfo cs = {
			.cs_recno = icc->icc_recno,
			.cs_id = icc->icc_id
		};
		rc = obd_set_info_async(NULL, exp, strlen(KEY_CHANGELOG_CLEAR),
					KEY_CHANGELOG_CLEAR, sizeof(cs), &cs,
					NULL);
		goto out;
	}
	case OBD_IOC_FID2PATH:
		rc = mdc_ioc_fid2path(exp, karg);
		goto out;
	case LL_IOC_HSM_CT_START:
		rc = mdc_ioc_hsm_ct_start(exp, karg);
		/* ignore if it was already registered on this MDS. */
		if (rc == -EEXIST)
			rc = 0;
		goto out;
	case LL_IOC_HSM_PROGRESS:
		rc = mdc_ioc_hsm_progress(exp, karg);
		goto out;
	case LL_IOC_HSM_STATE_GET:
		rc = mdc_ioc_hsm_state_get(exp, karg);
		goto out;
	case LL_IOC_HSM_STATE_SET:
		rc = mdc_ioc_hsm_state_set(exp, karg);
		goto out;
	case LL_IOC_HSM_ACTION:
		rc = mdc_ioc_hsm_current_action(exp, karg);
		goto out;
	case LL_IOC_HSM_REQUEST:
		rc = mdc_ioc_hsm_request(exp, karg);
		goto out;
	case OBD_IOC_CLIENT_RECOVER:
		rc = ptlrpc_recover_import(imp, data->ioc_inlbuf1, 0);
		if (rc < 0)
			goto out;
		rc = 0;
		goto out;
	case IOC_OSC_SET_ACTIVE:
		rc = ptlrpc_set_import_active(imp, data->ioc_offset);
		goto out;
	case OBD_IOC_POLL_QUOTACHECK:
		rc = mdc_quota_poll_check(exp, (struct if_quotacheck *)karg);
		goto out;
	case OBD_IOC_PING_TARGET:
		rc = ptlrpc_obd_ping(obd);
		goto out;
	/*
	 * Normally IOC_OBD_STATFS, OBD_IOC_QUOTACTL iocontrol are handled by
	 * LMV instead of MDC. But when the cluster is upgraded from 1.8,
	 * there'd be no LMV layer thus we might be called here. Eventually
	 * this code should be removed.
	 * bz20731, LU-592.
	 */
	case IOC_OBD_STATFS: {
		struct obd_statfs stat_buf = {0};
		/* Only index 0 exists without an LMV on top of us. */
		if (*((__u32 *) data->ioc_inlbuf2) != 0) {
			rc = -ENODEV;
			goto out;
		}
		/* copy UUID */
		if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
				 min_t(size_t, data->ioc_plen2,
				       sizeof(struct obd_uuid)))) {
			rc = -EFAULT;
			goto out;
		}
		rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf,
				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
				0);
		if (rc != 0)
			goto out;
		if (copy_to_user(data->ioc_pbuf1, &stat_buf,
				 min_t(size_t, data->ioc_plen1,
				       sizeof(stat_buf)))) {
			rc = -EFAULT;
			goto out;
		}
		rc = 0;
		goto out;
	}
	case OBD_IOC_QUOTACTL: {
		struct if_quotactl *qctl = karg;
		struct obd_quotactl *oqctl;
		OBD_ALLOC_PTR(oqctl);
		if (oqctl == NULL) {
			rc = -ENOMEM;
			goto out;
		}
		QCTL_COPY(oqctl, qctl);
		rc = obd_quotactl(exp, oqctl);
		if (rc == 0) {
			QCTL_COPY(qctl, oqctl);
			qctl->qc_valid = QC_MDTIDX;
			qctl->obd_uuid = obd->u.cli.cl_target_uuid;
		}
		OBD_FREE_PTR(oqctl);
		goto out;
	}
	case LL_IOC_GET_CONNECT_FLAGS:
		if (copy_to_user(uarg, exp_connect_flags_ptr(exp),
				 sizeof(*exp_connect_flags_ptr(exp)))) {
			rc = -EFAULT;
			goto out;
		}
		rc = 0;
		goto out;
	case LL_IOC_LOV_SWAP_LAYOUTS:
		rc = mdc_ioc_swap_layouts(exp, karg);
		goto out;
	default:
		CERROR("unrecognised ioctl: cmd = %#x\n", cmd);
		rc = -ENOTTY;
		goto out;
	}
out:
	module_put(THIS_MODULE);
	return rc;
}
/*
 * Fetch a named piece of information from the MDS via an MDS_GET_INFO
 * RPC.
 *
 * \param exp     export to send the request on
 * \param keylen  length of \a key in bytes
 * \param key     opaque key identifying the information wanted
 * \param vallen  size of the \a val reply buffer
 * \param val     buffer the reply value is copied into
 *
 * Returns 0 on success, -EREMOTE when the result is partial (see the
 * in-line comment below), or another negative errno.
 */
static int mdc_get_info_rpc(struct obd_export *exp,
			    u32 keylen, void *key,
			    int vallen, void *val)
{
	struct obd_import *imp = class_exp2cliimp(exp);
	struct ptlrpc_request *req;
	char *tmp;
	int rc = -EINVAL;

	req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO);
	if (req == NULL)
		return -ENOMEM;

	/* Size the request buffers: the key itself plus a __u32 that
	 * tells the server how big our reply buffer is. */
	req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY,
			     RCL_CLIENT, keylen);
	req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VALLEN,
			     RCL_CLIENT, sizeof(__u32));

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_INFO);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	/* Copy the key and the requested value length into the request. */
	tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
	memcpy(tmp, key, keylen);
	tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_VALLEN);
	memcpy(tmp, &vallen, sizeof(__u32));

	req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VAL,
			     RCL_SERVER, vallen);
	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	/* -EREMOTE means the get_info result is partial, and it needs to
	 * continue on another MDT, see fid2path part in lmv_iocontrol */
	if (rc == 0 || rc == -EREMOTE) {
		tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL);
		memcpy(val, tmp, vallen);
		if (ptlrpc_rep_need_swab(req)) {
			if (KEY_IS(KEY_FID2PATH))
				lustre_swab_fid2path(val);
		}
	}
	ptlrpc_req_finished(req);
	return rc;
}
/* Byte-swap one HSM action item in place (wire <-> host order). */
static void lustre_swab_hai(struct hsm_action_item *h)
{
	__swab32s(&h->hai_len);
	__swab32s(&h->hai_action);
	lustre_swab_lu_fid(&h->hai_fid);
	lustre_swab_lu_fid(&h->hai_dfid);
	__swab64s(&h->hai_cookie);
	__swab64s(&h->hai_extent.offset);
	__swab64s(&h->hai_extent.length);
	__swab64s(&h->hai_gid);
}

/* Byte-swap an HSM action list header, then every item it contains. */
static void lustre_swab_hal(struct hsm_action_list *h)
{
	struct hsm_action_item *hai;
	int i;

	__swab32s(&h->hal_version);
	/* hal_count must be swabbed before it is used as the loop bound */
	__swab32s(&h->hal_count);
	__swab32s(&h->hal_archive_id);
	__swab64s(&h->hal_flags);
	hai = hai_zero(h);
	for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
		lustre_swab_hai(hai);
}

/* Byte-swap a KUC message header; kuc_transport is a single byte. */
static void lustre_swab_kuch(struct kuc_hdr *l)
{
	__swab16s(&l->kuc_magic);
	/* __u8 l->kuc_transport */
	__swab16s(&l->kuc_msgtype);
	__swab16s(&l->kuc_msglen);
}
/*
 * Handle LL_IOC_HSM_CT_START: register a copytool with the HSM
 * coordinator on the MDT, or unregister it when LK_FLG_STOP is set.
 * Only the KUC_GRP_HSM group is accepted.
 */
static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
				struct lustre_kernelcomm *lk)
{
	struct obd_import *imp = class_exp2cliimp(exp);

	if (lk->lk_group != KUC_GRP_HSM) {
		CERROR("Bad copytool group %d\n", lk->lk_group);
		return -EINVAL;
	}

	CDEBUG(D_HSM, "CT start r%d w%d u%d g%d f%#x\n", lk->lk_rfd, lk->lk_wfd,
	       lk->lk_uid, lk->lk_group, lk->lk_flags);

	if (lk->lk_flags & LK_FLG_STOP)
		/* Unregister with the coordinator */
		return mdc_ioc_hsm_ct_unregister(imp);

	/* lk_data carries the archive id served by this copytool */
	return mdc_ioc_hsm_ct_register(imp, lk->lk_data);
}
/**
 * Send a message to any listening copytools
 * @param len total length of message
 * @param val KUC message (kuc_hdr + hsm_action_list)
 *
 * Swabs the message in place if it arrived in the peer's byte order,
 * then broadcasts it to every process registered in the HSM KUC group.
 * Returns 0 on success or a negative errno.
 */
static int mdc_hsm_copytool_send(int len, void *val)
{
	struct kuc_hdr *lh = (struct kuc_hdr *)val;
	struct hsm_action_list *hal = (struct hsm_action_list *)(lh + 1);

	/* Compare in the signed domain: comparing the signed len directly
	 * against the size_t sum would promote a negative len to a huge
	 * unsigned value and let it slip past the minimum-size check. */
	if (len < (int)(sizeof(*lh) + sizeof(*hal))) {
		CERROR("Short HSM message %d < %d\n", len,
		       (int) (sizeof(*lh) + sizeof(*hal)));
		return -EPROTO;
	}
	if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
		/* peer ran with the opposite endianness: fix header + list */
		lustre_swab_kuch(lh);
		lustre_swab_hal(hal);
	} else if (lh->kuc_magic != KUC_MAGIC) {
		CERROR("Bad magic %x!=%x\n", lh->kuc_magic, KUC_MAGIC);
		return -EPROTO;
	}
	CDEBUG(D_HSM,
	       "Received message mg=%x t=%d m=%d l=%d actions=%d on %s\n",
	       lh->kuc_magic, lh->kuc_transport, lh->kuc_msgtype,
	       lh->kuc_msglen, hal->hal_count, hal->hal_fsname);
	/* Broadcast to HSM listeners */
	return libcfs_kkuc_group_put(KUC_GRP_HSM, lh);
}
/**
 * callback function passed to kuc for re-registering each HSM copytool
 * running on MDC, after MDT shutdown/recovery.
 * @param data archive id served by the copytool
 * @param cb_arg callback argument (obd_import)
 */
static int mdc_hsm_ct_reregister(__u32 data, void *cb_arg)
{
	struct obd_import *imp = cb_arg;
	int rc;

	CDEBUG(D_HA, "recover copytool registration to MDT (archive=%#x)\n",
	       data);
	rc = mdc_ioc_hsm_ct_register(imp, data);

	/* ignore error if the copytool is already registered */
	if (rc == -EEXIST)
		rc = 0;
	return rc;
}

/**
 * Re-establish all kuc contexts with MDT
 * after MDT shutdown/recovery.
 */
static int mdc_kuc_reregister(struct obd_import *imp)
{
	/* re-register HSM agents */
	return libcfs_kkuc_group_foreach(KUC_GRP_HSM, mdc_hsm_ct_reregister,
					 imp);
}
/*
 * Push a named setting to the MDS, or handle it locally.
 *
 * KEY_READ_ONLY toggles OBD_CONNECT_RDONLY on the import and forwards
 * to the server; KEY_SPTLRPC_CONF and KEY_FLUSH_CTX are handled purely
 * on the client; KEY_CHANGELOG_CLEAR is forwarded to the server;
 * KEY_HSM_COPYTOOL_SEND relays a KUC message to local copytools.
 * Unknown keys fail with -EINVAL.
 */
static int mdc_set_info_async(const struct lu_env *env,
			      struct obd_export *exp,
			      u32 keylen, void *key,
			      u32 vallen, void *val,
			      struct ptlrpc_request_set *set)
{
	struct obd_import *imp = class_exp2cliimp(exp);
	int rc;

	if (KEY_IS(KEY_READ_ONLY)) {
		if (vallen != sizeof(int))
			return -EINVAL;

		/* Record the flag both in the original connect flags (used
		 * on reconnect) and in the live connect data. */
		spin_lock(&imp->imp_lock);
		if (*((int *)val)) {
			imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
			imp->imp_connect_data.ocd_connect_flags |=
				OBD_CONNECT_RDONLY;
		} else {
			imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
			imp->imp_connect_data.ocd_connect_flags &=
				~OBD_CONNECT_RDONLY;
		}
		spin_unlock(&imp->imp_lock);

		rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
				       keylen, key, vallen, val, set);
		return rc;
	}
	if (KEY_IS(KEY_SPTLRPC_CONF)) {
		sptlrpc_conf_client_adapt(exp->exp_obd);
		return 0;
	}
	if (KEY_IS(KEY_FLUSH_CTX)) {
		sptlrpc_import_flush_my_ctx(imp);
		return 0;
	}
	if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
		rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
				       keylen, key, vallen, val, set);
		return rc;
	}
	if (KEY_IS(KEY_HSM_COPYTOOL_SEND)) {
		/* KUC message from the coordinator: relay to copytools */
		rc = mdc_hsm_copytool_send(vallen, val);
		return rc;
	}

	CERROR("Unknown key %s\n", (char *)key);
	return -EINVAL;
}
/*
 * Answer a named query about this MDC: well-known keys are served
 * from cached client state, anything else falls through to an
 * MDS_GET_INFO RPC via mdc_get_info_rpc().
 */
static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
			__u32 keylen, void *key, __u32 *vallen, void *val,
			struct lov_stripe_md *lsm)
{
	int rc = -EINVAL;

	if (KEY_IS(KEY_MAX_EASIZE)) {
		int mdsize, *max_easize;

		if (*vallen != sizeof(int))
			return -EINVAL;
		/* *val is both input (proposed size) and output: grow the
		 * cached limit if needed, then report the current value. */
		mdsize = *(int *)val;
		if (mdsize > exp->exp_obd->u.cli.cl_max_mds_easize)
			exp->exp_obd->u.cli.cl_max_mds_easize = mdsize;
		max_easize = val;
		*max_easize = exp->exp_obd->u.cli.cl_max_mds_easize;
		return 0;
	} else if (KEY_IS(KEY_DEFAULT_EASIZE)) {
		int *default_easize;

		if (*vallen != sizeof(int))
			return -EINVAL;
		default_easize = val;
		*default_easize = exp->exp_obd->u.cli.cl_default_mds_easize;
		return 0;
	} else if (KEY_IS(KEY_MAX_COOKIESIZE)) {
		int mdsize, *max_cookiesize;

		if (*vallen != sizeof(int))
			return -EINVAL;
		/* same grow-and-report protocol as KEY_MAX_EASIZE */
		mdsize = *(int *)val;
		if (mdsize > exp->exp_obd->u.cli.cl_max_mds_cookiesize)
			exp->exp_obd->u.cli.cl_max_mds_cookiesize = mdsize;
		max_cookiesize = val;
		*max_cookiesize = exp->exp_obd->u.cli.cl_max_mds_cookiesize;
		return 0;
	} else if (KEY_IS(KEY_DEFAULT_COOKIESIZE)) {
		int *default_cookiesize;

		if (*vallen != sizeof(int))
			return -EINVAL;
		default_cookiesize = val;
		*default_cookiesize =
			exp->exp_obd->u.cli.cl_default_mds_cookiesize;
		return 0;
	} else if (KEY_IS(KEY_CONN_DATA)) {
		struct obd_import *imp = class_exp2cliimp(exp);
		struct obd_connect_data *data = val;

		if (*vallen != sizeof(*data))
			return -EINVAL;
		*data = imp->imp_connect_data;
		return 0;
	} else if (KEY_IS(KEY_TGT_COUNT)) {
		/* an MDC always talks to exactly one target */
		*((int *)val) = 1;
		return 0;
	}
	/* not a locally-served key: ask the MDS */
	rc = mdc_get_info_rpc(exp, keylen, key, *vallen, val);
	return rc;
}
/*
 * Issue a synchronous MDS_SYNC RPC for @fid.  On success the reply
 * request is handed back through *request and the caller owns the
 * reference (must ptlrpc_req_finished() it); on failure *request
 * stays NULL and the request is released here.
 */
static int mdc_sync(struct obd_export *exp, const struct lu_fid *fid,
		    struct obd_capa *oc, struct ptlrpc_request **request)
{
	struct ptlrpc_request *sync_req;
	int rc;

	*request = NULL;

	sync_req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC);
	if (sync_req == NULL)
		return -ENOMEM;

	mdc_set_capa_size(sync_req, &RMF_CAPA1, oc);

	rc = ptlrpc_request_pack(sync_req, LUSTRE_MDS_VERSION, MDS_SYNC);
	if (rc) {
		ptlrpc_request_free(sync_req);
		return rc;
	}

	mdc_pack_body(sync_req, fid, oc, 0, 0, -1, 0);
	ptlrpc_request_set_replen(sync_req);

	rc = ptlrpc_queue_wait(sync_req);
	if (rc == 0)
		*request = sync_req;
	else
		ptlrpc_req_finished(sync_req);
	return rc;
}
/*
 * React to state changes of this device's import (the connection to
 * the MDT): flush client sequence state on deactivation, drop cached
 * locks on invalidation, re-register KUC listeners after reconnect,
 * and forward most events to the observer layered above us.
 */
static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
			    enum obd_import_event event)
{
	int rc = 0;

	LASSERT(imp->imp_obd == obd);

	switch (event) {
	case IMP_EVENT_DISCON: {
#if 0
		/* XXX Pass event up to OBDs stack. used only for FLD now */
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DISCON, NULL);
#endif
		break;
	}
	case IMP_EVENT_INACTIVE: {
		struct client_obd *cli = &obd->u.cli;
		/*
		 * Flush current sequence to make client obtain new one
		 * from server in case of disconnect/reconnect.
		 */
		if (cli->cl_seq != NULL)
			seq_client_flush(cli->cl_seq);

		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
		break;
	}
	case IMP_EVENT_INVALIDATE: {
		struct ldlm_namespace *ns = obd->obd_namespace;

		/* drop all DLM locks held against the invalid import */
		ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
		break;
	}
	case IMP_EVENT_ACTIVE:
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
		/* redo the kuc registration after reconnecting */
		if (rc == 0)
			rc = mdc_kuc_reregister(imp);
		break;
	case IMP_EVENT_OCD:
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
		break;
	case IMP_EVENT_DEACTIVATE:
	case IMP_EVENT_ACTIVATE:
		break;
	default:
		CERROR("Unknown import event %x\n", event);
		LBUG();
	}
	return rc;
}
/* Allocate a new FID for this client from its sequence client. */
int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
		  struct md_op_data *op_data)
{
	return seq_client_alloc_fid(NULL, exp->exp_obd->u.cli.cl_seq, fid);
}

/* Return the UUID of the target this client is connected to. */
static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
{
	return &exp->exp_obd->u.cli.cl_target_uuid;
}
/**
 * Determine whether the lock can be canceled before replaying it during
 * recovery, non zero value will be return if the lock can be canceled,
 * or zero returned for not
 */
static int mdc_cancel_for_recovery(struct ldlm_lock *lock)
{
	/* only inodebits locks are eligible for cancel-before-replay */
	if (lock->l_resource->lr_type != LDLM_IBITS)
		return 0;

	/* FIXME: if we ever get into a situation where there are too many
	 * opened files with open locks on a single node, then we really
	 * should replay these open locks to reget it */
	return (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN) ?
	       0 : 1;
}
/*
 * ldlm lvbo hook: drop the inode pointer cached in an MDC lock
 * resource when the resource is freed.  The original code tested the
 * field before assigning NULL; the guard is redundant, so clear it
 * unconditionally.  Always returns 0.
 */
static int mdc_resource_inode_free(struct ldlm_resource *res)
{
	res->lr_lvb_inode = NULL;
	return 0;
}

/* Resource value-block operations installed on the MDC namespace. */
static struct ldlm_valblock_ops inode_lvbo = {
	.lvbo_free = mdc_resource_inode_free,
};
/*
 * Set up the changelog-replication llog context for this device and
 * connect it to the originator on the server.
 * NOTE(review): the ctxt returned by llog_group_get_ctxt() is passed
 * to llog_initiator_connect() without a NULL check — verify it cannot
 * be NULL once llog_setup() has succeeded for the same context id.
 */
static int mdc_llog_init(struct obd_device *obd)
{
	struct obd_llog_group *olg = &obd->obd_olg;
	struct llog_ctxt *ctxt;
	int rc;

	rc = llog_setup(NULL, obd, olg, LLOG_CHANGELOG_REPL_CTXT, obd,
			&llog_client_ops);
	if (rc)
		return rc;

	ctxt = llog_group_get_ctxt(olg, LLOG_CHANGELOG_REPL_CTXT);
	llog_initiator_connect(ctxt);
	llog_ctxt_put(ctxt);

	return 0;
}

/* Tear down the changelog-replication llog context, if present. */
static void mdc_llog_finish(struct obd_device *obd)
{
	struct llog_ctxt *ctxt;

	ctxt = llog_get_context(obd, LLOG_CHANGELOG_REPL_CTXT);
	if (ctxt)
		llog_cleanup(NULL, ctxt);
}
/*
 * obd setup hook: allocate the RPC serialization locks, connect the
 * client obd, wire up lprocfs/sptlrpc/ptlrpc proc entries, install
 * the lock-cancel policy and lvb ops on our namespace, and start the
 * changelog llog.  Resources acquired before a failure are unwound
 * through the error labels (or via mdc_cleanup() once client_obd_setup
 * has succeeded).
 */
static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
{
	struct client_obd *cli = &obd->u.cli;
	struct lprocfs_static_vars lvars = { NULL };
	int rc;

	OBD_ALLOC(cli->cl_rpc_lock, sizeof(*cli->cl_rpc_lock));
	if (!cli->cl_rpc_lock)
		return -ENOMEM;
	mdc_init_rpc_lock(cli->cl_rpc_lock);

	ptlrpcd_addref();

	OBD_ALLOC(cli->cl_close_lock, sizeof(*cli->cl_close_lock));
	if (!cli->cl_close_lock) {
		rc = -ENOMEM;
		goto err_rpc_lock;
	}
	mdc_init_rpc_lock(cli->cl_close_lock);

	rc = client_obd_setup(obd, cfg);
	if (rc)
		goto err_close_lock;

	lprocfs_mdc_init_vars(&lvars);
	lprocfs_obd_setup(obd, lvars.obd_vars);
	sptlrpc_lprocfs_cliobd_attach(obd);
	ptlrpc_lprocfs_register_obd(obd);

	ns_register_cancel(obd->obd_namespace, mdc_cancel_for_recovery);

	obd->obd_namespace->ns_lvbo = &inode_lvbo;

	rc = mdc_llog_init(obd);
	if (rc) {
		/* mdc_cleanup() frees both locks and undoes
		 * client_obd_setup() */
		mdc_cleanup(obd);
		CERROR("failed to setup llogging subsystems\n");
	}
	return rc;

err_close_lock:
	OBD_FREE(cli->cl_close_lock, sizeof(*cli->cl_close_lock));
err_rpc_lock:
	OBD_FREE(cli->cl_rpc_lock, sizeof(*cli->cl_rpc_lock));
	ptlrpcd_decref();
	return rc;
}
/* Initialize the default and maximum LOV EA and cookie sizes. This allows
 * us to make MDS RPCs with large enough reply buffers to hold a default
 * sized EA and cookie without having to calculate this (via a call into the
 * LOV + OSCs) each time we make an RPC. The maximum size is also tracked
 * but not used to avoid wastefully vmalloc()'ing large reply buffers when
 * a large number of stripes is possible. If a larger reply buffer is
 * required it will be reallocated in the ptlrpc layer due to overflow.
 */
static int mdc_init_ea_size(struct obd_export *exp, int easize,
			    int def_easize, int cookiesize, int def_cookiesize)
{
	struct client_obd *cli = &exp->exp_obd->u.cli;

	/* each cached limit may only grow, never shrink */
	if (easize > cli->cl_max_mds_easize)
		cli->cl_max_mds_easize = easize;
	if (def_easize > cli->cl_default_mds_easize)
		cli->cl_default_mds_easize = def_easize;
	if (cookiesize > cli->cl_max_mds_cookiesize)
		cli->cl_max_mds_cookiesize = cookiesize;
	if (def_cookiesize > cli->cl_default_mds_cookiesize)
		cli->cl_default_mds_cookiesize = def_cookiesize;

	return 0;
}
/*
 * obd pre-cleanup hook.  At the OBD_CLEANUP_EXPORTS stage: drop the
 * HSM KUC group (only when this is the last user of the obd type,
 * hence the refcount check), destroy the client import, and tear down
 * proc entries and llog contexts.  Always returns 0.
 */
static int mdc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
	switch (stage) {
	case OBD_CLEANUP_EARLY:
		break;
	case OBD_CLEANUP_EXPORTS:
		/* Failsafe, ok if racy */
		if (obd->obd_type->typ_refcnt <= 1)
			libcfs_kkuc_group_rem(0, KUC_GRP_HSM);

		obd_cleanup_client_import(obd);
		ptlrpc_lprocfs_unregister_obd(obd);
		lprocfs_obd_cleanup(obd);

		mdc_llog_finish(obd);
		break;
	}
	return 0;
}
/* Free the locks allocated in mdc_setup() and tear the client down. */
static int mdc_cleanup(struct obd_device *obd)
{
	struct client_obd *cli = &obd->u.cli;

	OBD_FREE(cli->cl_rpc_lock, sizeof(*cli->cl_rpc_lock));
	OBD_FREE(cli->cl_close_lock, sizeof(*cli->cl_close_lock));

	ptlrpcd_decref();

	return client_obd_cleanup(obd);
}

/* Apply a dynamic configuration record; every command is treated as a
 * proc parameter update (positive return values mean "consumed"). */
static int mdc_process_config(struct obd_device *obd, u32 len, void *buf)
{
	struct lprocfs_static_vars lvars = { NULL };
	int rc;

	lprocfs_mdc_init_vars(&lvars);
	rc = class_process_proc_param(PARAM_MDC, lvars.obd_vars, buf, obd);
	return rc > 0 ? 0 : rc;
}
/* get remote permission for current user on fid */
static int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
			       struct obd_capa *oc, __u32 suppgid,
			       struct ptlrpc_request **request)
{
	struct ptlrpc_request *req;
	int rc;

	/* only valid for remote clients */
	LASSERT(client_is_remote(exp));

	*request = NULL;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
	if (req == NULL)
		return -ENOMEM;

	mdc_set_capa_size(req, &RMF_CAPA1, oc);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	/* request only the remote-permission blob */
	mdc_pack_body(req, fid, oc, OBD_MD_FLRMTPERM, 0, suppgid, 0);

	req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
			     sizeof(struct mdt_remote_perm));

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	/* on success the caller owns the request and must finish it */
	if (rc)
		ptlrpc_req_finished(req);
	else
		*request = req;
	return rc;
}

/*
 * Reply interpreter for the async capability-renew RPC: extract the
 * renewed capa from the reply (or build an ERR_PTR on failure) and
 * hand it to the caller-supplied callback.  Always returns 0.
 */
static int mdc_interpret_renew_capa(const struct lu_env *env,
				    struct ptlrpc_request *req, void *args,
				    int status)
{
	struct mdc_renew_capa_args *ra = args;
	struct mdt_body *body = NULL;
	struct lustre_capa *capa;

	if (status) {
		capa = ERR_PTR(status);
		goto out;
	}
	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (body == NULL) {
		capa = ERR_PTR(-EFAULT);
		goto out;
	}
	if ((body->valid & OBD_MD_FLOSSCAPA) == 0) {
		capa = ERR_PTR(-ENOENT);
		goto out;
	}
	capa = req_capsule_server_get(&req->rq_pill, &RMF_CAPA2);
	if (!capa) {
		capa = ERR_PTR(-EFAULT);
		goto out;
	}
out:
	ra->ra_cb(ra->ra_oc, capa);
	return 0;
}

/*
 * Queue an asynchronous MDS_GETATTR RPC that renews capability @oc;
 * @cb is invoked from mdc_interpret_renew_capa() with the result.
 */
static int mdc_renew_capa(struct obd_export *exp, struct obd_capa *oc,
			  renew_capa_cb_t cb)
{
	struct ptlrpc_request *req;
	struct mdc_renew_capa_args *ra;

	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_GETATTR,
					LUSTRE_MDS_VERSION, MDS_GETATTR);
	if (req == NULL)
		return -ENOMEM;

	/* NB, OBD_MD_FLOSSCAPA is set here, but it doesn't necessarily mean the
	 * capa to renew is oss capa.
	 */
	mdc_pack_body(req, &oc->c_capa.lc_fid, oc, OBD_MD_FLOSSCAPA, 0, -1, 0);
	ptlrpc_request_set_replen(req);

	CLASSERT(sizeof(*ra) <= sizeof(req->rq_async_args));
	ra = ptlrpc_req_async_args(req);
	ra->ra_oc = oc;
	ra->ra_cb = cb;
	req->rq_interpret_reply = mdc_interpret_renew_capa;
	ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
	return 0;
}
/* obd-level operations exported by the MDC driver */
static struct obd_ops mdc_obd_ops = {
	.o_owner            = THIS_MODULE,
	.o_setup            = mdc_setup,
	.o_precleanup       = mdc_precleanup,
	.o_cleanup          = mdc_cleanup,
	.o_add_conn         = client_import_add_conn,
	.o_del_conn         = client_import_del_conn,
	.o_connect          = client_connect_import,
	.o_disconnect       = client_disconnect_export,
	.o_iocontrol        = mdc_iocontrol,
	.o_set_info_async   = mdc_set_info_async,
	.o_statfs           = mdc_statfs,
	.o_fid_init         = client_fid_init,
	.o_fid_fini         = client_fid_fini,
	.o_fid_alloc        = mdc_fid_alloc,
	.o_import_event     = mdc_import_event,
	.o_get_info         = mdc_get_info,
	.o_process_config   = mdc_process_config,
	.o_get_uuid         = mdc_get_uuid,
	.o_quotactl         = mdc_quotactl,
	.o_quotacheck       = mdc_quotacheck
};

/* metadata (md) operations exported by the MDC driver */
static struct md_ops mdc_md_ops = {
	.m_getstatus        = mdc_getstatus,
	.m_null_inode	    = mdc_null_inode,
	.m_find_cbdata      = mdc_find_cbdata,
	.m_close            = mdc_close,
	.m_create           = mdc_create,
	.m_done_writing     = mdc_done_writing,
	.m_enqueue          = mdc_enqueue,
	.m_getattr          = mdc_getattr,
	.m_getattr_name     = mdc_getattr_name,
	.m_intent_lock      = mdc_intent_lock,
	.m_link             = mdc_link,
	.m_is_subdir        = mdc_is_subdir,
	.m_rename           = mdc_rename,
	.m_setattr          = mdc_setattr,
	.m_setxattr         = mdc_setxattr,
	.m_getxattr         = mdc_getxattr,
	.m_sync             = mdc_sync,
	.m_readpage         = mdc_readpage,
	.m_unlink           = mdc_unlink,
	.m_cancel_unused    = mdc_cancel_unused,
	.m_init_ea_size     = mdc_init_ea_size,
	.m_set_lock_data    = mdc_set_lock_data,
	.m_lock_match       = mdc_lock_match,
	.m_get_lustre_md    = mdc_get_lustre_md,
	.m_free_lustre_md   = mdc_free_lustre_md,
	.m_set_open_replay_data = mdc_set_open_replay_data,
	.m_clear_open_replay_data = mdc_clear_open_replay_data,
	.m_renew_capa       = mdc_renew_capa,
	.m_unpack_capa      = mdc_unpack_capa,
	.m_get_remote_perm  = mdc_get_remote_perm,
	.m_intent_getattr_async = mdc_intent_getattr_async,
	.m_revalidate_lock      = mdc_revalidate_lock
};
/* Module entry point: register the MDC obd type with the class driver. */
static int __init mdc_init(void)
{
	struct lprocfs_static_vars lvars = { NULL };

	lprocfs_mdc_init_vars(&lvars);
	return class_register_type(&mdc_obd_ops, &mdc_md_ops, lvars.module_vars,
				   LUSTRE_MDC_NAME, NULL);
}

/* Module exit: unregister the MDC obd type. */
static void /*__exit*/ mdc_exit(void)
{
	class_unregister_type(LUSTRE_MDC_NAME);
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Metadata Client");
MODULE_LICENSE("GPL");
module_init(mdc_init);
module_exit(mdc_exit);
| gpl-2.0 |
brieuwers/N8000Kernel | drivers/net/usb/asix.c | 226 | 41533 | /*
* ASIX AX8817X based USB 2.0 Ethernet Devices
* Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
* Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
* Copyright (C) 2006 James Painter <jamie.painter@iname.com>
* Copyright (c) 2002-2003 TiVo Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#define DRIVER_VERSION "14-Jun-2006"
static const char driver_name [] = "asix";
/* ASIX AX8817X based USB 2.0 Ethernet Devices */
#define AX_CMD_SET_SW_MII 0x06
#define AX_CMD_READ_MII_REG 0x07
#define AX_CMD_WRITE_MII_REG 0x08
#define AX_CMD_SET_HW_MII 0x0a
#define AX_CMD_READ_EEPROM 0x0b
#define AX_CMD_WRITE_EEPROM 0x0c
#define AX_CMD_WRITE_ENABLE 0x0d
#define AX_CMD_WRITE_DISABLE 0x0e
#define AX_CMD_READ_RX_CTL 0x0f
#define AX_CMD_WRITE_RX_CTL 0x10
#define AX_CMD_READ_IPG012 0x11
#define AX_CMD_WRITE_IPG0 0x12
#define AX_CMD_WRITE_IPG1 0x13
#define AX_CMD_READ_NODE_ID 0x13
#define AX_CMD_WRITE_NODE_ID 0x14
#define AX_CMD_WRITE_IPG2 0x14
#define AX_CMD_WRITE_MULTI_FILTER 0x16
#define AX88172_CMD_READ_NODE_ID 0x17
#define AX_CMD_READ_PHY_ID 0x19
#define AX_CMD_READ_MEDIUM_STATUS 0x1a
#define AX_CMD_WRITE_MEDIUM_MODE 0x1b
#define AX_CMD_READ_MONITOR_MODE 0x1c
#define AX_CMD_WRITE_MONITOR_MODE 0x1d
#define AX_CMD_READ_GPIOS 0x1e
#define AX_CMD_WRITE_GPIOS 0x1f
#define AX_CMD_SW_RESET 0x20
#define AX_CMD_SW_PHY_STATUS 0x21
#define AX_CMD_SW_PHY_SELECT 0x22
#define AX_MONITOR_MODE 0x01
#define AX_MONITOR_LINK 0x02
#define AX_MONITOR_MAGIC 0x04
#define AX_MONITOR_HSFS 0x10
/* AX88172 Medium Status Register values */
#define AX88172_MEDIUM_FD 0x02
#define AX88172_MEDIUM_TX 0x04
#define AX88172_MEDIUM_FC 0x10
#define AX88172_MEDIUM_DEFAULT \
( AX88172_MEDIUM_FD | AX88172_MEDIUM_TX | AX88172_MEDIUM_FC )
#define AX_MCAST_FILTER_SIZE 8
#define AX_MAX_MCAST 64
#define AX_SWRESET_CLEAR 0x00
#define AX_SWRESET_RR 0x01
#define AX_SWRESET_RT 0x02
#define AX_SWRESET_PRTE 0x04
#define AX_SWRESET_PRL 0x08
#define AX_SWRESET_BZ 0x10
#define AX_SWRESET_IPRL 0x20
#define AX_SWRESET_IPPD 0x40
#define AX88772_IPG0_DEFAULT 0x15
#define AX88772_IPG1_DEFAULT 0x0c
#define AX88772_IPG2_DEFAULT 0x12
/* AX88772 & AX88178 Medium Mode Register */
#define AX_MEDIUM_PF 0x0080
#define AX_MEDIUM_JFE 0x0040
#define AX_MEDIUM_TFC 0x0020
#define AX_MEDIUM_RFC 0x0010
#define AX_MEDIUM_ENCK 0x0008
#define AX_MEDIUM_AC 0x0004
#define AX_MEDIUM_FD 0x0002
#define AX_MEDIUM_GM 0x0001
#define AX_MEDIUM_SM 0x1000
#define AX_MEDIUM_SBP 0x0800
#define AX_MEDIUM_PS 0x0200
#define AX_MEDIUM_RE 0x0100
#define AX88178_MEDIUM_DEFAULT \
(AX_MEDIUM_PS | AX_MEDIUM_FD | AX_MEDIUM_AC | \
AX_MEDIUM_RFC | AX_MEDIUM_TFC | AX_MEDIUM_JFE | \
AX_MEDIUM_RE )
#define AX88772_MEDIUM_DEFAULT \
(AX_MEDIUM_FD | AX_MEDIUM_RFC | \
AX_MEDIUM_TFC | AX_MEDIUM_PS | \
AX_MEDIUM_AC | AX_MEDIUM_RE )
/* AX88772 & AX88178 RX_CTL values */
#define AX_RX_CTL_SO 0x0080
#define AX_RX_CTL_AP 0x0020
#define AX_RX_CTL_AM 0x0010
#define AX_RX_CTL_AB 0x0008
#define AX_RX_CTL_SEP 0x0004
#define AX_RX_CTL_AMALL 0x0002
#define AX_RX_CTL_PRO 0x0001
#define AX_RX_CTL_MFB_2048 0x0000
#define AX_RX_CTL_MFB_4096 0x0100
#define AX_RX_CTL_MFB_8192 0x0200
#define AX_RX_CTL_MFB_16384 0x0300
#define AX_DEFAULT_RX_CTL \
(AX_RX_CTL_SO | AX_RX_CTL_AB )
/* GPIO 0 .. 2 toggles */
#define AX_GPIO_GPO0EN 0x01 /* GPIO0 Output enable */
#define AX_GPIO_GPO_0 0x02 /* GPIO0 Output value */
#define AX_GPIO_GPO1EN 0x04 /* GPIO1 Output enable */
#define AX_GPIO_GPO_1 0x08 /* GPIO1 Output value */
#define AX_GPIO_GPO2EN 0x10 /* GPIO2 Output enable */
#define AX_GPIO_GPO_2 0x20 /* GPIO2 Output value */
#define AX_GPIO_RESERVED 0x40 /* Reserved */
#define AX_GPIO_RSE 0x80 /* Reload serial EEPROM */
#define AX_EEPROM_MAGIC 0xdeadbeef
#define AX88172_EEPROM_LEN 0x40
#define AX88772_EEPROM_LEN 0xff
#define PHY_MODE_MARVELL 0x0000
#define MII_MARVELL_LED_CTRL 0x0018
#define MII_MARVELL_STATUS 0x001b
#define MII_MARVELL_CTRL 0x0014
#define MARVELL_LED_MANUAL 0x0019
#define MARVELL_STATUS_HWCFG 0x0004
#define MARVELL_CTRL_TXDELAY 0x0002
#define MARVELL_CTRL_RXDELAY 0x0080
/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
struct asix_data {
	u8 multi_filter[AX_MCAST_FILTER_SIZE];	/* multicast hash filter */
	u8 mac_addr[ETH_ALEN];			/* current MAC address */
	u8 phymode;		/* PHY wiring mode (see PHY_MODE_*) */
	u8 ledmode;		/* LED mode setting */
	u8 eeprom_len;		/* EEPROM size (AX88172/AX88772_EEPROM_LEN) */
};

/* Layout of the (at least 8-byte) interrupt-endpoint status message. */
struct ax88172_int_data {
	__le16 res1;
	u8 link;	/* bit 0: PHY link is up */
	__le16 res2;
	u8 status;
	__le16 res3;
} __packed;
/*
 * Issue a vendor-specific control-IN request and copy @size reply
 * bytes into @data.  A bounce buffer is used so @data may live on the
 * stack.  Returns @size on success or a negative errno (-EINVAL when
 * the device returned fewer bytes than requested).
 */
static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
			 u16 size, void *data)
{
	void *buf;
	int err;

	netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
		   cmd, value, index, size);

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	err = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), cmd,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, index, buf, size, USB_CTRL_GET_TIMEOUT);
	if (err == size)
		memcpy(data, buf, size);
	else if (err >= 0)
		err = -EINVAL;

	kfree(buf);
	return err;
}

/*
 * Issue a vendor-specific control-OUT request, optionally sending
 * @size bytes from @data (duplicated into a DMA-safe buffer).
 * Returns the usb_control_msg() result.
 */
static int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
			  u16 size, void *data)
{
	void *buf = NULL;
	int err;

	netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
		   cmd, value, index, size);

	if (data) {
		buf = kmemdup(data, size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	}

	err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), cmd,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, index, buf, size, USB_CTRL_SET_TIMEOUT);
	kfree(buf);
	return err;
}
/* Completion handler for fire-and-forget control writes: log failures
 * and release the request and URB allocated by asix_write_cmd_async(). */
static void asix_async_cmd_callback(struct urb *urb)
{
	struct usb_ctrlrequest *req = urb->context;

	if (urb->status < 0)
		printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
		       urb->status);

	kfree(req);
	usb_free_urb(urb);
}

/*
 * Submit a vendor control-OUT request asynchronously (usable from
 * atomic context).  Errors are logged but not reported to the caller;
 * on success the URB and request are freed by the completion handler.
 */
static void
asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
		     u16 size, void *data)
{
	struct usb_ctrlrequest *req;
	struct urb *urb;
	int status;

	netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
		   cmd, value, index, size);

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
		return;
	}

	req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
	if (!req) {
		netdev_err(dev->net, "Failed to allocate memory for control request\n");
		usb_free_urb(urb);
		return;
	}

	req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
	req->bRequest = cmd;
	req->wValue = cpu_to_le16(value);
	req->wIndex = cpu_to_le16(index);
	req->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, dev->udev, usb_sndctrlpipe(dev->udev, 0),
			     (void *)req, data, size,
			     asix_async_cmd_callback, req);

	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status < 0) {
		netdev_err(dev->net, "Error submitting the control message: status=%d\n",
			   status);
		kfree(req);
		usb_free_urb(urb);
	}
}
/*
 * Receive fixup: the adapter prefixes every frame with a 32-bit
 * little-endian header whose low 11 bits are the frame length and
 * whose bits 16..26 carry the one's complement of that length as a
 * sanity check.  One URB may carry several frames: complete frames are
 * cloned and handed to usbnet individually, while the final frame is
 * realigned in place and kept in @skb.
 *
 * Return 0 tells usbnet to drop the buffer; nonzero keeps it.
 * NOTE(review): the special return value 2 (last-frame-in-place path)
 * relies on this tree's patched usbnet core — confirm against
 * drivers/net/usb/usbnet.c in the same tree.
 */
static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	u8 *head;
	u32 header;
	char *packet;
	struct sk_buff *ax_skb;
	u16 size;

	head = (u8 *) skb->data;
	memcpy(&header, head, sizeof(header));
	le32_to_cpus(&header);
	packet = head + sizeof(header);

	skb_pull(skb, 4);

	while (skb->len > 0) {
		/* length and its stored complement must agree */
		if ((header & 0x07ff) != ((~header >> 16) & 0x07ff))
			netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
		/* get the packet length */
		size = (u16) (header & 0x000007ff);

		if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
			/* exactly one frame left: keep it in @skb itself */
			u8 alignment = (unsigned long)skb->data & 0x3;
			if (alignment != 0x2) {
				/*
				 * not 16bit aligned so use the room provided by
				 * the 32 bit header to align the data
				 *
				 * note we want 16bit alignment as MAC header is
				 * 14bytes thus ip header will be aligned on
				 * 32bit boundary so accessing ipheader elements
				 * using a cast to struct ip header wont cause
				 * an unaligned accesses.
				 */
				u8 realignment = (alignment + 2) & 0x3;
				memmove(skb->data - realignment,
					skb->data,
					size);
				skb->data -= realignment;
				skb_set_tail_pointer(skb, size);
			}
			return 2;
		}

		if (size > dev->net->mtu + ETH_HLEN) {
			netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
				   size);
			return 0;
		}
		ax_skb = skb_clone(skb, GFP_ATOMIC);
		if (ax_skb) {
			u8 alignment = (unsigned long)packet & 0x3;
			ax_skb->len = size;

			if (alignment != 0x2) {
				/*
				 * not 16bit aligned use the room provided by
				 * the 32 bit header to align the data
				 */
				u8 realignment = (alignment + 2) & 0x3;
				memmove(packet - realignment, packet, size);
				packet -= realignment;
			}
			ax_skb->data = packet;
			skb_set_tail_pointer(ax_skb, size);
			usbnet_skb_return(dev, ax_skb);
		} else {
			return 0;
		}

		/* advance past this frame, padded to a 16-bit boundary */
		skb_pull(skb, (size + 1) & 0xfffe);

		if (skb->len == 0)
			break;

		head = (u8 *) skb->data;
		memcpy(&header, head, sizeof(header));
		le32_to_cpus(&header);
		packet = head + sizeof(header);
		skb_pull(skb, 4);
	}

	/* NOTE(review): skb->len is unsigned in mainline sk_buff, so this
	 * truncation check looks unreachable — confirm for this tree. */
	if (skb->len < 0) {
		netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
			   skb->len);
		return 0;
	}
	return 1;
}
/*
 * Transmit fixup: prepend the adapter's 32-bit header (frame length in
 * the low 16 bits, its bitwise complement in the high 16) and, when
 * the resulting URB length is an exact multiple of 512, append a
 * 4-byte 0xffff0000 marker (presumably so the transfer does not end
 * exactly on a bulk max-packet boundary — confirm against the AX88772
 * datasheet).  The skb is only reallocated when it is cloned or lacks
 * the required head/tail room; otherwise data is shifted in place.
 */
static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
				     gfp_t flags)
{
	int padlen;
	int headroom = skb_headroom(skb);
	int tailroom = skb_tailroom(skb);
	u32 packet_len;
	u32 padbytes = 0xffff0000;

	/* pad only when header + data would be a multiple of 512 */
	padlen = ((skb->len + 4) % 512) ? 0 : 4;

	if ((!skb_cloned(skb)) &&
	    ((headroom + tailroom) >= (4 + padlen))) {
		if ((headroom < 4) || (tailroom < padlen)) {
			/* enough total room, but in the wrong place:
			 * shift the payload instead of reallocating */
			skb->data = memmove(skb->head + 4, skb->data, skb->len);
			skb_set_tail_pointer(skb, skb->len);
		}
	} else {
		struct sk_buff *skb2;
		skb2 = skb_copy_expand(skb, 4, padlen, flags);
		dev_kfree_skb_any(skb);
		skb = skb2;
		if (!skb)
			return NULL;
	}

	skb_push(skb, 4);
	/* low half: length; high half: complement of length */
	packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
	cpu_to_le32s(&packet_len);
	skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));

	if ((skb->len % 512) == 0) {
		cpu_to_le32s(&padbytes);
		memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
		skb_put(skb, sizeof(padbytes));
	}
	return skb;
}
/*
 * Interrupt-endpoint handler: mirror the adapter's link bit into the
 * net_device carrier state, and schedule a link reset when the link
 * comes up.  Messages shorter than the 8-byte status layout are
 * ignored.
 */
static void asix_status(struct usbnet *dev, struct urb *urb)
{
	struct ax88172_int_data *event;
	int link;

	if (urb->actual_length < 8)
		return;

	event = urb->transfer_buffer;
	link = event->link & 0x01;

	/* nothing to do when the carrier state already matches */
	if (netif_carrier_ok(dev->net) == link)
		return;

	if (link) {
		netif_carrier_on(dev->net);
		usbnet_defer_kevent (dev, EVENT_LINK_RESET );
	} else {
		netif_carrier_off(dev->net);
	}
	netdev_dbg(dev->net, "Link Status is: %d\n", link);
}
/* Route MII register access to the host (software) side. */
static inline int asix_set_sw_mii(struct usbnet *dev)
{
	int ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);

	if (ret < 0)
		netdev_err(dev->net, "Failed to enable software MII access\n");
	return ret;
}

/* Hand MII register access back to the embedded hardware. */
static inline int asix_set_hw_mii(struct usbnet *dev)
{
	int ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);

	if (ret < 0)
		netdev_err(dev->net, "Failed to enable hardware MII access\n");
	return ret;
}

/* Read the PHY ID register; the address this driver uses is in the
 * second byte.  Returns that byte or a negative errno. */
static inline int asix_get_phy_addr(struct usbnet *dev)
{
	u8 buf[2];
	int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf);

	netdev_dbg(dev->net, "asix_get_phy_addr()\n");

	if (ret < 0) {
		netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
		return ret;
	}
	netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
		   *((__le16 *)buf));
	return buf[1];
}

/* Trigger the chip's software reset with the given AX_SWRESET_* flags. */
static int asix_sw_reset(struct usbnet *dev, u8 flags)
{
	int ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);

	if (ret < 0)
		netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
	return ret;
}
static u16 asix_read_rx_ctl(struct usbnet *dev)
{
__le16 v;
int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v);
if (ret < 0) {
netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
goto out;
}
ret = le16_to_cpu(v);
out:
return ret;
}
/* Program the 16-bit RX_CTL receive-control register. */
static int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
{
	int status;

	netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);

	status = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
	if (status >= 0)
		return status;

	netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
		   mode, status);
	return status;
}
/* Read the 16-bit Medium Status register.  As with asix_read_rx_ctl(),
 * a negative error code is truncated into the u16 return value. */
static u16 asix_read_medium_status(struct usbnet *dev)
{
	__le16 status;
	int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS, 0, 0, 2,
				&status);

	if (ret < 0) {
		netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
			   ret);
		return ret;
	}
	return le16_to_cpu(status);
}
/* Program the Medium Mode register (speed/duplex/flow-control bits). */
static int asix_write_medium_mode(struct usbnet *dev, u16 mode)
{
	int status;

	netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);

	status = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
	if (status >= 0)
		return status;

	netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
		   mode, status);
	return status;
}
/* Write the GPIO register, then optionally sleep to let the board-level
 * signals settle.  The sleep happens even if the write failed, matching
 * the historic behaviour. */
static int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
{
	int status;

	netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);

	status = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL);
	if (status < 0)
		netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
			   value, status);

	if (sleep)
		msleep(sleep);

	return status;
}
/*
* AX88772 & AX88178 have a 16-bit RX_CTL value
*/
/*
 * Program the AX88772/AX88178 16-bit RX_CTL receive filter from the
 * interface flags and multicast list.  Register writes are issued with
 * asix_write_cmd_async() -- presumably because this ndo callback can run
 * in atomic context where synchronous USB control transfers are not
 * allowed (TODO confirm against the usbnet framework).
 */
static void asix_set_multicast(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct asix_data *data = (struct asix_data *)&dev->data;
u16 rx_ctl = AX_DEFAULT_RX_CTL;
if (net->flags & IFF_PROMISC) {
/* promiscuous: accept everything */
rx_ctl |= AX_RX_CTL_PRO;
} else if (net->flags & IFF_ALLMULTI ||
netdev_mc_count(net) > AX_MAX_MCAST) {
/* too many groups for the hash filter: accept all multicast */
rx_ctl |= AX_RX_CTL_AMALL;
} else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
struct netdev_hw_addr *ha;
u32 crc_bits;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
netdev_for_each_mc_addr(ha, net) {
/* top 6 CRC bits index a bit in the 64-bit filter */
crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
}
asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
AX_MCAST_FILTER_SIZE, data->multi_filter);
rx_ctl |= AX_RX_CTL_AM;
}
asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
}
/*
 * MII read callback for the generic mii layer.
 *
 * Serialises on phy_mutex and temporarily switches the MII bus to
 * software control around the register read.
 *
 * Fix: the original ignored asix_read_cmd()'s return value and, on a
 * failed transfer, returned whatever happened to be in the uninitialized
 * stack buffer.  Now the error is propagated as a negative value, which
 * existing callers (e.g. asix_get_phyid()) already check for.
 */
static int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
{
	struct usbnet *dev = netdev_priv(netdev);
	__le16 res = 0;
	int ret;

	mutex_lock(&dev->phy_mutex);
	asix_set_sw_mii(dev);
	ret = asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
			    (__u16)loc, 2, &res);
	asix_set_hw_mii(dev);
	mutex_unlock(&dev->phy_mutex);

	if (ret < 0)
		return ret;

	netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
		   phy_id, loc, le16_to_cpu(res));

	return le16_to_cpu(res);
}
/* MII write callback for the generic mii layer.  Serialises on
 * phy_mutex and borrows the MII bus from the hardware for the write. */
static void
asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
{
	struct usbnet *dev = netdev_priv(netdev);
	__le16 data = cpu_to_le16(val);

	netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
		   phy_id, loc, val);

	mutex_lock(&dev->phy_mutex);
	asix_set_sw_mii(dev);
	asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &data);
	asix_set_hw_mii(dev);
	mutex_unlock(&dev->phy_mutex);
}
/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
static u32 asix_get_phyid(struct usbnet *dev)
{
int phy_reg;
u32 phy_id;
phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
if (phy_reg < 0)
return 0;
phy_id = (phy_reg & 0xffff) << 16;
phy_reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID2);
if (phy_reg < 0)
return 0;
phy_id |= (phy_reg & 0xffff);
return phy_id;
}
/* ethtool get_wol: translate the chip's monitor-mode register into
 * WAKE_PHY/WAKE_MAGIC capability and current settings. */
static void
asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
{
	struct usbnet *dev = netdev_priv(net);
	u8 opt;

	wolinfo->supported = 0;
	wolinfo->wolopts = 0;

	if (asix_read_cmd(dev, AX_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0)
		return;

	wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
	if (!(opt & AX_MONITOR_MODE))
		return;

	if (opt & AX_MONITOR_LINK)
		wolinfo->wolopts |= WAKE_PHY;
	if (opt & AX_MONITOR_MAGIC)
		wolinfo->wolopts |= WAKE_MAGIC;
}
/* ethtool set_wol: map WAKE_PHY/WAKE_MAGIC requests onto the chip's
 * monitor-mode register. */
static int
asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
{
	struct usbnet *dev = netdev_priv(net);
	u8 opt = 0;

	if (wolinfo->wolopts & WAKE_PHY)
		opt |= AX_MONITOR_LINK;
	if (wolinfo->wolopts & WAKE_MAGIC)
		opt |= AX_MONITOR_MAGIC;
	/* Any wake source implies monitor mode itself must be enabled. */
	if (opt)
		opt |= AX_MONITOR_MODE;

	if (asix_write_cmd(dev, AX_CMD_WRITE_MONITOR_MODE,
			   opt, 0, 0, NULL) < 0)
		return -EINVAL;

	return 0;
}
/* ethtool get_eeprom_len: size was cached in the per-device scratch
 * area (dev->data) at bind time. */
static int asix_get_eeprom_len(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	return ((struct asix_data *)&dev->data)->eeprom_len;
}
/* ethtool get_eeprom: read the requested range one 16-bit word at a
 * time.  Odd lengths are rejected so we never write past the caller's
 * buffer (the device always returns whole words). */
static int asix_get_eeprom(struct net_device *net,
			   struct ethtool_eeprom *eeprom, u8 *data)
{
	struct usbnet *dev = netdev_priv(net);
	__le16 *ebuf = (__le16 *)data;
	int i;

	if (eeprom->len & 1)
		return -EINVAL;

	eeprom->magic = AX_EEPROM_MAGIC;

	for (i = 0; i < eeprom->len / 2; i++) {
		if (asix_read_cmd(dev, AX_CMD_READ_EEPROM,
				  eeprom->offset + i, 0, 2, &ebuf[i]) < 0)
			return -EINVAL;
	}
	return 0;
}
/*
 * ethtool get_drvinfo: usbnet fills in the generic fields, then we
 * override driver name/version and report the EEPROM dump size.
 *
 * Fix: strncpy() does not guarantee NUL termination when the source is
 * as long as the destination; strlcpy() always terminates.
 */
static void asix_get_drvinfo (struct net_device *net,
				 struct ethtool_drvinfo *info)
{
	struct usbnet *dev = netdev_priv(net);
	struct asix_data *data = (struct asix_data *)&dev->data;

	/* Inherit standard device info */
	usbnet_get_drvinfo(net, info);
	strlcpy(info->driver, driver_name, sizeof(info->driver));
	strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
	info->eedump_len = data->eeprom_len;
}
/* ethtool get_link: report link state from the MII status register. */
static u32 asix_get_link(struct net_device *net)
{
	struct usbnet *priv = netdev_priv(net);

	return mii_link_ok(&priv->mii);
}
/* ndo_do_ioctl: delegate MII register ioctls to the generic helper. */
static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
{
	struct usbnet *priv = netdev_priv(net);

	return generic_mii_ioctl(&priv->mii, if_mii(rq), cmd, NULL);
}
/*
 * ndo_set_mac_address: validate the new address, copy it into the
 * netdev, and push it to the chip's node-ID register asynchronously.
 * Refused while the interface is running.
 */
static int asix_set_mac_address(struct net_device *net, void *p)
{
struct usbnet *dev = netdev_priv(net);
struct asix_data *data = (struct asix_data *)&dev->data;
struct sockaddr *addr = p;
if (netif_running(net))
return -EBUSY;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
/* We use the 20 byte dev->data
* for our 6 byte mac buffer
* to avoid allocating memory that
* is tricky to free later */
memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
/* async write: the buffer must outlive this call, hence dev->data */
asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
data->mac_addr);
return 0;
}
/* We need to override some ethtool_ops so we require our
own structure so we don't interfere with other usbnet
devices that may be connected at the same time. */
/* AX88172 ethtool ops: WOL and EEPROM access via the shared asix_*
 * helpers, everything else from the usbnet/mii generics. */
static const struct ethtool_ops ax88172_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = asix_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_wol = asix_get_wol,
.set_wol = asix_set_wol,
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
.get_settings = usbnet_get_settings,
.set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
};
/*
 * AX88172 variant of the receive-filter setup.  Unlike the 772/178
 * version this chip has an 8-bit RX_CTL register with different bit
 * positions (0x8c base, 0x01 promisc, 0x02 all-multi, 0x10 hash-filter).
 */
static void ax88172_set_multicast(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct asix_data *data = (struct asix_data *)&dev->data;
u8 rx_ctl = 0x8c;
if (net->flags & IFF_PROMISC) {
rx_ctl |= 0x01;
} else if (net->flags & IFF_ALLMULTI ||
netdev_mc_count(net) > AX_MAX_MCAST) {
rx_ctl |= 0x02;
} else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
struct netdev_hw_addr *ha;
u32 crc_bits;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
netdev_for_each_mc_addr(ha, net) {
/* top 6 CRC bits index a bit in the 64-bit filter */
crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
}
asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
AX_MCAST_FILTER_SIZE, data->multi_filter);
rx_ctl |= 0x10;
}
asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
}
/*
 * Reprogram the AX88172 medium register after a link change, based on
 * the duplex mode autonegotiated by the PHY.
 */
static int ax88172_link_reset(struct usbnet *dev)
{
u8 mode;
struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
mode = AX88172_MEDIUM_DEFAULT;
/*
 * NOTE(review): "|= ~AX88172_MEDIUM_FD" sets every bit *except* FD,
 * whereas ax88772_link_reset() uses "&= ~" to clear only the FD bit.
 * This looks like a typo for "&= ~", but it has shipped this way for a
 * long time -- confirm against hardware/upstream before changing.
 */
if (ecmd.duplex != DUPLEX_FULL)
mode |= ~AX88172_MEDIUM_FD;
netdev_dbg(dev->net, "ax88172_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
return 0;
}
/* AX88172 netdev ops: note this model uses the generic eth_mac_addr
 * (no hardware node-ID write) and its own multicast routine. */
static const struct net_device_ops ax88172_netdev_ops = {
.ndo_open = usbnet_open,
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
.ndo_set_multicast_list = ax88172_set_multicast,
};
/*
 * Probe-time setup for the AX88172: toggle the model-specific GPIO
 * bytes, enable the receiver, read the MAC address, and wire up the
 * MII / netdev / ethtool ops.  Returns 0 or a negative error code.
 */
static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret = 0;
u8 buf[ETH_ALEN];
int i;
/* three per-model GPIO bytes are packed into driver_info->data */
unsigned long gpio_bits = dev->driver_info->data;
struct asix_data *data = (struct asix_data *)&dev->data;
data->eeprom_len = AX88172_EEPROM_LEN;
usbnet_get_endpoints(dev,intf);
/* Toggle the GPIOs in a manufacturer/model specific way */
for (i = 2; i >= 0; i--) {
if ((ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS,
(gpio_bits >> (i * 8)) & 0xff, 0, 0,
NULL)) < 0)
goto out;
msleep(5);
}
if ((ret = asix_write_rx_ctl(dev, 0x80)) < 0)
goto out;
/* Get the MAC address */
if ((ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID,
0, 0, ETH_ALEN, buf)) < 0) {
dbg("read AX_CMD_READ_NODE_ID failed: %d", ret);
goto out;
}
memcpy(dev->net->dev_addr, buf, ETH_ALEN);
/* Initialize MII structure */
dev->mii.dev = dev->net;
dev->mii.mdio_read = asix_mdio_read;
dev->mii.mdio_write = asix_mdio_write;
dev->mii.phy_id_mask = 0x3f;
dev->mii.reg_num_mask = 0x1f;
dev->mii.phy_id = asix_get_phy_addr(dev);
dev->net->netdev_ops = &ax88172_netdev_ops;
dev->net->ethtool_ops = &ax88172_ethtool_ops;
/* reset the PHY and restart autonegotiation */
asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
mii_nway_restart(&dev->mii);
return 0;
out:
return ret;
}
/* AX88772 ethtool ops: identical wiring to the AX88172 table. */
static const struct ethtool_ops ax88772_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = asix_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_wol = asix_get_wol,
.set_wol = asix_set_wol,
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
.get_settings = usbnet_get_settings,
.set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
};
/* Reprogram the AX88772 medium register after a link change, dropping
 * the 100Mbit and full-duplex bits when the link negotiated less. */
static int ax88772_link_reset(struct usbnet *dev)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	u16 mode = AX88772_MEDIUM_DEFAULT;

	mii_check_media(&dev->mii, 1, 1);
	mii_ethtool_gset(&dev->mii, &ecmd);

	if (ethtool_cmd_speed(&ecmd) != SPEED_100)
		mode &= ~AX_MEDIUM_PS;
	if (ecmd.duplex != DUPLEX_FULL)
		mode &= ~AX_MEDIUM_FD;

	netdev_dbg(dev->net, "ax88772_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
		   ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);

	asix_write_medium_mode(dev, mode);
	return 0;
}
/* AX88772 netdev ops: MAC-address changes go through the hardware
 * node-ID register (asix_set_mac_address). */
static const struct net_device_ops ax88772_netdev_ops = {
.ndo_open = usbnet_open,
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = asix_ioctl,
.ndo_set_multicast_list = asix_set_multicast,
};
/*
 * Probe-time setup for the AX88772: select and reset the (possibly
 * embedded) PHY, read the MAC address, then program MII, medium mode,
 * inter-packet gaps and the receive filter.  The msleep(150) calls
 * between reset steps give the chip time to settle.
 */
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret, embd_phy;
u16 rx_ctl;
struct asix_data *data = (struct asix_data *)&dev->data;
u8 buf[ETH_ALEN];
u32 phyid;
data->eeprom_len = AX88772_EEPROM_LEN;
usbnet_get_endpoints(dev,intf);
if ((ret = asix_write_gpio(dev,
AX_GPIO_RSE | AX_GPIO_GPO_2 | AX_GPIO_GPO2EN, 5)) < 0)
goto out;
/* 0x10 is the phy id of the embedded 10/100 ethernet phy */
embd_phy = ((asix_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);
if ((ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT,
embd_phy, 0, 0, NULL)) < 0) {
dbg("Select PHY #1 failed: %d", ret);
goto out;
}
/* power-down + reset, then clear, with settle delays in between */
if ((ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL)) < 0)
goto out;
msleep(150);
if ((ret = asix_sw_reset(dev, AX_SWRESET_CLEAR)) < 0)
goto out;
msleep(150);
if (embd_phy) {
if ((ret = asix_sw_reset(dev, AX_SWRESET_IPRL)) < 0)
goto out;
}
else {
if ((ret = asix_sw_reset(dev, AX_SWRESET_PRTE)) < 0)
goto out;
}
msleep(150);
rx_ctl = asix_read_rx_ctl(dev);
dbg("RX_CTL is 0x%04x after software reset", rx_ctl);
/* keep the receiver disabled until configuration is complete */
if ((ret = asix_write_rx_ctl(dev, 0x0000)) < 0)
goto out;
rx_ctl = asix_read_rx_ctl(dev);
dbg("RX_CTL is 0x%04x setting to 0x0000", rx_ctl);
/* Get the MAC address */
if ((ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
0, 0, ETH_ALEN, buf)) < 0) {
dbg("Failed to read MAC address: %d", ret);
goto out;
}
memcpy(dev->net->dev_addr, buf, ETH_ALEN);
/* Initialize MII structure */
dev->mii.dev = dev->net;
dev->mii.mdio_read = asix_mdio_read;
dev->mii.mdio_write = asix_mdio_write;
dev->mii.phy_id_mask = 0x1f;
dev->mii.reg_num_mask = 0x1f;
dev->mii.phy_id = asix_get_phy_addr(dev);
phyid = asix_get_phyid(dev);
dbg("PHYID=0x%08x", phyid);
if ((ret = asix_sw_reset(dev, AX_SWRESET_PRL)) < 0)
goto out;
msleep(150);
if ((ret = asix_sw_reset(dev, AX_SWRESET_IPRL | AX_SWRESET_PRL)) < 0)
goto out;
msleep(150);
dev->net->netdev_ops = &ax88772_netdev_ops;
dev->net->ethtool_ops = &ax88772_ethtool_ops;
/* reset the PHY and restart autonegotiation */
asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_CSMA);
mii_nway_restart(&dev->mii);
if ((ret = asix_write_medium_mode(dev, AX88772_MEDIUM_DEFAULT)) < 0)
goto out;
if ((ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
AX88772_IPG2_DEFAULT, 0, NULL)) < 0) {
dbg("Write IPG,IPG1,IPG2 failed: %d", ret);
goto out;
}
/* Set RX_CTL to default values with 2k buffer, and enable cactus */
if ((ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL)) < 0)
goto out;
rx_ctl = asix_read_rx_ctl(dev);
dbg("RX_CTL is 0x%04x after all initializations", rx_ctl);
rx_ctl = asix_read_medium_status(dev);
dbg("Medium Status is 0x%04x after all initializations", rx_ctl);
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
if (dev->driver_info->flags & FLAG_FRAMING_AX) {
/* hard_mtu is still the default - the device does not support
jumbo eth frames */
dev->rx_urb_size = 2048;
}
return 0;
out:
return ret;
}
static struct ethtool_ops ax88178_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = asix_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_wol = asix_get_wol,
.set_wol = asix_set_wol,
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
.get_settings = usbnet_get_settings,
.set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
};
/*
 * One-time setup for the external Marvell gigabit PHY found on some
 * AX88178 boards: enable RX/TX clock delays and, if the EEPROM
 * requested it, reprogram the LED control register.
 */
static int marvell_phy_init(struct usbnet *dev)
{
struct asix_data *data = (struct asix_data *)&dev->data;
u16 reg;
netdev_dbg(dev->net, "marvell_phy_init()\n");
reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_MARVELL_STATUS);
netdev_dbg(dev->net, "MII_MARVELL_STATUS = 0x%04x\n", reg);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_MARVELL_CTRL,
MARVELL_CTRL_RXDELAY | MARVELL_CTRL_TXDELAY);
if (data->ledmode) {
reg = asix_mdio_read(dev->net, dev->mii.phy_id,
MII_MARVELL_LED_CTRL);
netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (1) = 0x%04x\n", reg);
/* clear bits 8-10 and select mode 1 */
reg &= 0xf8ff;
reg |= (1 + 0x0100);
asix_mdio_write(dev->net, dev->mii.phy_id,
MII_MARVELL_LED_CTRL, reg);
reg = asix_mdio_read(dev->net, dev->mii.phy_id,
MII_MARVELL_LED_CTRL);
netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (2) = 0x%04x\n", reg);
/*
 * NOTE(review): this final mask is a dead store -- reg is
 * never written back to the PHY afterwards.  Possibly a
 * missing asix_mdio_write(); confirm intent before changing.
 */
reg &= 0xfc0f;
}
return 0;
}
/* Update the Marvell PHY's manual LED register to reflect the current
 * link speed.  Always returns 0. */
static int marvell_led_status(struct usbnet *dev, u16 speed)
{
	u16 reg = asix_mdio_read(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL);

	netdev_dbg(dev->net, "marvell_led_status() read 0x%04x\n", reg);

	/* Clear out the center LED bits - 0x03F0 */
	reg &= 0xfc0f;

	if (speed == SPEED_1000)
		reg |= 0x03e0;
	else if (speed == SPEED_100)
		reg |= 0x03b0;
	else
		reg |= 0x02f0;

	netdev_dbg(dev->net, "marvell_led_status() writing 0x%04x\n", reg);
	asix_mdio_write(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL, reg);

	return 0;
}
/*
 * Reprogram the AX88178 medium register after a link change: set the
 * gigabit (GM) or 100Mbit (PS) bit per the negotiated speed, plus the
 * duplex bit, then refresh the Marvell LEDs if applicable.
 */
static int ax88178_link_reset(struct usbnet *dev)
{
u16 mode;
struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
struct asix_data *data = (struct asix_data *)&dev->data;
u32 speed;
netdev_dbg(dev->net, "ax88178_link_reset()\n");
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
mode = AX88178_MEDIUM_DEFAULT;
speed = ethtool_cmd_speed(&ecmd);
if (speed == SPEED_1000)
mode |= AX_MEDIUM_GM;
else if (speed == SPEED_100)
mode |= AX_MEDIUM_PS;
else
mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM);
mode |= AX_MEDIUM_ENCK;
if (ecmd.duplex == DUPLEX_FULL)
mode |= AX_MEDIUM_FD;
else
mode &= ~AX_MEDIUM_FD;
netdev_dbg(dev->net, "ax88178_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
speed, ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
if (data->phymode == PHY_MODE_MARVELL && data->ledmode)
marvell_led_status(dev, speed);
return 0;
}
/*
 * Pick the chip's max-frame-burst size (and matching rx URB size) from
 * the current hard_mtu, update the RX_CTL MFB field and the jumbo-frame
 * enable bit, and drop in-flight rx URBs if they became too small.
 */
static void ax88178_set_mfb(struct usbnet *dev)
{
u16 mfb = AX_RX_CTL_MFB_16384;
u16 rxctl;
u16 medium;
int old_rx_urb_size = dev->rx_urb_size;
/* round hard_mtu up to the next supported burst size */
if (dev->hard_mtu < 2048) {
dev->rx_urb_size = 2048;
mfb = AX_RX_CTL_MFB_2048;
} else if (dev->hard_mtu < 4096) {
dev->rx_urb_size = 4096;
mfb = AX_RX_CTL_MFB_4096;
} else if (dev->hard_mtu < 8192) {
dev->rx_urb_size = 8192;
mfb = AX_RX_CTL_MFB_8192;
} else if (dev->hard_mtu < 16384) {
dev->rx_urb_size = 16384;
mfb = AX_RX_CTL_MFB_16384;
}
/* read-modify-write only the MFB field of RX_CTL */
rxctl = asix_read_rx_ctl(dev);
asix_write_rx_ctl(dev, (rxctl & ~AX_RX_CTL_MFB_16384) | mfb);
medium = asix_read_medium_status(dev);
if (dev->net->mtu > 1500)
medium |= AX_MEDIUM_JFE;
else
medium &= ~AX_MEDIUM_JFE;
asix_write_medium_mode(dev, medium);
/* queued URBs were allocated at the old size; recycle them */
if (dev->rx_urb_size > old_rx_urb_size)
usbnet_unlink_rx_urbs(dev);
}
/* ndo_change_mtu for the AX88178: validate against the chip's 16KB
 * frame limit, then retune the max-frame-burst settings. */
static int ax88178_change_mtu(struct net_device *net, int new_mtu)
{
	struct usbnet *dev = netdev_priv(net);
	int ll_mtu = new_mtu + net->hard_header_len + 4;

	netdev_dbg(dev->net, "ax88178_change_mtu() new_mtu=%d\n", new_mtu);

	if (new_mtu <= 0 || ll_mtu > 16384)
		return -EINVAL;
	/* Reject frames that are an exact multiple of the bulk endpoint
	 * size -- presumably to sidestep zero-length-packet handling
	 * (TODO confirm). */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	net->mtu = new_mtu;
	dev->hard_mtu = net->mtu + net->hard_header_len;
	ax88178_set_mfb(dev);

	return 0;
}
/* AX88178 netdev ops: like AX88772 but with a custom change_mtu that
 * retunes the max-frame-burst size for jumbo frames. */
static const struct net_device_ops ax88178_netdev_ops = {
.ndo_open = usbnet_open,
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_multicast_list = asix_set_multicast,
.ndo_do_ioctl = asix_ioctl,
.ndo_change_mtu = ax88178_change_mtu,
};
/*
 * Probe-time setup for the AX88178 (gigabit): decode PHY/LED mode from
 * EEPROM word 0x17, run the GPIO power-up sequence, reset the chip,
 * then set up MII (with GMII support) and default medium/RX settings.
 */
static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct asix_data *data = (struct asix_data *)&dev->data;
int ret;
u8 buf[ETH_ALEN];
__le16 eeprom;
u8 status;
int gpio0 = 0;
u32 phyid;
usbnet_get_endpoints(dev,intf);
asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status);
dbg("GPIO Status: 0x%04x", status);
/* EEPROM word 0x17 encodes PHY mode, LED mode and GPIO0 polarity */
asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL);
asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom);
asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL);
dbg("EEPROM index 0x17 is 0x%04x", eeprom);
if (eeprom == cpu_to_le16(0xffff)) {
/* blank EEPROM: fall back to Marvell-PHY defaults */
data->phymode = PHY_MODE_MARVELL;
data->ledmode = 0;
gpio0 = 1;
} else {
data->phymode = le16_to_cpu(eeprom) & 7;
data->ledmode = le16_to_cpu(eeprom) >> 8;
gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 0 : 1;
}
dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode);
asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40);
if ((le16_to_cpu(eeprom) >> 8) != 1) {
asix_write_gpio(dev, 0x003c, 30);
asix_write_gpio(dev, 0x001c, 300);
asix_write_gpio(dev, 0x003c, 30);
} else {
dbg("gpio phymode == 1 path");
asix_write_gpio(dev, AX_GPIO_GPO1EN, 30);
asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30);
}
asix_sw_reset(dev, 0);
msleep(150);
asix_sw_reset(dev, AX_SWRESET_PRL | AX_SWRESET_IPPD);
msleep(150);
/* receiver stays off until configuration is complete */
asix_write_rx_ctl(dev, 0);
/* Get the MAC address */
if ((ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID,
0, 0, ETH_ALEN, buf)) < 0) {
dbg("Failed to read MAC address: %d", ret);
goto out;
}
memcpy(dev->net->dev_addr, buf, ETH_ALEN);
/* Initialize MII structure */
dev->mii.dev = dev->net;
dev->mii.mdio_read = asix_mdio_read;
dev->mii.mdio_write = asix_mdio_write;
dev->mii.phy_id_mask = 0x1f;
dev->mii.reg_num_mask = 0xff;
dev->mii.supports_gmii = 1;
dev->mii.phy_id = asix_get_phy_addr(dev);
dev->net->netdev_ops = &ax88178_netdev_ops;
dev->net->ethtool_ops = &ax88178_ethtool_ops;
phyid = asix_get_phyid(dev);
dbg("PHYID=0x%08x", phyid);
if (data->phymode == PHY_MODE_MARVELL) {
marvell_phy_init(dev);
msleep(60);
}
/* reset the PHY and advertise all speeds, including 1000FD */
asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR,
BMCR_RESET | BMCR_ANENABLE);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000,
ADVERTISE_1000FULL);
mii_nway_restart(&dev->mii);
if ((ret = asix_write_medium_mode(dev, AX88178_MEDIUM_DEFAULT)) < 0)
goto out;
if ((ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL)) < 0)
goto out;
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
if (dev->driver_info->flags & FLAG_FRAMING_AX) {
/* hard_mtu is still the default - the device does not support
jumbo eth frames */
dev->rx_urb_size = 2048;
}
return 0;
out:
return ret;
}
/*
 * Per-model driver_info tables.  The AX88172 variants differ only in
 * the three GPIO toggle bytes packed into .data; the AX88772/AX88178
 * entries add the Asix rx/tx framing fixups instead.
 */
static const struct driver_info ax8817x_info = {
.description = "ASIX AX8817x USB 2.0 Ethernet",
.bind = ax88172_bind,
.status = asix_status,
.link_reset = ax88172_link_reset,
.reset = ax88172_link_reset,
.flags = FLAG_ETHER | FLAG_LINK_INTR,
.data = 0x00130103,
};
static const struct driver_info dlink_dub_e100_info = {
.description = "DLink DUB-E100 USB Ethernet",
.bind = ax88172_bind,
.status = asix_status,
.link_reset = ax88172_link_reset,
.reset = ax88172_link_reset,
.flags = FLAG_ETHER | FLAG_LINK_INTR,
.data = 0x009f9d9f,
};
static const struct driver_info netgear_fa120_info = {
.description = "Netgear FA-120 USB Ethernet",
.bind = ax88172_bind,
.status = asix_status,
.link_reset = ax88172_link_reset,
.reset = ax88172_link_reset,
.flags = FLAG_ETHER | FLAG_LINK_INTR,
.data = 0x00130103,
};
static const struct driver_info hawking_uf200_info = {
.description = "Hawking UF200 USB Ethernet",
.bind = ax88172_bind,
.status = asix_status,
.link_reset = ax88172_link_reset,
.reset = ax88172_link_reset,
.flags = FLAG_ETHER | FLAG_LINK_INTR,
.data = 0x001f1d1f,
};
static const struct driver_info ax88772_info = {
.description = "ASIX AX88772 USB 2.0 Ethernet",
.bind = ax88772_bind,
.status = asix_status,
.link_reset = ax88772_link_reset,
.reset = ax88772_link_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
.rx_fixup = asix_rx_fixup,
.tx_fixup = asix_tx_fixup,
};
static const struct driver_info ax88178_info = {
.description = "ASIX AX88178 USB 2.0 Ethernet",
.bind = ax88178_bind,
.status = asix_status,
.link_reset = ax88178_link_reset,
.reset = ax88178_link_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
.rx_fixup = asix_rx_fixup,
.tx_fixup = asix_tx_fixup,
};
/* USB id table: maps each supported vendor/product pair onto the
 * driver_info table for its chip variant. */
static const struct usb_device_id products [] = {
{
// Linksys USB200M
USB_DEVICE (0x077b, 0x2226),
.driver_info = (unsigned long) &ax8817x_info,
}, {
// Netgear FA120
USB_DEVICE (0x0846, 0x1040),
.driver_info = (unsigned long) &netgear_fa120_info,
}, {
// DLink DUB-E100
USB_DEVICE (0x2001, 0x1a00),
.driver_info = (unsigned long) &dlink_dub_e100_info,
}, {
// Intellinet, ST Lab USB Ethernet
USB_DEVICE (0x0b95, 0x1720),
.driver_info = (unsigned long) &ax8817x_info,
}, {
// Hawking UF200, TrendNet TU2-ET100
USB_DEVICE (0x07b8, 0x420a),
.driver_info = (unsigned long) &hawking_uf200_info,
}, {
// Billionton Systems, USB2AR
USB_DEVICE (0x08dd, 0x90ff),
.driver_info = (unsigned long) &ax8817x_info,
}, {
// ATEN UC210T
USB_DEVICE (0x0557, 0x2009),
.driver_info = (unsigned long) &ax8817x_info,
}, {
// Buffalo LUA-U2-KTX
USB_DEVICE (0x0411, 0x003d),
.driver_info = (unsigned long) &ax8817x_info,
}, {
// Buffalo LUA-U2-GT 10/100/1000
USB_DEVICE (0x0411, 0x006e),
.driver_info = (unsigned long) &ax88178_info,
}, {
// Sitecom LN-029 "USB 2.0 10/100 Ethernet adapter"
USB_DEVICE (0x6189, 0x182d),
.driver_info = (unsigned long) &ax8817x_info,
}, {
// corega FEther USB2-TX
USB_DEVICE (0x07aa, 0x0017),
.driver_info = (unsigned long) &ax8817x_info,
}, {
// Surecom EP-1427X-2
USB_DEVICE (0x1189, 0x0893),
.driver_info = (unsigned long) &ax8817x_info,
}, {
// goodway corp usb gwusb2e
USB_DEVICE (0x1631, 0x6200),
.driver_info = (unsigned long) &ax8817x_info,
}, {
// JVC MP-PRX1 Port Replicator
USB_DEVICE (0x04f1, 0x3008),
.driver_info = (unsigned long) &ax8817x_info,
}, {
// ASIX AX88772B 10/100
USB_DEVICE (0x0b95, 0x772b),
.driver_info = (unsigned long) &ax88772_info,
}, {
// ASIX AX88772 10/100
USB_DEVICE (0x0b95, 0x7720),
.driver_info = (unsigned long) &ax88772_info,
}, {
// ASIX AX88178 10/100/1000
USB_DEVICE (0x0b95, 0x1780),
.driver_info = (unsigned long) &ax88178_info,
}, {
// Logitec LAN-GTJ/U2A
USB_DEVICE (0x0789, 0x0160),
.driver_info = (unsigned long) &ax88178_info,
}, {
// Linksys USB200M Rev 2
USB_DEVICE (0x13b1, 0x0018),
.driver_info = (unsigned long) &ax88772_info,
}, {
// 0Q0 cable ethernet
USB_DEVICE (0x1557, 0x7720),
.driver_info = (unsigned long) &ax88772_info,
}, {
// DLink DUB-E100 H/W Ver B1
USB_DEVICE (0x07d1, 0x3c05),
.driver_info = (unsigned long) &ax88772_info,
}, {
// DLink DUB-E100 H/W Ver B1 Alternate
USB_DEVICE (0x2001, 0x3c05),
.driver_info = (unsigned long) &ax88772_info,
}, {
// Linksys USB1000
USB_DEVICE (0x1737, 0x0039),
.driver_info = (unsigned long) &ax88178_info,
}, {
// IO-DATA ETG-US2
USB_DEVICE (0x04bb, 0x0930),
.driver_info = (unsigned long) &ax88178_info,
}, {
// Belkin F5D5055
USB_DEVICE(0x050d, 0x5055),
.driver_info = (unsigned long) &ax88178_info,
}, {
// Apple USB Ethernet Adapter
USB_DEVICE(0x05ac, 0x1402),
.driver_info = (unsigned long) &ax88772_info,
}, {
// Cables-to-Go USB Ethernet Adapter
USB_DEVICE(0x0b95, 0x772a),
.driver_info = (unsigned long) &ax88772_info,
}, {
// ABOCOM for pci
USB_DEVICE(0x14ea, 0xab11),
.driver_info = (unsigned long) &ax88178_info,
}, {
// ASIX 88772a
USB_DEVICE(0x0db0, 0xa877),
.driver_info = (unsigned long) &ax88772_info,
},
{ }, // END
};
MODULE_DEVICE_TABLE(usb, products);
/* USB driver glue: all hotplug/PM entry points come from usbnet. */
static struct usb_driver asix_driver = {
.name = "asix",
.id_table = products,
.probe = usbnet_probe,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
.disconnect = usbnet_disconnect,
.supports_autosuspend = 1,
};
/* Module init: register the USB driver with the core. */
static int __init asix_init(void)
{
return usb_register(&asix_driver);
}
module_init(asix_init);
/* Module exit: unregister the USB driver. */
static void __exit asix_exit(void)
{
usb_deregister(&asix_driver);
}
module_exit(asix_exit);
MODULE_AUTHOR("David Hollis");
MODULE_DESCRIPTION("ASIX AX8817X based USB 2.0 Ethernet Devices");
MODULE_LICENSE("GPL");
| gpl-2.0 |
impl/rbppc-linux | drivers/gpu/drm/tilcdc/tilcdc_panel.c | 482 | 11139 | /*
* Copyright (C) 2012 Texas Instruments
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/consumer.h>
#include <linux/backlight.h>
#include <video/display_timing.h>
#include <video/of_display_timing.h>
#include <video/videomode.h>
#include "tilcdc_drv.h"
/*
 * Per-panel module state: the tilcdc module base plus the parsed panel
 * info, display timings, and an optional backlight device.
 */
struct panel_module {
struct tilcdc_module base;
struct tilcdc_panel_info *info;
struct display_timings *timings;
struct backlight_device *backlight;
};
/* recover the panel_module from its embedded tilcdc_module */
#define to_panel_module(x) container_of(x, struct panel_module, base)
/*
* Encoder:
*/
/* DRM encoder wrapper that remembers its owning panel module. */
struct panel_encoder {
struct drm_encoder base;
struct panel_module *mod;
};
/* recover the panel_encoder from its embedded drm_encoder */
#define to_panel_encoder(x) container_of(x, struct panel_encoder, base)
/* Tear down the DRM side of the encoder, then free our wrapper. */
static void panel_encoder_destroy(struct drm_encoder *encoder)
{
	struct panel_encoder *pe = to_panel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(pe);
}
static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
struct backlight_device *backlight = panel_encoder->mod->backlight;
if (!backlight)
return;
backlight->props.power = mode == DRM_MODE_DPMS_ON
? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
backlight_update_status(backlight);
}
/* No mode adjustment needed -- the raw panel takes modes unmodified. */
static bool panel_encoder_mode_fixup(struct drm_encoder *encoder,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}
/* Blank the panel and hand the panel timings to the CRTC before a
 * modeset. */
static void panel_encoder_prepare(struct drm_encoder *encoder)
{
	struct panel_encoder *pe = to_panel_encoder(encoder);

	panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
	tilcdc_crtc_set_panel_info(encoder->crtc, pe->mod->info);
}
/* Modeset done -- light the panel back up. */
static void panel_encoder_commit(struct drm_encoder *encoder)
{
	panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
}
/* No encoder-level mode programming: the CRTC drives the panel
 * directly using the panel_info handed over in prepare(). */
static void panel_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
/* nothing needed */
}
/* Base encoder funcs: only destruction is non-default. */
static const struct drm_encoder_funcs panel_encoder_funcs = {
.destroy = panel_encoder_destroy,
};
/* Helper funcs driving the prepare/mode_set/commit modeset sequence. */
static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
.dpms = panel_encoder_dpms,
.mode_fixup = panel_encoder_mode_fixup,
.prepare = panel_encoder_prepare,
.commit = panel_encoder_commit,
.mode_set = panel_encoder_mode_set,
};
/*
 * Allocate and initialise the panel's DRM encoder.
 * Returns the encoder, or NULL on allocation/init failure.
 */
static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
struct panel_module *mod)
{
struct panel_encoder *panel_encoder;
struct drm_encoder *encoder;
int ret;
panel_encoder = kzalloc(sizeof(*panel_encoder), GFP_KERNEL);
if (!panel_encoder) {
dev_err(dev->dev, "allocation failed\n");
return NULL;
}
panel_encoder->mod = mod;
encoder = &panel_encoder->base;
/* bit 0: this encoder can only be driven by the first CRTC */
encoder->possible_crtcs = 1;
ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
DRM_MODE_ENCODER_LVDS);
if (ret < 0)
goto fail;
drm_encoder_helper_add(encoder, &panel_encoder_helper_funcs);
return encoder;
fail:
panel_encoder_destroy(encoder);
return NULL;
}
/*
* Connector:
*/
/* DRM connector wrapper holding the fixed encoder pairing and the
 * owning panel module. */
struct panel_connector {
struct drm_connector base;
struct drm_encoder *encoder; /* our connected encoder */
struct panel_module *mod;
};
/* recover the panel_connector from its embedded drm_connector */
#define to_panel_connector(x) container_of(x, struct panel_connector, base)
/* Tear down the DRM side of the connector, then free our wrapper. */
static void panel_connector_destroy(struct drm_connector *connector)
{
	struct panel_connector *pc = to_panel_connector(connector);

	drm_connector_cleanup(connector);
	kfree(pc);
}
/* A hard-wired panel is always present. */
static enum drm_connector_status panel_connector_detect(
		struct drm_connector *connector,
		bool force)
{
	return connector_status_connected;
}
/*
 * Populate the connector's mode list from the panel's display timings,
 * marking the native timing as preferred.  Returns the number of modes
 * added.
 *
 * Fixes: (1) drm_mode_create() can return NULL on allocation failure
 * and was previously dereferenced unchecked; (2) converting the timing
 * before allocating the mode means a bad timing entry no longer leaks
 * a drm_display_mode.
 */
static int panel_connector_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct panel_connector *panel_connector = to_panel_connector(connector);
	struct display_timings *timings = panel_connector->mod->timings;
	int i;

	for (i = 0; i < timings->num_timings; i++) {
		struct drm_display_mode *mode;
		struct videomode vm;

		if (videomode_from_timings(timings, &vm, i))
			break;

		mode = drm_mode_create(dev);
		if (!mode)
			break;

		drm_display_mode_from_videomode(&vm, mode);

		mode->type = DRM_MODE_TYPE_DRIVER;
		if (timings->native_mode == i)
			mode->type |= DRM_MODE_TYPE_PREFERRED;

		drm_mode_set_name(mode);
		drm_mode_probed_add(connector, mode);
	}

	return i;
}
/* The panel imposes no constraints of its own; defer to the CRTC. */
static int panel_connector_mode_valid(struct drm_connector *connector,
		  struct drm_display_mode *mode)
{
	struct tilcdc_drm_private *priv = connector->dev->dev_private;

	return tilcdc_crtc_mode_valid(priv->crtc, mode);
}
static struct drm_encoder *panel_connector_best_encoder(
struct drm_connector *connector)
{
struct panel_connector *panel_connector = to_panel_connector(connector);
return panel_connector->encoder;
}
/* Core connector ops: lifecycle, DPMS and probing are all delegated to
 * DRM helpers except destroy/detect, which are panel-specific. */
static const struct drm_connector_funcs panel_connector_funcs = {
	.destroy            = panel_connector_destroy,
	.dpms               = drm_helper_connector_dpms,
	.detect             = panel_connector_detect,
	.fill_modes         = drm_helper_probe_single_connector_modes,
};

/* Helper ops used by the probe helpers registered above. */
static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
	.get_modes          = panel_connector_get_modes,
	.mode_valid         = panel_connector_mode_valid,
	.best_encoder       = panel_connector_best_encoder,
};
/*
 * Allocate and register the panel's DRM connector and attach it to the
 * given encoder.  Returns the connector, or NULL on failure (the partially
 * constructed connector is destroyed on the error path).
 *
 * NOTE(review): the return value of drm_connector_init() is not checked
 * here — confirm it cannot fail in this configuration.
 */
static struct drm_connector *panel_connector_create(struct drm_device *dev,
		struct panel_module *mod, struct drm_encoder *encoder)
{
	struct panel_connector *panel_connector;
	struct drm_connector *connector;
	int ret;

	panel_connector = kzalloc(sizeof(*panel_connector), GFP_KERNEL);
	if (!panel_connector) {
		dev_err(dev->dev, "allocation failed\n");
		return NULL;
	}

	panel_connector->encoder = encoder;
	panel_connector->mod = mod;

	connector = &panel_connector->base;

	drm_connector_init(dev, connector, &panel_connector_funcs,
			DRM_MODE_CONNECTOR_LVDS);
	drm_connector_helper_add(connector, &panel_connector_helper_funcs);

	/* fixed panel: no interlace or doublescan support */
	connector->interlace_allowed = 0;
	connector->doublescan_allowed = 0;

	ret = drm_mode_connector_attach_encoder(connector, encoder);
	if (ret)
		goto fail;

	drm_sysfs_connector_add(connector);

	return connector;

fail:
	panel_connector_destroy(connector);
	return NULL;
}
/*
* Module:
*/
/*
 * Module hook: create the encoder and connector for this panel and
 * register them with the tilcdc private state.
 *
 * NOTE(review): if connector creation fails, the just-created encoder is
 * not destroyed here — presumably it is reclaimed by the DRM mode-config
 * teardown; confirm against the driver's unload path.
 */
static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
{
	struct panel_module *panel_mod = to_panel_module(mod);
	struct tilcdc_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	struct drm_connector *connector;

	encoder = panel_encoder_create(dev, panel_mod);
	if (!encoder)
		return -ENOMEM;

	connector = panel_connector_create(dev, panel_mod, encoder);
	if (!connector)
		return -ENOMEM;

	priv->encoders[priv->num_encoders++] = encoder;
	priv->connectors[priv->num_connectors++] = connector;

	return 0;
}
/* Module hook: release everything panel_probe() allocated — display
 * timings, module registration, panel info and the module struct itself.
 * Safe to call from the probe error path (timings/info may be NULL). */
static void panel_destroy(struct tilcdc_module *mod)
{
	struct panel_module *panel_mod = to_panel_module(mod);

	if (panel_mod->timings) {
		display_timings_release(panel_mod->timings);
		kfree(panel_mod->timings);
	}

	tilcdc_module_cleanup(mod);
	kfree(panel_mod->info);	/* kfree(NULL) is a no-op */
	kfree(panel_mod);
}
/* Ops vector handed to tilcdc_module_init() in panel_probe(). */
static const struct tilcdc_module_ops panel_module_ops = {
		.modeset_init = panel_modeset_init,
		.destroy = panel_destroy,
};
/*
* Device:
*/
/* maybe move this somewhere common if it is needed by other outputs? */
static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
{
struct device_node *info_np;
struct tilcdc_panel_info *info;
int ret = 0;
if (!np) {
pr_err("%s: no devicenode given\n", __func__);
return NULL;
}
info_np = of_get_child_by_name(np, "panel-info");
if (!info_np) {
pr_err("%s: could not find panel-info node\n", __func__);
return NULL;
}
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
pr_err("%s: allocation failed\n", __func__);
return NULL;
}
ret |= of_property_read_u32(info_np, "ac-bias", &info->ac_bias);
ret |= of_property_read_u32(info_np, "ac-bias-intrpt", &info->ac_bias_intrpt);
ret |= of_property_read_u32(info_np, "dma-burst-sz", &info->dma_burst_sz);
ret |= of_property_read_u32(info_np, "bpp", &info->bpp);
ret |= of_property_read_u32(info_np, "fdd", &info->fdd);
ret |= of_property_read_u32(info_np, "sync-edge", &info->sync_edge);
ret |= of_property_read_u32(info_np, "sync-ctrl", &info->sync_ctrl);
ret |= of_property_read_u32(info_np, "raster-order", &info->raster_order);
ret |= of_property_read_u32(info_np, "fifo-th", &info->fifo_th);
/* optional: */
info->tft_alt_mode = of_property_read_bool(info_np, "tft-alt-mode");
info->invert_pxl_clk = of_property_read_bool(info_np, "invert-pxl-clk");
if (ret) {
pr_err("%s: error reading panel-info properties\n", __func__);
kfree(info);
return NULL;
}
return info;
}
static struct of_device_id panel_of_match[];
/*
 * Platform probe: build the panel module from device-tree data.
 *
 * Allocates the module state, registers it with tilcdc, and parses the
 * display timings, panel-info node and optional backlight.  On any DT
 * parse failure, panel_destroy() unwinds whatever was set up so far.
 */
static int panel_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct panel_module *panel_mod;
	struct tilcdc_module *mod;
	struct pinctrl *pinctrl;
	int ret = -EINVAL;

	/* bail out early if no DT data: */
	if (!node) {
		dev_err(&pdev->dev, "device-tree data is missing\n");
		return -ENXIO;
	}

	panel_mod = kzalloc(sizeof(*panel_mod), GFP_KERNEL);
	if (!panel_mod)
		return -ENOMEM;

	mod = &panel_mod->base;

	tilcdc_module_init(mod, "panel", &panel_module_ops);

	/* pin setup is best-effort: warn but continue without it */
	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(pinctrl))
		dev_warn(&pdev->dev, "pins are not configured\n");

	panel_mod->timings = of_get_display_timings(node);
	if (!panel_mod->timings) {
		dev_err(&pdev->dev, "could not get panel timings\n");
		goto fail;
	}

	panel_mod->info = of_get_panel_info(node);
	if (!panel_mod->info) {
		dev_err(&pdev->dev, "could not get panel info\n");
		goto fail;
	}

	mod->preferred_bpp = panel_mod->info->bpp;

	/* optional backlight device */
	panel_mod->backlight = of_find_backlight_by_node(node);
	if (panel_mod->backlight)
		dev_info(&pdev->dev, "found backlight\n");

	return 0;

fail:
	panel_destroy(mod);
	return ret;
}
/* Platform remove: nothing to do — teardown happens via the module's
 * destroy hook (panel_destroy), driven by the tilcdc core. */
static int panel_remove(struct platform_device *pdev)
{
	return 0;
}
/* Device-tree match table (forward-declared above panel_probe). */
static struct of_device_id panel_of_match[] = {
		{ .compatible = "ti,tilcdc,panel", },
		{ },
};

/* Platform driver registered by tilcdc_panel_init(). */
struct platform_driver panel_driver = {
	.probe = panel_probe,
	.remove = panel_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = "panel",
		.of_match_table = panel_of_match,
	},
};
/* Register the panel platform driver (called from tilcdc core init). */
int __init tilcdc_panel_init(void)
{
	return platform_driver_register(&panel_driver);
}
/* Unregister the panel platform driver (called from tilcdc core exit). */
void __exit tilcdc_panel_fini(void)
{
	platform_driver_unregister(&panel_driver);
}
| gpl-2.0 |
Chaosz-X/flyer_7x30_xkics_kernel | arch/um/drivers/hostaudio_kern.c | 738 | 7312 | /*
* Copyright (C) 2002 Steve Schmidtke
* Licensed under the GPL
*/
#include "linux/fs.h"
#include "linux/module.h"
#include "linux/slab.h"
#include "linux/sound.h"
#include "linux/soundcard.h"
#include "asm/uaccess.h"
#include "init.h"
#include "os.h"
/* Per-open state for /dev/dsp: the host-side file descriptor. */
struct hostaudio_state {
	int fd;
};

/* Per-open state for /dev/mixer: the host-side file descriptor. */
struct hostmixer_state {
	int fd;
};

/* Default host device paths relayed to. */
#define HOSTAUDIO_DEV_DSP "/dev/sound/dsp"
#define HOSTAUDIO_DEV_MIXER "/dev/sound/mixer"

/*
 * Changed either at boot time or module load time.  At boot, this is
 * single-threaded; at module load, multiple modules would each have
 * their own copy of these variables.
 */
static char *dsp = HOSTAUDIO_DEV_DSP;
static char *mixer = HOSTAUDIO_DEV_MIXER;

/* Help text shared between the boot-time and module-parameter paths. */
#define DSP_HELP \
	"    This is used to specify the host dsp device to the hostaudio driver.\n" \
	"    The default is \"" HOSTAUDIO_DEV_DSP "\".\n\n"
#define MIXER_HELP \
	"    This is used to specify the host mixer device to the hostaudio driver.\n"\
	"    The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n"
#ifndef MODULE
/* Built-in: accept "dsp=" on the kernel command line. */
static int set_dsp(char *name, int *add)
{
	dsp = name;
	return 0;
}

__uml_setup("dsp=", set_dsp, "dsp=<dsp device>\n" DSP_HELP);

/* Built-in: accept "mixer=" on the kernel command line. */
static int set_mixer(char *name, int *add)
{
	mixer = name;
	return 0;
}

__uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP);

#else /*MODULE*/
/* Module build: expose the same knobs as module parameters instead. */
module_param(dsp, charp, 0644);
MODULE_PARM_DESC(dsp, DSP_HELP);

module_param(mixer, charp, 0644);
MODULE_PARM_DESC(mixer, MIXER_HELP);
#endif
/* /dev/dsp file operations */
/*
 * Read audio data from the host dsp fd into the user buffer.
 *
 * Bounces through a kernel buffer because os_read_file() operates on
 * kernel memory.  Returns bytes read, or a negative errno.
 *
 * Fix: the debug printk used %d for a size_t argument — a format-string
 * type mismatch (undefined behavior per the printf contract); use %zu.
 */
static ssize_t hostaudio_read(struct file *file, char __user *buffer,
			      size_t count, loff_t *ppos)
{
	struct hostaudio_state *state = file->private_data;
	void *kbuf;
	int err;

#ifdef DEBUG
	printk(KERN_DEBUG "hostaudio: read called, count = %zu\n", count);
#endif

	kbuf = kmalloc(count, GFP_KERNEL);
	if (kbuf == NULL)
		return -ENOMEM;

	err = os_read_file(state->fd, kbuf, count);
	if (err < 0)
		goto out;

	if (copy_to_user(buffer, kbuf, err))
		err = -EFAULT;

out:
	kfree(kbuf);
	return err;
}
/*
 * Write audio data from the user buffer to the host dsp fd.
 *
 * Copies into a kernel bounce buffer first, then advances *ppos by the
 * number of bytes actually written.  Returns bytes written or -errno.
 *
 * Fix: the debug printk used %d for a size_t argument — a format-string
 * type mismatch; use %zu.
 */
static ssize_t hostaudio_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *ppos)
{
	struct hostaudio_state *state = file->private_data;
	void *kbuf;
	int err;

#ifdef DEBUG
	printk(KERN_DEBUG "hostaudio: write called, count = %zu\n", count);
#endif

	kbuf = kmalloc(count, GFP_KERNEL);
	if (kbuf == NULL)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(kbuf, buffer, count))
		goto out;

	err = os_write_file(state->fd, kbuf, count);
	if (err < 0)
		goto out;
	*ppos += err;

out:
	kfree(kbuf);
	return err;
}
/* Poll is not implemented: always report no events ready. */
static unsigned int hostaudio_poll(struct file *file,
				   struct poll_table_struct *wait)
{
#ifdef DEBUG
	printk(KERN_DEBUG "hostaudio: poll called (unimplemented)\n");
#endif

	return 0;
}
/*
 * Relay OSS dsp ioctls to the host fd.
 *
 * For ioctls whose argument is an int passed by pointer, the value is
 * copied in from userspace before the call and copied back afterwards,
 * since os_ioctl_generic() works on a kernel-space pointer.
 */
static long hostaudio_ioctl(struct file *file,
			    unsigned int cmd, unsigned long arg)
{
	struct hostaudio_state *state = file->private_data;
	unsigned long data = 0;
	int err;

#ifdef DEBUG
	printk(KERN_DEBUG "hostaudio: ioctl called, cmd = %u\n", cmd);
#endif
	/* copy-in: these commands take an int __user * argument */
	switch(cmd){
	case SNDCTL_DSP_SPEED:
	case SNDCTL_DSP_STEREO:
	case SNDCTL_DSP_GETBLKSIZE:
	case SNDCTL_DSP_CHANNELS:
	case SNDCTL_DSP_SUBDIVIDE:
	case SNDCTL_DSP_SETFRAGMENT:
		if (get_user(data, (int __user *) arg))
			return -EFAULT;
		break;
	default:
		break;
	}

	/* perform the ioctl against the host fd with the kernel copy */
	err = os_ioctl_generic(state->fd, cmd, (unsigned long) &data);

	/* copy-out: same command set may have been updated by the host */
	switch(cmd){
	case SNDCTL_DSP_SPEED:
	case SNDCTL_DSP_STEREO:
	case SNDCTL_DSP_GETBLKSIZE:
	case SNDCTL_DSP_CHANNELS:
	case SNDCTL_DSP_SUBDIVIDE:
	case SNDCTL_DSP_SETFRAGMENT:
		if (put_user(data, (int __user *) arg))
			return -EFAULT;
		break;
	default:
		break;
	}

	return err;
}
/*
 * Open /dev/dsp: open the configured host dsp device with matching
 * read/write mode and stash the host fd in per-open state.
 */
static int hostaudio_open(struct inode *inode, struct file *file)
{
	struct hostaudio_state *state;
	int r = 0, w = 0;
	int ret;

#ifdef DEBUG
	printk(KERN_DEBUG "hostaudio: open called (host: %s)\n", dsp);
#endif

	state = kmalloc(sizeof(struct hostaudio_state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;

	/* mirror the caller's access mode onto the host open flags */
	if (file->f_mode & FMODE_READ)
		r = 1;
	if (file->f_mode & FMODE_WRITE)
		w = 1;

	ret = os_open_file(dsp, of_set_rw(OPENFLAGS(), r, w), 0);
	if (ret < 0) {
		kfree(state);
		return ret;
	}

	state->fd = ret;
	file->private_data = state;
	return 0;
}
/* Close /dev/dsp: shut the host fd and free the per-open state. */
static int hostaudio_release(struct inode *inode, struct file *file)
{
	struct hostaudio_state *state = file->private_data;

#ifdef DEBUG
	printk(KERN_DEBUG "hostaudio: release called\n");
#endif
	os_close_file(state->fd);
	kfree(state);

	return 0;
}
/* /dev/mixer file operations */
/* Mixer ioctls need no argument marshalling — forward them verbatim. */
static long hostmixer_ioctl_mixdev(struct file *file,
				   unsigned int cmd, unsigned long arg)
{
	struct hostmixer_state *state = file->private_data;

#ifdef DEBUG
	printk(KERN_DEBUG "hostmixer: ioctl called\n");
#endif

	return os_ioctl_generic(state->fd, cmd, arg);
}
static int hostmixer_open_mixdev(struct inode *inode, struct file *file)
{
struct hostmixer_state *state;
int r = 0, w = 0;
int ret;
#ifdef DEBUG
printk(KERN_DEBUG "hostmixer: open called (host: %s)\n", mixer);
#endif
state = kmalloc(sizeof(struct hostmixer_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
if (file->f_mode & FMODE_READ)
r = 1;
if (file->f_mode & FMODE_WRITE)
w = 1;
ret = os_open_file(mixer, of_set_rw(OPENFLAGS(), r, w), 0);
if (ret < 0) {
printk(KERN_ERR "hostaudio_open_mixdev failed to open '%s', "
"err = %d\n", dsp, -ret);
kfree(state);
return ret;
}
file->private_data = state;
return 0;
}
/* Close /dev/mixer: shut the host fd and free the per-open state. */
static int hostmixer_release(struct inode *inode, struct file *file)
{
	struct hostmixer_state *state = file->private_data;

#ifdef DEBUG
	printk(KERN_DEBUG "hostmixer: release called\n");
#endif
	os_close_file(state->fd);
	kfree(state);

	return 0;
}
/* kernel module operations */
/* File operations registered for the dsp device (mmap unsupported). */
static const struct file_operations hostaudio_fops = {
	.owner          = THIS_MODULE,
	.llseek         = no_llseek,
	.read           = hostaudio_read,
	.write          = hostaudio_write,
	.poll           = hostaudio_poll,
	.unlocked_ioctl = hostaudio_ioctl,
	.mmap           = NULL,
	.open           = hostaudio_open,
	.release        = hostaudio_release,
};

/* File operations registered for the mixer device (ioctl-only). */
static const struct file_operations hostmixer_fops = {
	.owner          = THIS_MODULE,
	.llseek         = no_llseek,
	.unlocked_ioctl = hostmixer_ioctl_mixdev,
	.open           = hostmixer_open_mixdev,
	.release        = hostmixer_release,
};
/*
 * Minor numbers returned by register_sound_dsp()/register_sound_mixer(),
 * kept for unregistration at module unload.
 *
 * Fix: made static — this anonymous-struct global is file-local and was
 * previously exported into the kernel's global namespace.
 */
static struct {
	int dev_audio;
	int dev_mixer;
} module_data;

MODULE_AUTHOR("Steve Schmidtke");
MODULE_DESCRIPTION("UML Audio Relay");
MODULE_LICENSE("GPL");
/*
 * Module init: register the dsp and mixer sound devices.  If the mixer
 * registration fails, the already-registered dsp device is unwound.
 */
static int __init hostaudio_init_module(void)
{
	printk(KERN_INFO "UML Audio Relay (host dsp = %s, host mixer = %s)\n",
	       dsp, mixer);

	module_data.dev_audio = register_sound_dsp(&hostaudio_fops, -1);
	if (module_data.dev_audio < 0) {
		printk(KERN_ERR "hostaudio: couldn't register DSP device!\n");
		return -ENODEV;
	}

	module_data.dev_mixer = register_sound_mixer(&hostmixer_fops, -1);
	if (module_data.dev_mixer < 0) {
		printk(KERN_ERR "hostmixer: couldn't register mixer "
		       "device!\n");
		/* roll back the dsp registration */
		unregister_sound_dsp(module_data.dev_audio);
		return -ENODEV;
	}

	return 0;
}
/* Module exit: unregister both sound devices in reverse order. */
static void __exit hostaudio_cleanup_module (void)
{
	unregister_sound_mixer(module_data.dev_mixer);
	unregister_sound_dsp(module_data.dev_audio);
}

module_init(hostaudio_init_module);
module_exit(hostaudio_cleanup_module);
| gpl-2.0 |
BuzzBumbleBee/linux-htc-acevivo | drivers/hwmon/max6650.c | 994 | 20901 | /*
* max6650.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring.
*
* (C) 2007 by Hans J. Koch <hjk@linutronix.de>
*
* based on code written by John Morris <john.morris@spirentcom.com>
* Copyright (c) 2003 Spirent Communications
* and Claus Gindhart <claus.gindhart@kontron.com>
*
* This module has only been tested with the MAX6650 chip. It should
* also work with the MAX6651. It does not distinguish max6650 and max6651
* chips.
*
* The datasheet was last seen at:
*
* http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
/*
* Addresses to scan. There are four disjoint possibilities, by pin config.
*/
/* I2C addresses probed during detection (selected by chip pin strap). */
static const unsigned short normal_i2c[] = {0x1b, 0x1f, 0x48, 0x4b,
						I2C_CLIENT_END};

/*
 * Insmod parameters
 */

/* fan_voltage: 5=5V fan, 12=12V fan, 0=don't change */
static int fan_voltage;
/* prescaler: Possible values are 1, 2, 4, 8, 16 or 0 for don't change */
static int prescaler;
/* clock: The clock frequency of the chip the driver should assume */
static int clock = 254000;

/* read-only module parameters (S_IRUGO: visible in sysfs, not writable) */
module_param(fan_voltage, int, S_IRUGO);
module_param(prescaler, int, S_IRUGO);
module_param(clock, int, S_IRUGO);
/*
* MAX 6650/6651 registers
*/
#define MAX6650_REG_SPEED 0x00
#define MAX6650_REG_CONFIG 0x02
#define MAX6650_REG_GPIO_DEF 0x04
#define MAX6650_REG_DAC 0x06
#define MAX6650_REG_ALARM_EN 0x08
#define MAX6650_REG_ALARM 0x0A
#define MAX6650_REG_TACH0 0x0C
#define MAX6650_REG_TACH1 0x0E
#define MAX6650_REG_TACH2 0x10
#define MAX6650_REG_TACH3 0x12
#define MAX6650_REG_GPIO_STAT 0x14
#define MAX6650_REG_COUNT 0x16
/*
* Config register bits
*/
#define MAX6650_CFG_V12 0x08
#define MAX6650_CFG_PRESCALER_MASK 0x07
#define MAX6650_CFG_PRESCALER_2 0x01
#define MAX6650_CFG_PRESCALER_4 0x02
#define MAX6650_CFG_PRESCALER_8 0x03
#define MAX6650_CFG_PRESCALER_16 0x04
#define MAX6650_CFG_MODE_MASK 0x30
#define MAX6650_CFG_MODE_ON 0x00
#define MAX6650_CFG_MODE_OFF 0x10
#define MAX6650_CFG_MODE_CLOSED_LOOP 0x20
#define MAX6650_CFG_MODE_OPEN_LOOP 0x30
#define MAX6650_COUNT_MASK 0x03
/*
* Alarm status register bits
*/
#define MAX6650_ALRM_MAX 0x01
#define MAX6650_ALRM_MIN 0x02
#define MAX6650_ALRM_TACH 0x04
#define MAX6650_ALRM_GPIO1 0x08
#define MAX6650_ALRM_GPIO2 0x10
/* Minimum and maximum values of the FAN-RPM */
#define FAN_RPM_MIN 240
#define FAN_RPM_MAX 30000
#define DIV_FROM_REG(reg) (1 << (reg & 7))
static int max6650_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int max6650_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int max6650_init_client(struct i2c_client *client);
static int max6650_remove(struct i2c_client *client);
static struct max6650_data *max6650_update_device(struct device *dev);
/*
* Driver data (common to all clients)
*/
/* Device IDs handled by this driver (MAX6651 is not distinguished). */
static const struct i2c_device_id max6650_id[] = {
	{ "max6650", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, max6650_id);

/* I2C driver with auto-detection over the normal_i2c address list. */
static struct i2c_driver max6650_driver = {
	.class		= I2C_CLASS_HWMON,
	.driver = {
		.name	= "max6650",
	},
	.probe		= max6650_probe,
	.remove		= max6650_remove,
	.id_table	= max6650_id,
	.detect		= max6650_detect,
	.address_list	= normal_i2c,
};
/*
* Client data (each client gets its own)
*/
/* Per-client state: cached register values refreshed at most once per
 * second by max6650_update_device(), guarded by update_lock. */
struct max6650_data
{
	struct device *hwmon_dev;	/* registered hwmon class device */
	struct mutex update_lock;	/* protects the cached registers */
	char valid;		/* zero until following fields are valid */
	unsigned long last_updated;	/* in jiffies */

	/* register values */
	u8 speed;		/* KTACH closed-loop speed setting */
	u8 config;		/* mode, voltage and prescaler bits */
	u8 tach[4];		/* raw tachometer counts, fans 1-4 */
	u8 count;		/* tachometer count-time selector */
	u8 dac;			/* open-loop DAC value */
	u8 alarm;		/* latched alarm status bits */
};
/* sysfs show for fanN_input: convert a raw tach count to RPM. */
static ssize_t get_fan(struct device *dev, struct device_attribute *devattr,
		       char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct max6650_data *data = max6650_update_device(dev);

	/*
	 * Each tachometer counts over an interval selected by the "count"
	 * register (0.25, 0.5, 1 or 2 seconds).  Assuming the common case
	 * of two pulses per revolution:
	 *
	 *     rpm = count * 60 * 2 / interval = count * 120 / divider
	 */
	int rpm = (data->tach[attr->index] * 120) / DIV_FROM_REG(data->count);

	return sprintf(buf, "%d\n", rpm);
}
/*
* Set the fan speed to the specified RPM (or read back the RPM setting).
* This works in closed loop mode only. Use pwm1 for open loop speed setting.
*
* The MAX6650/1 will automatically control fan speed when in closed loop
* mode.
*
* Assumptions:
*
* 1) The MAX6650/1 internal 254kHz clock frequency is set correctly. Use
* the clock module parameter if you need to fine tune this.
*
* 2) The prescaler (low three bits of the config register) has already
* been set to an appropriate value. Use the prescaler module parameter
* if your BIOS doesn't initialize the chip properly.
*
* The relevant equations are given on pages 21 and 22 of the datasheet.
*
* From the datasheet, the relevant equation when in regulation is:
*
* [fCLK / (128 x (KTACH + 1))] = 2 x FanSpeed / KSCALE
*
* where:
*
* fCLK is the oscillator frequency (either the 254kHz internal
* oscillator or the externally applied clock)
*
* KTACH is the value in the speed register
*
* FanSpeed is the speed of the fan in rps
*
* KSCALE is the prescaler value (1, 2, 4, 8, or 16)
*
* When reading, we need to solve for FanSpeed. When writing, we need to
* solve for KTACH.
*
* Note: this tachometer is completely separate from the tachometers
* used to measure the fan speeds. Only one fan's speed (fan1) is
* controlled.
*/
/* sysfs show for fan1_target: derive the regulated RPM from KTACH. */
static ssize_t get_target(struct device *dev, struct device_attribute *devattr,
			  char *buf)
{
	struct max6650_data *data = max6650_update_device(dev);
	int kscale = DIV_FROM_REG(data->config);
	int ktach = data->speed;

	/*
	 * Datasheet equation (rps), scaled by 60 to yield rpm:
	 *
	 *     FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)]
	 */
	int rpm = 60 * kscale * clock / (256 * (ktach + 1));

	return sprintf(buf, "%d\n", rpm);
}
/* sysfs store for fan1_target: program the closed-loop speed (KTACH). */
static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
			  const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);
	/* NOTE(review): simple_strtoul() silently accepts malformed input;
	 * kstrtoul() with error checking would be the modern choice. */
	int rpm = simple_strtoul(buf, NULL, 10);
	int kscale, ktach;

	/* clamp to the chip's usable range before converting */
	rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX);

	/*
	 * Divide the required speed by 60 to get from rpm to rps, then
	 * use the datasheet equation:
	 *
	 *     KTACH = [(fCLK x KSCALE) / (256 x FanSpeed)] - 1
	 */

	mutex_lock(&data->update_lock);

	kscale = DIV_FROM_REG(data->config);
	ktach = ((clock * kscale) / (256 * rpm / 60)) - 1;
	/* clamp the register value to its 8-bit range */
	if (ktach < 0)
		ktach = 0;
	if (ktach > 255)
		ktach = 255;
	data->speed = ktach;

	i2c_smbus_write_byte_data(client, MAX6650_REG_SPEED, data->speed);

	mutex_unlock(&data->update_lock);

	return count;
}
/*
* Get/set the fan speed in open loop mode using pwm1 sysfs file.
* Speed is given as a relative value from 0 to 255, where 255 is maximum
* speed. Note that this is done by writing directly to the chip's DAC,
* it won't change the closed loop speed set by fan1_target.
* Also note that due to rounding errors it is possible that you don't read
* back exactly the value you have set.
*/
/* sysfs show for pwm1: map the DAC register back onto the 0-255 scale. */
static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr,
		       char *buf)
{
	struct max6650_data *data = max6650_update_device(dev);
	int full_scale;
	int pwm;

	/*
	 * Useful DAC range is 0-180 for 12V fans and 0-76 for 5V fans;
	 * lower DAC values mean higher speeds.
	 */
	full_scale = (data->config & MAX6650_CFG_V12) ? 180 : 76;
	pwm = 255 - (255 * (int)data->dac) / full_scale;
	if (pwm < 0)
		pwm = 0;

	return sprintf(buf, "%d\n", pwm);
}
/* sysfs store for pwm1: set open-loop speed by writing the DAC. */
static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
			const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);
	int pwm = simple_strtoul(buf, NULL, 10);

	pwm = SENSORS_LIMIT(pwm, 0, 255);

	mutex_lock(&data->update_lock);

	/* invert and rescale: DAC full scale depends on fan voltage */
	if (data->config & MAX6650_CFG_V12)
		data->dac = 180 - (180 * pwm)/255;
	else
		data->dac = 76 - (76 * pwm)/255;

	i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, data->dac);

	mutex_unlock(&data->update_lock);

	return count;
}
/*
* Get/Set controller mode:
* Possible values:
* 0 = Fan always on
* 1 = Open loop, Voltage is set according to speed, not regulated.
* 2 = Closed loop, RPM for all fans regulated by fan1 tachometer
*/
/* sysfs show for pwm1_enable: translate the chip mode bits to the hwmon
 * convention (0 = full on, 1 = open loop, 2 = closed loop).  Chip modes
 * "off" (1) and "open loop" (3) both map to sysfs value 1. */
static ssize_t get_enable(struct device *dev, struct device_attribute *devattr,
			  char *buf)
{
	struct max6650_data *data = max6650_update_device(dev);
	int mode = (data->config & MAX6650_CFG_MODE_MASK) >> 4;
	int sysfs_modes[4] = {0, 1, 2, 1};

	return sprintf(buf, "%d\n", sysfs_modes[mode]);
}
/* sysfs store for pwm1_enable: translate the hwmon mode (0-2) to the
 * chip's mode bits and update the config register read-modify-write. */
static ssize_t set_enable(struct device *dev, struct device_attribute *devattr,
			  const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);
	int mode = simple_strtoul(buf, NULL, 10);
	/* map sysfs 0/1/2 to chip mode field: on=0, open-loop=3, closed=2 */
	int max6650_modes[3] = {0, 3, 2};

	if ((mode < 0)||(mode > 2)) {
		dev_err(&client->dev,
			"illegal value for pwm1_enable (%d)\n", mode);
		return -EINVAL;
	}

	mutex_lock(&data->update_lock);

	/* re-read config in case firmware/BIOS changed it behind us */
	data->config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);
	data->config = (data->config & ~MAX6650_CFG_MODE_MASK)
		       | (max6650_modes[mode] << 4);

	i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, data->config);

	mutex_unlock(&data->update_lock);

	return count;
}
/*
* Read/write functions for fan1_div sysfs file. The MAX6650 has no such
* divider. We handle this by converting between divider and counttime:
*
* (counttime == k) <==> (divider == 2^k), k = 0, 1, 2, or 3
*
* Lower values of k allow to connect a faster fan without the risk of
* counter overflow. The price is lower resolution. You can also set counttime
* using the module parameter. Note that the module parameter "prescaler" also
* influences the behaviour. Unfortunately, there's no sysfs attribute
* defined for that. See the data sheet for details.
*/
/* sysfs show for fan1_div: report the divider derived from counttime. */
static ssize_t get_div(struct device *dev, struct device_attribute *devattr,
		       char *buf)
{
	struct max6650_data *data = max6650_update_device(dev);

	return sprintf(buf, "%d\n", DIV_FROM_REG(data->count));
}
/* sysfs store for fan1_div: accept dividers 1/2/4/8 and write the
 * corresponding counttime k (divider == 2^k) to the COUNT register. */
static ssize_t set_div(struct device *dev, struct device_attribute *devattr,
		       const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);
	int div = simple_strtoul(buf, NULL, 10);

	mutex_lock(&data->update_lock);
	switch (div) {
	case 1:
		data->count = 0;
		break;
	case 2:
		data->count = 1;
		break;
	case 4:
		data->count = 2;
		break;
	case 8:
		data->count = 3;
		break;
	default:
		/* drop the lock before returning the error */
		mutex_unlock(&data->update_lock);
		dev_err(&client->dev,
			"illegal value for fan divider (%d)\n", div);
		return -EINVAL;
	}

	i2c_smbus_write_byte_data(client, MAX6650_REG_COUNT, data->count);
	mutex_unlock(&data->update_lock);

	return count;
}
/*
* Get alarm stati:
* Possible values:
* 0 = no alarm
* 1 = alarm
*/
/*
 * sysfs show for the alarm files: report whether the alarm bit selected
 * by attr->index is latched.  Reading clears the reported bit from the
 * latched copy and re-reads the hardware register, since the chip clears
 * alarms on read and several sysfs files share the one register.
 */
static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct max6650_data *data = max6650_update_device(dev);
	struct i2c_client *client = to_i2c_client(dev);
	int alarm = 0;

	if (data->alarm & attr->index) {
		mutex_lock(&data->update_lock);
		alarm = 1;
		data->alarm &= ~attr->index;
		/* re-latch any alarms raised since the last update */
		data->alarm |= i2c_smbus_read_byte_data(client,
							MAX6650_REG_ALARM);
		mutex_unlock(&data->update_lock);
	}

	return sprintf(buf, "%d\n", alarm);
}
/* Sensor attributes: the index selects the tach channel (fan inputs) or
 * the alarm bit (alarm files); plain DEVICE_ATTRs need no index. */
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3);
static DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, set_target);
static DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div);
static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable);
static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
static SENSOR_DEVICE_ATTR(fan1_max_alarm, S_IRUGO, get_alarm, NULL,
			  MAX6650_ALRM_MAX);
static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, get_alarm, NULL,
			  MAX6650_ALRM_MIN);
static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_alarm, NULL,
			  MAX6650_ALRM_TACH);
static SENSOR_DEVICE_ATTR(gpio1_alarm, S_IRUGO, get_alarm, NULL,
			  MAX6650_ALRM_GPIO1);
static SENSOR_DEVICE_ATTR(gpio2_alarm, S_IRUGO, get_alarm, NULL,
			  MAX6650_ALRM_GPIO2);
/*
 * Attribute-group visibility callback: hide the alarm sysfs files whose
 * corresponding bits are not enabled in the ALARM_EN register (typically
 * configured by the firmware).  Returns 0 to hide, else the attr's mode.
 */
static mode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
				    int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct i2c_client *client = to_i2c_client(dev);
	u8 alarm_en = i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN);
	struct device_attribute *devattr;

	/*
	 * Hide the alarms that have not been enabled by the firmware
	 */

	devattr = container_of(a, struct device_attribute, attr);
	if (devattr == &sensor_dev_attr_fan1_max_alarm.dev_attr
	 || devattr == &sensor_dev_attr_fan1_min_alarm.dev_attr
	 || devattr == &sensor_dev_attr_fan1_fault.dev_attr
	 || devattr == &sensor_dev_attr_gpio1_alarm.dev_attr
	 || devattr == &sensor_dev_attr_gpio2_alarm.dev_attr) {
		/* each alarm attr's index doubles as its enable-bit mask */
		if (!(alarm_en & to_sensor_dev_attr(devattr)->index))
			return 0;
	}

	return a->mode;
}
/* All sysfs attributes; visibility of the alarm entries is filtered at
 * registration time by max6650_attrs_visible(). */
static struct attribute *max6650_attrs[] = {
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan2_input.dev_attr.attr,
	&sensor_dev_attr_fan3_input.dev_attr.attr,
	&sensor_dev_attr_fan4_input.dev_attr.attr,
	&dev_attr_fan1_target.attr,
	&dev_attr_fan1_div.attr,
	&dev_attr_pwm1_enable.attr,
	&dev_attr_pwm1.attr,
	&sensor_dev_attr_fan1_max_alarm.dev_attr.attr,
	&sensor_dev_attr_fan1_min_alarm.dev_attr.attr,
	&sensor_dev_attr_fan1_fault.dev_attr.attr,
	&sensor_dev_attr_gpio1_alarm.dev_attr.attr,
	&sensor_dev_attr_gpio2_alarm.dev_attr.attr,
	NULL
};

static struct attribute_group max6650_attr_grp = {
	.attrs = max6650_attrs,
	.is_visible = max6650_attrs_visible,
};
/*
* Real code
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
/*
 * I2C detection callback.  Identifies the chip by checking that the
 * reserved (always-zero) bits of several registers read back as zero.
 * Return 0 if detection is successful, -ENODEV otherwise.
 */
static int max6650_detect(struct i2c_client *client,
			  struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;
	int address = client->addr;

	dev_dbg(&adapter->dev, "max6650_detect called\n");

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_dbg(&adapter->dev, "max6650: I2C bus doesn't support "
					"byte read mode, skipping.\n");
		return -ENODEV;
	}

	/* reserved high bits of these registers must read as zero */
	if (((i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG) & 0xC0)
	    ||(i2c_smbus_read_byte_data(client, MAX6650_REG_GPIO_STAT) & 0xE0)
	    ||(i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN) & 0xE0)
	    ||(i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM) & 0xE0)
	    ||(i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT) & 0xFC))) {
		dev_dbg(&adapter->dev,
			"max6650: detection failed at 0x%02x.\n", address);
		return -ENODEV;
	}

	dev_info(&adapter->dev, "max6650: chip found at 0x%02x.\n", address);

	strlcpy(info->type, "max6650", I2C_NAME_SIZE);

	return 0;
}
/*
 * Probe: allocate client state, initialize the chip, create the sysfs
 * group and register the hwmon device.  Unwinds in reverse on failure.
 */
static int max6650_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct max6650_data *data;
	int err;

	if (!(data = kzalloc(sizeof(struct max6650_data), GFP_KERNEL))) {
		dev_err(&client->dev, "out of memory.\n");
		return -ENOMEM;
	}

	i2c_set_clientdata(client, data);
	mutex_init(&data->update_lock);

	/*
	 * Initialize the max6650 chip
	 */
	err = max6650_init_client(client);
	if (err)
		goto err_free;

	err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp);
	if (err)
		goto err_free;

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (!IS_ERR(data->hwmon_dev))
		return 0;

	/* hwmon registration failed: undo the sysfs group, then free */
	err = PTR_ERR(data->hwmon_dev);
	dev_err(&client->dev, "error registering hwmon device.\n");
	sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
err_free:
	kfree(data);
	return err;
}
/* Remove: unwind probe — sysfs group, hwmon device, then client state. */
static int max6650_remove(struct i2c_client *client)
{
	struct max6650_data *data = i2c_get_clientdata(client);

	sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
	hwmon_device_unregister(data->hwmon_dev);
	kfree(data);

	return 0;
}
/*
 * Initialize the chip according to the module parameters: fan voltage
 * and prescaler are applied to the config register; a chip left in
 * "full off" mode is switched to open loop with the DAC at full scale,
 * which is equivalent but representable in the hwmon model.
 * Returns 0 on success, -EIO on any register access failure.
 */
static int max6650_init_client(struct i2c_client *client)
{
	struct max6650_data *data = i2c_get_clientdata(client);
	int config;
	int err = -EIO;

	config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);

	if (config < 0) {
		dev_err(&client->dev, "Error reading config, aborting.\n");
		return err;
	}

	/* apply the fan_voltage module parameter (0 = leave unchanged) */
	switch (fan_voltage) {
		case 0:
			break;
		case 5:
			config &= ~MAX6650_CFG_V12;
			break;
		case 12:
			config |= MAX6650_CFG_V12;
			break;
		default:
			/* NOTE(review): invalid values are reported but
			 * otherwise ignored — init continues. */
			dev_err(&client->dev,
				"illegal value for fan_voltage (%d)\n",
				fan_voltage);
	}

	dev_info(&client->dev, "Fan voltage is set to %dV.\n",
		 (config & MAX6650_CFG_V12) ? 12 : 5);

	/* apply the prescaler module parameter (0 = leave unchanged) */
	switch (prescaler) {
		case 0:
			break;
		case 1:
			config &= ~MAX6650_CFG_PRESCALER_MASK;
			break;
		case 2:
			config = (config & ~MAX6650_CFG_PRESCALER_MASK)
				 | MAX6650_CFG_PRESCALER_2;
			break;
		case  4:
			config = (config & ~MAX6650_CFG_PRESCALER_MASK)
				 | MAX6650_CFG_PRESCALER_4;
			break;
		case  8:
			config = (config & ~MAX6650_CFG_PRESCALER_MASK)
				 | MAX6650_CFG_PRESCALER_8;
			break;
		case 16:
			config = (config & ~MAX6650_CFG_PRESCALER_MASK)
				 | MAX6650_CFG_PRESCALER_16;
			break;
		default:
			dev_err(&client->dev,
				"illegal value for prescaler (%d)\n",
				prescaler);
	}

	dev_info(&client->dev, "Prescaler is set to %d.\n",
		 1 << (config & MAX6650_CFG_PRESCALER_MASK));

	/* If mode is set to "full off", we change it to "open loop" and
	 * set DAC to 255, which has the same effect. We do this because
	 * there's no "full off" mode defined in hwmon specifcations.
	 */

	if ((config & MAX6650_CFG_MODE_MASK) == MAX6650_CFG_MODE_OFF) {
		dev_dbg(&client->dev, "Change mode to open loop, full off.\n");
		config = (config & ~MAX6650_CFG_MODE_MASK)
			 | MAX6650_CFG_MODE_OPEN_LOOP;
		if (i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, 255)) {
			dev_err(&client->dev, "DAC write error, aborting.\n");
			return err;
		}
	}

	if (i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, config)) {
		dev_err(&client->dev, "Config write error, aborting.\n");
		return err;
	}

	/* seed the cached copies used by the sysfs show/store paths */
	data->config = config;
	data->count = i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT);

	return 0;
}
/* Tachometer register addresses, indexed by fan number (0-3). */
static const u8 tach_reg[] = {
	MAX6650_REG_TACH0,
	MAX6650_REG_TACH1,
	MAX6650_REG_TACH2,
	MAX6650_REG_TACH3,
};
/*
 * Refresh the cached register copies, at most once per second (HZ).
 * Called at the top of every sysfs show function; returns the per-client
 * data with the cache guaranteed no older than one second.
 */
static struct max6650_data *max6650_update_device(struct device *dev)
{
	int i;
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
		data->speed = i2c_smbus_read_byte_data(client,
						       MAX6650_REG_SPEED);
		data->config = i2c_smbus_read_byte_data(client,
							MAX6650_REG_CONFIG);
		for (i = 0; i < 4; i++) {
			data->tach[i] = i2c_smbus_read_byte_data(client,
								 tach_reg[i]);
		}
		data->count = i2c_smbus_read_byte_data(client,
							MAX6650_REG_COUNT);
		data->dac = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC);

		/* Alarms are cleared on read in case the condition that
		 * caused the alarm is removed. Keep the value latched here
		 * for providing the register through different alarm files. */
		data->alarm |= i2c_smbus_read_byte_data(client,
							MAX6650_REG_ALARM);

		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

	return data;
}
/* Module entry point: register the MAX6650 I2C driver. */
static int __init sensors_max6650_init(void)
{
	return i2c_add_driver(&max6650_driver);
}
/* Module exit point: unregister the MAX6650 I2C driver. */
static void __exit sensors_max6650_exit(void)
{
	i2c_del_driver(&max6650_driver);
}
/* Module metadata and init/exit registration. */
MODULE_AUTHOR("Hans J. Koch");
MODULE_DESCRIPTION("MAX6650 sensor driver");
MODULE_LICENSE("GPL");

module_init(sensors_max6650_init);
module_exit(sensors_max6650_exit);
| gpl-2.0 |
HydraCompany/HydraKernel | drivers/s390/scsi/zfcp_fc.c | 994 | 27252 | /*
* zfcp device driver
*
* Fibre Channel related functions for the zfcp device driver.
*
* Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "zfcp_ext.h"
#include "zfcp_fc.h"
/* Slab cache used to allocate struct zfcp_fc_req objects. */
struct kmem_cache *zfcp_fc_req_cache;

/*
 * Mask applied to a port's d_id when matching it against an RSCN
 * entry, indexed by the RSCN address format (port, area, domain,
 * fabric).
 */
static u32 zfcp_fc_rscn_range_mask[] = {
	[ELS_ADDR_FMT_PORT]		= 0xFFFFFF,
	[ELS_ADDR_FMT_AREA]		= 0xFFFF00,
	[ELS_ADDR_FMT_DOM]		= 0xFF0000,
	[ELS_ADDR_FMT_FAB]		= 0x000000,
};

/* Module parameter: when set, suppress automatic port rescans. */
static bool no_auto_port_rescan;
module_param_named(no_auto_port_rescan, no_auto_port_rescan, bool, 0600);
MODULE_PARM_DESC(no_auto_port_rescan,
		 "no automatic port_rescan (default off)");
/*
 * Schedule the adapter's port scan work, unless automatic rescans
 * have been disabled via the no_auto_port_rescan module parameter.
 */
void zfcp_fc_conditional_port_scan(struct zfcp_adapter *adapter)
{
	if (!no_auto_port_rescan)
		queue_work(adapter->work_queue, &adapter->scan_work);
}
/*
 * Schedule the adapter's port scan work only when automatic rescans
 * are disabled — the complement of zfcp_fc_conditional_port_scan(),
 * used for explicitly requested scans.
 */
void zfcp_fc_inverse_conditional_port_scan(struct zfcp_adapter *adapter)
{
	if (no_auto_port_rescan)
		queue_work(adapter->work_queue, &adapter->scan_work);
}
/**
 * zfcp_fc_post_event - post event to userspace via fc_transport
 * @work: work struct with enqueued events
 *
 * Splices all queued events onto a private list under the list lock,
 * then delivers each one through fc_host_post_event() and frees it.
 */
void zfcp_fc_post_event(struct work_struct *work)
{
	struct zfcp_fc_event *event = NULL, *tmp = NULL;
	LIST_HEAD(tmp_lh);
	struct zfcp_fc_events *events = container_of(work,
					struct zfcp_fc_events, work);
	struct zfcp_adapter *adapter = container_of(events, struct zfcp_adapter,
						    events);

	/* take the whole list at once so producers are not blocked long */
	spin_lock_bh(&events->list_lock);
	list_splice_init(&events->list, &tmp_lh);
	spin_unlock_bh(&events->list_lock);

	list_for_each_entry_safe(event, tmp, &tmp_lh, list) {
		fc_host_post_event(adapter->scsi_host, fc_get_event_number(),
				   event->code, event->data);
		list_del(&event->list);
		kfree(event);
	}
}
/**
 * zfcp_fc_enqueue_event - safely enqueue FC HBA API event from irq context
 * @adapter: The adapter where to enqueue the event
 * @event_code: The event code (as defined in fc_host_event_code in
 *	scsi_transport_fc.h)
 * @event_data: The event data (e.g. n_port page in case of els)
 *
 * Allocates with GFP_ATOMIC (may run in irq context); if allocation
 * fails the event is silently dropped.  Delivery to userspace happens
 * later via zfcp_fc_post_event() on the adapter's work queue.
 */
void zfcp_fc_enqueue_event(struct zfcp_adapter *adapter,
			   enum fc_host_event_code event_code, u32 event_data)
{
	struct zfcp_fc_event *event;

	event = kmalloc(sizeof(struct zfcp_fc_event), GFP_ATOMIC);
	if (!event)
		return;

	event->code = event_code;
	event->data = event_data;

	spin_lock(&adapter->events.list_lock);
	list_add_tail(&event->list, &adapter->events.list);
	spin_unlock(&adapter->events.list_lock);

	queue_work(adapter->work_queue, &adapter->events.work);
}
/*
 * Take a reference on a well-known-address (WKA) port, opening it
 * first if it is offline or in the process of closing.
 *
 * Returns 0 with the refcount incremented once the port is online,
 * -ERESTARTSYS if interrupted while acquiring the mutex, or -EIO if
 * the port could not be brought online.
 */
static int zfcp_fc_wka_port_get(struct zfcp_fc_wka_port *wka_port)
{
	if (mutex_lock_interruptible(&wka_port->mutex))
		return -ERESTARTSYS;

	if (wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE ||
	    wka_port->status == ZFCP_FC_WKA_PORT_CLOSING) {
		wka_port->status = ZFCP_FC_WKA_PORT_OPENING;
		if (zfcp_fsf_open_wka_port(wka_port))
			wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	}

	mutex_unlock(&wka_port->mutex);

	/* the open is asynchronous; wait until it settles either way */
	wait_event(wka_port->completion_wq,
		   wka_port->status == ZFCP_FC_WKA_PORT_ONLINE ||
		   wka_port->status == ZFCP_FC_WKA_PORT_OFFLINE);

	if (wka_port->status == ZFCP_FC_WKA_PORT_ONLINE) {
		atomic_inc(&wka_port->refcount);
		return 0;
	}
	return -EIO;
}
/*
 * Delayed-work handler that closes a WKA port once it has been unused
 * for a grace period (scheduled by zfcp_fc_wka_port_put()).  Bails out
 * if the port picked up new users or is no longer online.
 */
static void zfcp_fc_wka_port_offline(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct zfcp_fc_wka_port *wka_port =
			container_of(dw, struct zfcp_fc_wka_port, work);

	mutex_lock(&wka_port->mutex);
	if ((atomic_read(&wka_port->refcount) != 0) ||
	    (wka_port->status != ZFCP_FC_WKA_PORT_ONLINE))
		goto out;

	wka_port->status = ZFCP_FC_WKA_PORT_CLOSING;
	if (zfcp_fsf_close_wka_port(wka_port)) {
		/* close failed: mark offline and release any waiters */
		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
		wake_up(&wka_port->completion_wq);
	}
out:
	mutex_unlock(&wka_port->mutex);
}
/*
 * Drop a reference on a WKA port.  When the last reference goes away,
 * schedule delayed closing of the port rather than closing it at once.
 */
static void zfcp_fc_wka_port_put(struct zfcp_fc_wka_port *wka_port)
{
	if (atomic_dec_return(&wka_port->refcount) != 0)
		return;
	/* wait 10 milliseconds, other reqs might pop in */
	schedule_delayed_work(&wka_port->work, HZ / 100);
}
/*
 * Initialize the bookkeeping of one well-known-address port.  The port
 * starts out offline with no users; it is opened on demand by
 * zfcp_fc_wka_port_get().
 */
static void zfcp_fc_wka_port_init(struct zfcp_fc_wka_port *wka_port, u32 d_id,
				  struct zfcp_adapter *adapter)
{
	wka_port->adapter = adapter;
	wka_port->d_id = d_id;
	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
	atomic_set(&wka_port->refcount, 0);
	mutex_init(&wka_port->mutex);
	init_waitqueue_head(&wka_port->completion_wq);
	INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
}
/*
 * Force a WKA port to the offline state.  The pending delayed close
 * work is cancelled first (outside the mutex) so it cannot race with
 * the status change.
 */
static void zfcp_fc_wka_port_force_offline(struct zfcp_fc_wka_port *wka)
{
	cancel_delayed_work_sync(&wka->work);
	mutex_lock(&wka->mutex);
	wka->status = ZFCP_FC_WKA_PORT_OFFLINE;
	mutex_unlock(&wka->mutex);
}
/*
 * Force all generic-services WKA ports (management, time, directory,
 * alias) offline.  A NULL @gs (setup never ran) is tolerated.
 */
void zfcp_fc_wka_ports_force_offline(struct zfcp_fc_wka_ports *gs)
{
	if (!gs)
		return;
	zfcp_fc_wka_port_force_offline(&gs->ms);
	zfcp_fc_wka_port_force_offline(&gs->ts);
	zfcp_fc_wka_port_force_offline(&gs->ds);
	zfcp_fc_wka_port_force_offline(&gs->as);
}
/*
 * Handle one RSCN page: test the link of every attached port whose
 * d_id falls into the affected range, and reopen ports that have no
 * valid d_id yet.  Walks the port list under the read lock.
 */
static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
				   struct fc_els_rscn_page *page)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = fsf_req->adapter;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list) {
		if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
			zfcp_fc_test_link(port);
		if (!port->d_id)
			zfcp_erp_port_reopen(port,
					     ZFCP_STATUS_COMMON_ERP_FAILED,
					     "fcrscn1");
	}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
/*
 * Handle an incoming RSCN ELS: process each address page according to
 * its address format, forward each page to userspace as an FC HBA API
 * event, and finally trigger a conditional port rescan.
 */
static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
	struct fc_els_rscn *head;
	struct fc_els_rscn_page *page;
	u16 i;
	u16 no_entries;
	unsigned int afmt;

	head = (struct fc_els_rscn *) status_buffer->payload.data;
	page = (struct fc_els_rscn_page *) head;

	/* see FC-FS */
	no_entries = head->rscn_plen / sizeof(struct fc_els_rscn_page);

	for (i = 1; i < no_entries; i++) {
		/* skip head and start with 1st element */
		page++;
		afmt = page->rscn_page_flags & ELS_RSCN_ADDR_FMT_MASK;
		_zfcp_fc_incoming_rscn(fsf_req, zfcp_fc_rscn_range_mask[afmt],
				       page);
		zfcp_fc_enqueue_event(fsf_req->adapter, FCH_EVT_RSCN,
				      *(u32 *)page);
	}
	zfcp_fc_conditional_port_scan(fsf_req->adapter);
}
/*
 * Force-reopen the attached port matching @wwpn, if any.  Used when a
 * remote port announces itself (PLOGI) or logs out (LOGO).
 */
static void zfcp_fc_incoming_wwpn(struct zfcp_fsf_req *req, u64 wwpn)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = req->adapter;
	struct zfcp_port *port;

	read_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry(port, &adapter->port_list, list)
		if (port->wwpn == wwpn) {
			zfcp_erp_port_forced_reopen(port, 0, "fciwwp1");
			break;
		}
	read_unlock_irqrestore(&adapter->port_list_lock, flags);
}
/* Handle an incoming PLOGI: reopen the port matching the sender WWPN. */
static void zfcp_fc_incoming_plogi(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *sbuf =
		(struct fsf_status_read_buffer *) req->data;
	struct fc_els_flogi *plogi =
		(struct fc_els_flogi *) sbuf->payload.data;

	zfcp_fc_incoming_wwpn(req, plogi->fl_wwpn);
}
/* Handle an incoming LOGO: reopen the port matching the sender WWPN. */
static void zfcp_fc_incoming_logo(struct zfcp_fsf_req *req)
{
	struct fsf_status_read_buffer *sbuf;
	struct fc_els_logo *logo;

	sbuf = (struct fsf_status_read_buffer *) req->data;
	logo = (struct fc_els_logo *) sbuf->payload.data;
	zfcp_fc_incoming_wwpn(req, logo->fl_n_port_wwn);
}
/**
 * zfcp_fc_incoming_els - handle incoming ELS
 * @fsf_req: request which contains incoming ELS
 *
 * Dispatches PLOGI, LOGO and RSCN payloads to their handlers; any
 * other ELS type is only logged via the SAN trace.
 */
void zfcp_fc_incoming_els(struct zfcp_fsf_req *fsf_req)
{
	struct fsf_status_read_buffer *status_buffer =
		(struct fsf_status_read_buffer *) fsf_req->data;
	unsigned int els_type = status_buffer->payload.data[0];

	zfcp_dbf_san_in_els("fciels1", fsf_req);

	switch (els_type) {
	case ELS_PLOGI:
		zfcp_fc_incoming_plogi(fsf_req);
		break;
	case ELS_LOGO:
		zfcp_fc_incoming_logo(fsf_req);
		break;
	case ELS_RSCN:
		zfcp_fc_incoming_rscn(fsf_req);
		break;
	}
}
/*
 * Evaluate a completed GID_PN nameserver response: on success (request
 * finished and CT accept received), store the returned d_id in the
 * port the request was issued for.
 */
static void zfcp_fc_ns_gid_pn_eval(struct zfcp_fc_req *fc_req)
{
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;

	if (ct_els->status)
		return;
	if (gid_pn_rsp->ct_hdr.ct_cmd != FC_FS_ACC)
		return;

	/* looks like a valid d_id */
	ct_els->port->d_id = ntoh24(gid_pn_rsp->gid_pn.fp_fid);
}
/* Generic CT/ELS completion handler: @data is a struct completion. */
static void zfcp_fc_complete(void *data)
{
	complete(data);
}
/* Fill in a CT IU preamble for a directory (name server) request. */
static void zfcp_fc_ct_ns_init(struct fc_ct_hdr *ct_hdr, u16 cmd, u16 mr_size)
{
	ct_hdr->ct_cmd = cmd;
	ct_hdr->ct_mr_size = mr_size / 4;	/* size is given in words */
	ct_hdr->ct_rev = FC_CT_REV;
	ct_hdr->ct_fs_type = FC_FST_DIR;
	ct_hdr->ct_fs_subtype = FC_NS_SUBTYPE;
}
/*
 * Issue a synchronous GID_PN nameserver request for @port and, on
 * successful submission, wait for completion and evaluate the
 * response (which may update port->d_id).
 *
 * Returns the result of zfcp_fsf_send_ct(): 0 if the request was sent
 * and completed, nonzero otherwise.
 */
static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
				     struct zfcp_fc_req *fc_req)
{
	struct zfcp_adapter *adapter = port->adapter;
	DECLARE_COMPLETION_ONSTACK(completion);
	struct zfcp_fc_gid_pn_req *gid_pn_req = &fc_req->u.gid_pn.req;
	struct zfcp_fc_gid_pn_rsp *gid_pn_rsp = &fc_req->u.gid_pn.rsp;
	int ret;

	/* setup parameters for send generic command */
	fc_req->ct_els.port = port;
	fc_req->ct_els.handler = zfcp_fc_complete;
	fc_req->ct_els.handler_data = &completion;
	fc_req->ct_els.req = &fc_req->sg_req;
	fc_req->ct_els.resp = &fc_req->sg_rsp;
	sg_init_one(&fc_req->sg_req, gid_pn_req, sizeof(*gid_pn_req));
	sg_init_one(&fc_req->sg_rsp, gid_pn_rsp, sizeof(*gid_pn_rsp));

	zfcp_fc_ct_ns_init(&gid_pn_req->ct_hdr,
			   FC_NS_GID_PN, ZFCP_FC_CT_SIZE_PAGE);
	gid_pn_req->gid_pn.fn_wwpn = port->wwpn;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, &fc_req->ct_els,
			       adapter->pool.gid_pn_req,
			       ZFCP_FC_CTELS_TMO);
	if (!ret) {
		wait_for_completion(&completion);
		zfcp_fc_ns_gid_pn_eval(fc_req);
	}
	return ret;
}
/**
 * zfcp_fc_ns_gid_pn - initiate GID_PN nameserver request
 * @port: port where GID_PN request is needed
 * return: -ENOMEM if the request could not be allocated, otherwise the
 *	result of acquiring the directory service or of sending the
 *	request (0 on success)
 */
static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
{
	int ret;
	struct zfcp_fc_req *fc_req;
	struct zfcp_adapter *adapter = port->adapter;

	fc_req = mempool_alloc(adapter->pool.gid_pn, GFP_ATOMIC);
	if (!fc_req)
		return -ENOMEM;

	memset(fc_req, 0, sizeof(*fc_req));

	/* the nameserver lives behind the directory-service WKA port */
	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
	if (ret)
		goto out;

	ret = zfcp_fc_ns_gid_pn_request(port, fc_req);

	zfcp_fc_wka_port_put(&adapter->gs->ds);
out:
	mempool_free(fc_req, adapter->pool.gid_pn);
	return ret;
}
void zfcp_fc_port_did_lookup(struct work_struct *work)
{
int ret;
struct zfcp_port *port = container_of(work, struct zfcp_port,
gid_pn_work);
ret = zfcp_fc_ns_gid_pn(port);
if (ret) {
/* could not issue gid_pn for some reason */
zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1");
goto out;
}
if (!port->d_id) {
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
goto out;
}
zfcp_erp_port_reopen(port, 0, "fcgpn_3");
out:
put_device(&port->dev);
}
/**
 * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request
 * @port: The zfcp_port to lookup the d_id for.
 *
 * Takes a device reference that zfcp_fc_port_did_lookup() releases.
 */
void zfcp_fc_trigger_did_lookup(struct zfcp_port *port)
{
	get_device(&port->dev);
	if (queue_work(port->adapter->work_queue, &port->gid_pn_work))
		return;
	/* work was already queued: drop the reference we just took */
	put_device(&port->dev);
}
/**
 * zfcp_fc_plogi_evaluate - evaluate PLOGI payload
 * @port: zfcp_port structure
 * @plogi: plogi payload
 *
 * Evaluate PLOGI payload and copy important fields into zfcp_port
 * structure.  If the WWPN in the payload does not match the port's
 * WWPN, the d_id is invalidated and nothing else is copied.
 */
void zfcp_fc_plogi_evaluate(struct zfcp_port *port, struct fc_els_flogi *plogi)
{
	if (plogi->fl_wwpn != port->wwpn) {
		port->d_id = 0;
		dev_warn(&port->adapter->ccw_device->dev,
			 "A port opened with WWPN 0x%016Lx returned data that "
			 "identifies it as WWPN 0x%016Lx\n",
			 (unsigned long long) port->wwpn,
			 (unsigned long long) plogi->fl_wwpn);
		return;
	}

	port->wwnn = plogi->fl_wwnn;
	port->maxframe_size = plogi->fl_csp.sp_bb_data;

	/* collect the supported classes of service from the class params */
	if (plogi->fl_cssp[0].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS1;
	if (plogi->fl_cssp[1].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS2;
	if (plogi->fl_cssp[2].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS3;
	if (plogi->fl_cssp[3].cp_class & FC_CPC_VALID)
		port->supported_classes |= FC_COS_CLASS4;
}
/*
 * Completion handler for the ADISC link test: on failure or WWPN
 * mismatch trigger port recovery, otherwise re-register the rport
 * directly.  Always clears the LINK_TEST flag, drops the device
 * reference taken in zfcp_fc_adisc() and frees the request.
 */
static void zfcp_fc_adisc_handler(void *data)
{
	struct zfcp_fc_req *fc_req = data;
	struct zfcp_port *port = fc_req->ct_els.port;
	struct fc_els_adisc *adisc_resp = &fc_req->u.adisc.rsp;

	if (fc_req->ct_els.status) {
		/* request rejected or timed out */
		zfcp_erp_port_forced_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
					    "fcadh_1");
		goto out;
	}

	if (!port->wwnn)
		port->wwnn = adisc_resp->adisc_wwnn;

	if ((port->wwpn != adisc_resp->adisc_wwpn) ||
	    !(atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN)) {
		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
				     "fcadh_2");
		goto out;
	}

	/* port is good, unblock rport without going through erp */
	zfcp_scsi_schedule_rport_register(port);
out:
	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	put_device(&port->dev);
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
/*
 * Send an ADISC ELS to @port to test the link.  The response is
 * evaluated asynchronously by zfcp_fc_adisc_handler(), which also
 * frees the request.  On send failure the request is freed here.
 *
 * Returns 0 if the ELS was sent, -ENOMEM or the send error otherwise.
 * Note: the caller (zfcp_fc_link_test_work) holds a device reference
 * that the handler releases on completion.
 */
static int zfcp_fc_adisc(struct zfcp_port *port)
{
	struct zfcp_fc_req *fc_req;
	struct zfcp_adapter *adapter = port->adapter;
	struct Scsi_Host *shost = adapter->scsi_host;
	int ret;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
	if (!fc_req)
		return -ENOMEM;

	fc_req->ct_els.port = port;
	fc_req->ct_els.req = &fc_req->sg_req;
	fc_req->ct_els.resp = &fc_req->sg_rsp;
	sg_init_one(&fc_req->sg_req, &fc_req->u.adisc.req,
		    sizeof(struct fc_els_adisc));
	sg_init_one(&fc_req->sg_rsp, &fc_req->u.adisc.rsp,
		    sizeof(struct fc_els_adisc));

	fc_req->ct_els.handler = zfcp_fc_adisc_handler;
	fc_req->ct_els.handler_data = fc_req;

	/* acc. to FC-FS, hard_nport_id in ADISC should not be set for ports
	   without FC-AL-2 capability, so we don't set it */
	fc_req->u.adisc.req.adisc_wwpn = fc_host_port_name(shost);
	fc_req->u.adisc.req.adisc_wwnn = fc_host_node_name(shost);
	fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
	hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));

	ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
				ZFCP_FC_CTELS_TMO);
	if (ret)
		kmem_cache_free(zfcp_fc_req_cache, fc_req);

	return ret;
}
/*
 * Work handler for a port's test_link_work: delete the rport, then
 * issue an ADISC link test unless one is already in flight.  If the
 * ADISC could not even be sent, force port recovery.  The device
 * reference taken here is released either below or, when the ADISC
 * was sent, by zfcp_fc_adisc_handler().
 */
void zfcp_fc_link_test_work(struct work_struct *work)
{
	struct zfcp_port *port =
		container_of(work, struct zfcp_port, test_link_work);
	int retval;

	get_device(&port->dev);
	port->rport_task = RPORT_DEL;
	zfcp_scsi_rport_work(&port->rport_work);

	/* only issue one test command at one time per port */
	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
		goto out;

	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);

	retval = zfcp_fc_adisc(port);
	if (retval == 0)
		return;	/* the ADISC handler drops the reference */

	/* send of ADISC was not possible */
	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1");

out:
	put_device(&port->dev);
}
/**
 * zfcp_fc_test_link - lightweight link test procedure
 * @port: port to be tested
 *
 * Test status of a link to a remote port using the ELS command ADISC.
 * If there is a problem with the remote port, error recovery steps
 * will be triggered.
 */
void zfcp_fc_test_link(struct zfcp_port *port)
{
	get_device(&port->dev);
	if (queue_work(port->adapter->work_queue, &port->test_link_work))
		return;
	/* test already pending: drop the reference we just took */
	put_device(&port->dev);
}
/*
 * Allocate a GPN_FT request with a @buf_num-page response scatterlist.
 * Returns NULL on allocation failure.
 */
static struct zfcp_fc_req *zfcp_alloc_sg_env(int buf_num)
{
	struct zfcp_fc_req *fc_req;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
	if (!fc_req)
		goto err;

	if (zfcp_sg_setup_table(&fc_req->sg_rsp, buf_num))
		goto err_free;

	sg_init_one(&fc_req->sg_req, &fc_req->u.gpn_ft.req,
		    sizeof(struct zfcp_fc_gpn_ft_req));

	return fc_req;

err_free:
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
err:
	return NULL;
}
/*
 * Synchronously send a GPN_FT (get port names by FC-4 type) request to
 * the fabric nameserver, asking for FCP-capable ports.  Returns the
 * result of zfcp_fsf_send_ct(); on 0 the response is complete in
 * fc_req->sg_rsp.
 */
static int zfcp_fc_send_gpn_ft(struct zfcp_fc_req *fc_req,
			       struct zfcp_adapter *adapter, int max_bytes)
{
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gpn_ft_req *req = &fc_req->u.gpn_ft.req;
	DECLARE_COMPLETION_ONSTACK(completion);
	int ret;

	zfcp_fc_ct_ns_init(&req->ct_hdr, FC_NS_GPN_FT, max_bytes);
	req->gpn_ft.fn_fc4_type = FC_TYPE_FCP;

	ct_els->handler = zfcp_fc_complete;
	ct_els->handler_data = &completion;
	ct_els->req = &fc_req->sg_req;
	ct_els->resp = &fc_req->sg_rsp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (!ret)
		wait_for_completion(&completion);
	return ret;
}
/*
 * Post-scan validation: clear the NOESC marker set during the scan.
 * A port that was not confirmed (no supported classes) and has no
 * units is moved onto @lh for later removal by the caller.
 */
static void zfcp_fc_validate_port(struct zfcp_port *port, struct list_head *lh)
{
	if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC))
		return;

	atomic_clear_mask(ZFCP_STATUS_COMMON_NOESC, &port->status);

	if ((port->supported_classes != 0) ||
	    !list_empty(&port->unit_list))
		return;

	list_move_tail(&port->list, lh);
}
/*
 * Evaluate a GPN_FT response: enqueue and reopen each newly discovered
 * remote port, then validate the existing port list and remove ports
 * that the scan did not confirm.
 *
 * Returns 0 on success, -EIO on request/CT failure, -EAGAIN when the
 * name server was temporarily unable, -E2BIG when the response did not
 * fit, or the error from enqueueing a port.
 */
static int zfcp_fc_eval_gpn_ft(struct zfcp_fc_req *fc_req,
			       struct zfcp_adapter *adapter, int max_entries)
{
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct scatterlist *sg = &fc_req->sg_rsp;
	struct fc_ct_hdr *hdr = sg_virt(sg);
	struct fc_gpn_ft_resp *acc = sg_virt(sg);
	struct zfcp_port *port, *tmp;
	unsigned long flags;
	LIST_HEAD(remove_lh);
	u32 d_id;
	int ret = 0, x, last = 0;

	if (ct_els->status)
		return -EIO;

	if (hdr->ct_cmd != FC_FS_ACC) {
		if (hdr->ct_reason == FC_BA_RJT_UNABLE)
			return -EAGAIN; /* might be a temporary condition */
		return -EIO;
	}

	if (hdr->ct_mr_size) {
		/* residual data means the response was truncated */
		dev_warn(&adapter->ccw_device->dev,
			 "The name server reported %d words residual data\n",
			 hdr->ct_mr_size);
		return -E2BIG;
	}

	/* first entry is the header */
	for (x = 1; x < max_entries && !last; x++) {
		/* entries are packed page-wise across the scatterlist */
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++sg);

		last = acc->fp_flags & FC_NS_FID_LAST;
		d_id = ntoh24(acc->fp_fid);

		/* don't attach ports with a well known address */
		if (d_id >= FC_FID_WELL_KNOWN_BASE)
			continue;
		/* skip the adapter's port and known remote ports */
		if (acc->fp_wwpn == fc_host_port_name(adapter->scsi_host))
			continue;

		port = zfcp_port_enqueue(adapter, acc->fp_wwpn,
					 ZFCP_STATUS_COMMON_NOESC, d_id);
		if (!IS_ERR(port))
			zfcp_erp_port_reopen(port, 0, "fcegpf1");
		else if (PTR_ERR(port) != -EEXIST)
			ret = PTR_ERR(port);
	}

	zfcp_erp_wait(adapter);

	/* drop ports the scan did not confirm (see zfcp_fc_validate_port) */
	write_lock_irqsave(&adapter->port_list_lock, flags);
	list_for_each_entry_safe(port, tmp, &adapter->port_list, list)
		zfcp_fc_validate_port(port, &remove_lh);
	write_unlock_irqrestore(&adapter->port_list_lock, flags);

	list_for_each_entry_safe(port, tmp, &remove_lh, list) {
		zfcp_erp_port_shutdown(port, 0, "fcegpf2");
		device_unregister(&port->dev);
	}

	return ret;
}
/**
 * zfcp_fc_scan_ports - scan remote ports and attach new ports
 * @work: reference to scheduled work
 *
 * Issues a GPN_FT nameserver query (retried up to 3 times when the
 * name server is temporarily unable) and attaches/validates ports
 * based on the response.  Only runs for N_Port and NPIV topologies.
 */
void zfcp_fc_scan_ports(struct work_struct *work)
{
	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
						    scan_work);
	int ret, i;
	struct zfcp_fc_req *fc_req;
	int chain, max_entries, buf_num, max_bytes;

	/* response size depends on whether chained SBALs are supported */
	chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
	buf_num = chain ? ZFCP_FC_GPN_FT_NUM_BUFS : 1;
	max_entries = chain ? ZFCP_FC_GPN_FT_MAX_ENT : ZFCP_FC_GPN_FT_ENT_PAGE;
	max_bytes = chain ? ZFCP_FC_GPN_FT_MAX_SIZE : ZFCP_FC_CT_SIZE_PAGE;

	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		return;

	if (zfcp_fc_wka_port_get(&adapter->gs->ds))
		return;

	fc_req = zfcp_alloc_sg_env(buf_num);
	if (!fc_req)
		goto out;

	for (i = 0; i < 3; i++) {
		ret = zfcp_fc_send_gpn_ft(fc_req, adapter, max_bytes);
		if (!ret) {
			ret = zfcp_fc_eval_gpn_ft(fc_req, adapter, max_entries);
			if (ret == -EAGAIN)
				ssleep(1);
			else
				break;
		}
	}
	zfcp_sg_free_table(&fc_req->sg_rsp, buf_num);
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
out:
	zfcp_fc_wka_port_put(&adapter->gs->ds);
}
/*
 * Synchronously query the nameserver for the adapter's symbolic port
 * name (GSPN_ID) and store it in the fc_host symbolic_name attribute.
 * In NPIV mode, if the name does not yet carry a "DEVNO:" tag, append
 * device and host identification.
 *
 * Returns 0 on success, the send error or the CT/ELS status otherwise.
 */
static int zfcp_fc_gspn(struct zfcp_adapter *adapter,
			struct zfcp_fc_req *fc_req)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	char devno[] = "DEVNO:";
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_gspn_req *gspn_req = &fc_req->u.gspn.req;
	struct zfcp_fc_gspn_rsp *gspn_rsp = &fc_req->u.gspn.rsp;
	int ret;

	zfcp_fc_ct_ns_init(&gspn_req->ct_hdr, FC_NS_GSPN_ID,
			   FC_SYMBOLIC_NAME_SIZE);
	hton24(gspn_req->gspn.fp_fid, fc_host_port_id(adapter->scsi_host));

	sg_init_one(&fc_req->sg_req, gspn_req, sizeof(*gspn_req));
	sg_init_one(&fc_req->sg_rsp, gspn_rsp, sizeof(*gspn_rsp));

	ct_els->handler = zfcp_fc_complete;
	ct_els->handler_data = &completion;
	ct_els->req = &fc_req->sg_req;
	ct_els->resp = &fc_req->sg_rsp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (ret)
		return ret;

	wait_for_completion(&completion);
	if (ct_els->status)
		return ct_els->status;

	if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_NPIV &&
	    !(strstr(gspn_rsp->gspn.fp_name, devno)))
		snprintf(fc_host_symbolic_name(adapter->scsi_host),
			 FC_SYMBOLIC_NAME_SIZE, "%s%s %s NAME: %s",
			 gspn_rsp->gspn.fp_name, devno,
			 dev_name(&adapter->ccw_device->dev),
			 init_utsname()->nodename);
	else
		strlcpy(fc_host_symbolic_name(adapter->scsi_host),
			gspn_rsp->gspn.fp_name, FC_SYMBOLIC_NAME_SIZE);

	return 0;
}
/*
 * Synchronously register the fc_host's symbolic port name with the
 * fabric nameserver (RSPN_ID).  Failures are ignored; the registration
 * is best effort.
 */
static void zfcp_fc_rspn(struct zfcp_adapter *adapter,
			 struct zfcp_fc_req *fc_req)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	struct Scsi_Host *shost = adapter->scsi_host;
	struct zfcp_fsf_ct_els *ct_els = &fc_req->ct_els;
	struct zfcp_fc_rspn_req *rspn_req = &fc_req->u.rspn.req;
	struct fc_ct_hdr *rspn_rsp = &fc_req->u.rspn.rsp;
	int ret, len;

	zfcp_fc_ct_ns_init(&rspn_req->ct_hdr, FC_NS_RSPN_ID,
			   FC_SYMBOLIC_NAME_SIZE);
	hton24(rspn_req->rspn.fr_fid.fp_fid, fc_host_port_id(shost));
	len = strlcpy(rspn_req->rspn.fr_name, fc_host_symbolic_name(shost),
		      FC_SYMBOLIC_NAME_SIZE);
	rspn_req->rspn.fr_name_len = len;

	sg_init_one(&fc_req->sg_req, rspn_req, sizeof(*rspn_req));
	sg_init_one(&fc_req->sg_rsp, rspn_rsp, sizeof(*rspn_rsp));

	ct_els->handler = zfcp_fc_complete;
	ct_els->handler_data = &completion;
	ct_els->req = &fc_req->sg_req;
	ct_els->resp = &fc_req->sg_rsp;

	ret = zfcp_fsf_send_ct(&adapter->gs->ds, ct_els, NULL,
			       ZFCP_FC_CTELS_TMO);
	if (!ret)
		wait_for_completion(&completion);
}
/**
 * zfcp_fc_sym_name_update - Retrieve and update the symbolic port name
 * @work: ns_up_work of the adapter where to update the symbolic port name
 *
 * Retrieve the current symbolic port name that may have been set by
 * the hardware using the GSPN request and update the fc_host
 * symbolic_name sysfs attribute. When running in NPIV mode (and hence
 * the port name is unique for this system), update the symbolic port
 * name to add Linux specific information and update the FC nameserver
 * using the RSPN request.
 */
void zfcp_fc_sym_name_update(struct work_struct *work)
{
	struct zfcp_adapter *adapter = container_of(work, struct zfcp_adapter,
						    ns_up_work);
	int ret;
	struct zfcp_fc_req *fc_req;

	if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT &&
	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		return;

	fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_KERNEL);
	if (!fc_req)
		return;

	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
	if (ret)
		goto out_free;

	ret = zfcp_fc_gspn(adapter, fc_req);
	if (ret || fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
		goto out_ds_put;

	/* reuse the request buffer for the follow-up RSPN registration */
	memset(fc_req, 0, sizeof(*fc_req));
	zfcp_fc_rspn(adapter, fc_req);

out_ds_put:
	zfcp_fc_wka_port_put(&adapter->gs->ds);
out_free:
	kmem_cache_free(zfcp_fc_req_cache, fc_req);
}
/*
 * Completion handler for bsg-initiated CT/ELS requests: fill in the
 * bsg reply (payload length, CT/ELS status, result) and finish the
 * job.
 */
static void zfcp_fc_ct_els_job_handler(void *data)
{
	struct fc_bsg_job *job = data;
	struct zfcp_fsf_ct_els *zfcp_ct_els = job->dd_data;
	struct fc_bsg_reply *jr = job->reply;

	jr->reply_payload_rcv_len = job->reply_payload.payload_len;
	jr->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	jr->result = zfcp_ct_els->status ? -EIO : 0;
	job->job_done(job);
}
/*
 * Map a bsg CT request to the matching well-known-address port.
 *
 * The generic-services type is carried in the most significant byte of
 * the first CT preamble word.  Returns NULL for an unknown GS type.
 *
 * Fix: removed the unreachable `break` statements that followed
 * `return` in the FC_FST_TIME and FC_FST_DIR cases (dead code).
 */
static struct zfcp_fc_wka_port *zfcp_fc_job_wka_port(struct fc_bsg_job *job)
{
	u32 preamble_word1;
	u8 gs_type;
	struct zfcp_adapter *adapter;

	preamble_word1 = job->request->rqst_data.r_ct.preamble_word1;
	gs_type = (preamble_word1 & 0xff000000) >> 24;

	adapter = (struct zfcp_adapter *) job->shost->hostdata[0];

	switch (gs_type) {
	case FC_FST_ALIAS:
		return &adapter->gs->as;
	case FC_FST_MGMT:
		return &adapter->gs->ms;
	case FC_FST_TIME:
		return &adapter->gs->ts;
	case FC_FST_DIR:
		return &adapter->gs->ds;
	default:
		return NULL;
	}
}
/*
 * Completion handler for bsg CT jobs: release the WKA port reference
 * taken when the request was issued, then finish the job via the
 * common CT/ELS handler.
 */
static void zfcp_fc_ct_job_handler(void *data)
{
	struct fc_bsg_job *job = data;

	zfcp_fc_wka_port_put(zfcp_fc_job_wka_port(job));
	zfcp_fc_ct_els_job_handler(data);
}
static int zfcp_fc_exec_els_job(struct fc_bsg_job *job,
struct zfcp_adapter *adapter)
{
struct zfcp_fsf_ct_els *els = job->dd_data;
struct fc_rport *rport = job->rport;
struct zfcp_port *port;
u32 d_id;
if (rport) {
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -EINVAL;
d_id = port->d_id;
put_device(&port->dev);
} else
d_id = ntoh24(job->request->rqst_data.h_els.port_id);
els->handler = zfcp_fc_ct_els_job_handler;
return zfcp_fsf_send_els(adapter, d_id, els, job->req->timeout / HZ);
}
/*
 * Execute a bsg CT job: resolve the target WKA port from the CT
 * preamble, take a reference on it (opening it if necessary) and send
 * the request.  The reference is dropped by the completion handler,
 * or here if sending failed.  Returns 0 on submission or a negative
 * error code.
 */
static int zfcp_fc_exec_ct_job(struct fc_bsg_job *job,
			       struct zfcp_adapter *adapter)
{
	int ret;
	struct zfcp_fsf_ct_els *ct = job->dd_data;
	struct zfcp_fc_wka_port *wka_port;

	wka_port = zfcp_fc_job_wka_port(job);
	if (!wka_port)
		return -EINVAL;

	ret = zfcp_fc_wka_port_get(wka_port);
	if (ret)
		return ret;

	ct->handler = zfcp_fc_ct_job_handler;
	ret = zfcp_fsf_send_ct(wka_port, ct, NULL, job->req->timeout / HZ);
	if (ret)
		zfcp_fc_wka_port_put(wka_port);

	return ret;
}
/*
 * fc_transport entry point for bsg (FC pass-through) jobs: set up the
 * shared CT/ELS context and dispatch to the ELS or CT execution path.
 * Returns -EINVAL when the adapter is not open or the message code is
 * unknown.
 */
int zfcp_fc_exec_bsg_job(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost;
	struct zfcp_adapter *adapter;
	struct zfcp_fsf_ct_els *ct_els = job->dd_data;

	shost = job->rport ? rport_to_shost(job->rport) : job->shost;
	adapter = (struct zfcp_adapter *)shost->hostdata[0];

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_OPEN))
		return -EINVAL;

	ct_els->req = job->request_payload.sg_list;
	ct_els->resp = job->reply_payload.sg_list;
	ct_els->handler_data = job;

	switch (job->request->msgcode) {
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_ELS_NOLOGIN:
		return zfcp_fc_exec_els_job(job, adapter);
	case FC_BSG_RPT_CT:
	case FC_BSG_HST_CT:
		return zfcp_fc_exec_ct_job(job, adapter);
	default:
		return -EINVAL; /* other messages not supported by zfcp */
	}
}
/*
 * fc_transport bsg timeout callback.  Returning -EAGAIN tells the bsg
 * layer to restart its timer instead of aborting the job.
 */
int zfcp_fc_timeout_bsg_job(struct fc_bsg_job *job)
{
	/* hardware tracks timeout, reset bsg timeout to not interfere */
	return -EAGAIN;
}
/*
 * Allocate and initialize the adapter's generic-services container and
 * its four well-known-address ports (management, time, directory,
 * alias).  Returns 0 on success, -ENOMEM on allocation failure.
 */
int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
{
	struct zfcp_fc_wka_ports *gs;

	gs = kzalloc(sizeof(*gs), GFP_KERNEL);
	if (!gs)
		return -ENOMEM;

	adapter->gs = gs;
	zfcp_fc_wka_port_init(&gs->ms, FC_FID_MGMT_SERV, adapter);
	zfcp_fc_wka_port_init(&gs->ts, FC_FID_TIME_SERV, adapter);
	zfcp_fc_wka_port_init(&gs->ds, FC_FID_DIR_SERV, adapter);
	zfcp_fc_wka_port_init(&gs->as, FC_FID_ALIASES, adapter);

	return 0;
}
/* Free the adapter's generic-services container and clear the pointer. */
void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
{
	struct zfcp_fc_wka_ports *gs = adapter->gs;

	adapter->gs = NULL;
	kfree(gs);
}
| gpl-2.0 |
rspc/mmc-sd40 | drivers/media/pci/mantis/mantis_vp1041.c | 1762 | 11253 | /*
Mantis VP-1041 driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include "dmxdev.h"
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dvb_frontend.h"
#include "dvb_net.h"
#include "mantis_common.h"
#include "mantis_ioc.h"
#include "mantis_dvb.h"
#include "mantis_vp1041.h"
#include "stb0899_reg.h"
#include "stb0899_drv.h"
#include "stb0899_cfg.h"
#include "stb6100_cfg.h"
#include "stb6100.h"
#include "lnbp21.h"
#define MANTIS_MODEL_NAME "VP-1041"
#define MANTIS_DEV_TYPE "DSS/DVB-S/DVB-S2"
/*
 * STB0899 S1 register initialization, part 1: system, DiSEqC, I2C,
 * GPIO and clock configuration.  The table is terminated by the
 * { 0xffff, 0xff } sentinel entry.
 */
static const struct stb0899_s1_reg vp1041_stb0899_s1_init_1[] = {
	/* 0x0000000b, *//* SYSREG */
	{ STB0899_DEV_ID		, 0x30 },
	{ STB0899_DISCNTRL1		, 0x32 },
	{ STB0899_DISCNTRL2		, 0x80 },
	{ STB0899_DISRX_ST0		, 0x04 },
	{ STB0899_DISRX_ST1		, 0x00 },
	{ STB0899_DISPARITY		, 0x00 },
	{ STB0899_DISSTATUS		, 0x20 },
	{ STB0899_DISF22		, 0x99 },
	{ STB0899_DISF22RX		, 0xa8 },
	/* SYSREG ? */
	{ STB0899_ACRPRESC		, 0x11 },
	{ STB0899_ACRDIV1		, 0x0a },
	{ STB0899_ACRDIV2		, 0x05 },
	{ STB0899_DACR1			, 0x00 },
	{ STB0899_DACR2			, 0x00 },
	{ STB0899_OUTCFG		, 0x00 },
	{ STB0899_MODECFG		, 0x00 },
	{ STB0899_IRQSTATUS_3		, 0xfe },
	{ STB0899_IRQSTATUS_2		, 0x03 },
	{ STB0899_IRQSTATUS_1		, 0x7c },
	{ STB0899_IRQSTATUS_0		, 0xf4 },
	{ STB0899_IRQMSK_3		, 0xf3 },
	{ STB0899_IRQMSK_2		, 0xfc },
	{ STB0899_IRQMSK_1		, 0xff },
	{ STB0899_IRQMSK_0		, 0xff },
	{ STB0899_IRQCFG		, 0x00 },
	{ STB0899_I2CCFG		, 0x88 },
	{ STB0899_I2CRPT		, 0x58 },
	{ STB0899_IOPVALUE5		, 0x00 },
	{ STB0899_IOPVALUE4		, 0x33 },
	{ STB0899_IOPVALUE3		, 0x6d },
	{ STB0899_IOPVALUE2		, 0x90 },
	{ STB0899_IOPVALUE1		, 0x60 },
	{ STB0899_IOPVALUE0		, 0x00 },
	{ STB0899_GPIO00CFG		, 0x82 },
	{ STB0899_GPIO01CFG		, 0x82 },
	{ STB0899_GPIO02CFG		, 0x82 },
	{ STB0899_GPIO03CFG		, 0x82 },
	{ STB0899_GPIO04CFG		, 0x82 },
	{ STB0899_GPIO05CFG		, 0x82 },
	{ STB0899_GPIO06CFG		, 0x82 },
	{ STB0899_GPIO07CFG		, 0x82 },
	{ STB0899_GPIO08CFG		, 0x82 },
	{ STB0899_GPIO09CFG		, 0x82 },
	{ STB0899_GPIO10CFG		, 0x82 },
	{ STB0899_GPIO11CFG		, 0x82 },
	{ STB0899_GPIO12CFG		, 0x82 },
	{ STB0899_GPIO13CFG		, 0x82 },
	{ STB0899_GPIO14CFG		, 0x82 },
	{ STB0899_GPIO15CFG		, 0x82 },
	{ STB0899_GPIO16CFG		, 0x82 },
	{ STB0899_GPIO17CFG		, 0x82 },
	{ STB0899_GPIO18CFG		, 0x82 },
	{ STB0899_GPIO19CFG		, 0x82 },
	{ STB0899_GPIO20CFG		, 0x82 },
	{ STB0899_SDATCFG		, 0xb8 },
	{ STB0899_SCLTCFG		, 0xba },
	{ STB0899_AGCRFCFG		, 0x1c }, /* 0x11 */
	{ STB0899_GPIO22		, 0x82 }, /* AGCBB2CFG */
	{ STB0899_GPIO21		, 0x91 }, /* AGCBB1CFG */
	{ STB0899_DIRCLKCFG		, 0x82 },
	{ STB0899_CLKOUT27CFG		, 0x7e },
	{ STB0899_STDBYCFG		, 0x82 },
	{ STB0899_CS0CFG		, 0x82 },
	{ STB0899_CS1CFG		, 0x82 },
	{ STB0899_DISEQCOCFG		, 0x20 },
	{ STB0899_GPIO32CFG		, 0x82 },
	{ STB0899_GPIO33CFG		, 0x82 },
	{ STB0899_GPIO34CFG		, 0x82 },
	{ STB0899_GPIO35CFG		, 0x82 },
	{ STB0899_GPIO36CFG		, 0x82 },
	{ STB0899_GPIO37CFG		, 0x82 },
	{ STB0899_GPIO38CFG		, 0x82 },
	{ STB0899_GPIO39CFG		, 0x82 },
	{ STB0899_NCOARSE		, 0x17 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 99MHz */
	{ STB0899_SYNTCTRL		, 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
	{ STB0899_FILTCTRL		, 0x00 },
	{ STB0899_SYSCTRL		, 0x01 },
	{ STB0899_STOPCLK1		, 0x20 },
	{ STB0899_STOPCLK2		, 0x00 },
	{ STB0899_INTBUFSTATUS		, 0x00 },
	{ STB0899_INTBUFCTRL		, 0x0a },
	{ 0xffff			, 0xff },
};
/* STB0899 DVB-S (legacy S1 path) demodulator register init table for the
 * VP-1041 board, handed to the stb0899 driver via .init_s1_demod below.
 * Board-specific tuning data: written to the chip as-is, so do not
 * reorder entries. The { 0xffff, 0xff } entry presumably terminates the
 * list — TODO confirm against the stb0899 driver's table walker. */
static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
{ STB0899_DEMOD , 0x00 },
{ STB0899_RCOMPC , 0xc9 },
{ STB0899_AGC1CN , 0x01 },
{ STB0899_AGC1REF , 0x10 },
{ STB0899_RTC , 0x23 },
{ STB0899_TMGCFG , 0x4e },
{ STB0899_AGC2REF , 0x34 },
{ STB0899_TLSR , 0x84 },
{ STB0899_CFD , 0xf7 },
{ STB0899_ACLC , 0x87 },
{ STB0899_BCLC , 0x94 },
{ STB0899_EQON , 0x41 },
{ STB0899_LDT , 0xf1 },
{ STB0899_LDT2 , 0xe3 },
{ STB0899_EQUALREF , 0xb4 },
{ STB0899_TMGRAMP , 0x10 },
{ STB0899_TMGTHD , 0x30 },
{ STB0899_IDCCOMP , 0xfd },
{ STB0899_QDCCOMP , 0xff },
{ STB0899_POWERI , 0x0c },
{ STB0899_POWERQ , 0x0f },
{ STB0899_RCOMP , 0x6c },
{ STB0899_AGCIQIN , 0x80 },
{ STB0899_AGC2I1 , 0x06 },
{ STB0899_AGC2I2 , 0x00 },
{ STB0899_TLIR , 0x30 },
{ STB0899_RTF , 0x7f },
{ STB0899_DSTATUS , 0x00 },
{ STB0899_LDI , 0xbc },
{ STB0899_CFRM , 0xea },
{ STB0899_CFRL , 0x31 },
{ STB0899_NIRM , 0x2b },
{ STB0899_NIRL , 0x80 },
{ STB0899_ISYMB , 0x1d },
{ STB0899_QSYMB , 0xa6 },
{ STB0899_SFRH , 0x2f },
{ STB0899_SFRM , 0x68 },
{ STB0899_SFRL , 0x40 },
{ STB0899_SFRUPH , 0x2f },
{ STB0899_SFRUPM , 0x68 },
{ STB0899_SFRUPL , 0x40 },
{ STB0899_EQUAI1 , 0x02 },
{ STB0899_EQUAQ1 , 0xff },
{ STB0899_EQUAI2 , 0x04 },
{ STB0899_EQUAQ2 , 0x05 },
{ STB0899_EQUAI3 , 0x02 },
{ STB0899_EQUAQ3 , 0xfd },
{ STB0899_EQUAI4 , 0x03 },
{ STB0899_EQUAQ4 , 0x07 },
{ STB0899_EQUAI5 , 0x08 },
{ STB0899_EQUAQ5 , 0xf5 },
{ STB0899_DSTATUS2 , 0x00 },
{ STB0899_VSTATUS , 0x00 },
{ STB0899_VERROR , 0x86 },
{ STB0899_IQSWAP , 0x2a },
{ STB0899_ECNT1M , 0x00 },
{ STB0899_ECNT1L , 0x00 },
{ STB0899_ECNT2M , 0x00 },
{ STB0899_ECNT2L , 0x00 },
{ STB0899_ECNT3M , 0x0a },
{ STB0899_ECNT3L , 0xad },
{ STB0899_FECAUTO1 , 0x06 },
{ STB0899_FECM , 0x01 },
{ STB0899_VTH12 , 0xb0 },
{ STB0899_VTH23 , 0x7a },
{ STB0899_VTH34 , 0x58 },
{ STB0899_VTH56 , 0x38 },
{ STB0899_VTH67 , 0x34 },
{ STB0899_VTH78 , 0x24 },
{ STB0899_PRVIT , 0xff },
{ STB0899_VITSYNC , 0x19 },
{ STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
{ STB0899_TSULC , 0x42 },
{ STB0899_RSLLC , 0x41 },
{ STB0899_TSLPL , 0x12 },
{ STB0899_TSCFGH , 0x0c },
{ STB0899_TSCFGM , 0x00 },
{ STB0899_TSCFGL , 0x00 },
{ STB0899_TSOUT , 0x69 }, /* 0x0d for CAM */
{ STB0899_RSSYNCDEL , 0x00 },
{ STB0899_TSINHDELH , 0x02 },
{ STB0899_TSINHDELM , 0x00 },
{ STB0899_TSINHDELL , 0x00 },
{ STB0899_TSLLSTKM , 0x1b },
{ STB0899_TSLLSTKL , 0xb3 },
{ STB0899_TSULSTKM , 0x00 },
{ STB0899_TSULSTKL , 0x00 },
{ STB0899_PCKLENUL , 0xbc },
{ STB0899_PCKLENLL , 0xcc },
{ STB0899_RSPCKLEN , 0xbd },
{ STB0899_TSSTATUS , 0x90 },
{ STB0899_ERRCTRL1 , 0xb6 },
{ STB0899_ERRCTRL2 , 0x95 },
{ STB0899_ERRCTRL3 , 0x8d },
{ STB0899_DMONMSK1 , 0x27 },
{ STB0899_DMONMSK0 , 0x03 },
{ STB0899_DEMAPVIT , 0x5c },
{ STB0899_PLPARM , 0x19 },
{ STB0899_PDELCTRL , 0x48 },
{ STB0899_PDELCTRL2 , 0x00 },
{ STB0899_BBHCTRL1 , 0x00 },
{ STB0899_BBHCTRL2 , 0x00 },
{ STB0899_HYSTTHRESH , 0x77 },
{ STB0899_MATCSTM , 0x00 },
{ STB0899_MATCSTL , 0x00 },
{ STB0899_UPLCSTM , 0x00 },
{ STB0899_UPLCSTL , 0x00 },
{ STB0899_DFLCSTM , 0x00 },
{ STB0899_DFLCSTL , 0x00 },
{ STB0899_SYNCCST , 0x00 },
{ STB0899_SYNCDCSTM , 0x00 },
{ STB0899_SYNCDCSTL , 0x00 },
{ STB0899_ISI_ENTRY , 0x00 },
{ STB0899_ISI_BIT_EN , 0x00 },
{ STB0899_MATSTRM , 0xf0 },
{ STB0899_MATSTRL , 0x02 },
{ STB0899_UPLSTRM , 0x45 },
{ STB0899_UPLSTRL , 0x60 },
{ STB0899_DFLSTRM , 0xe3 },
{ STB0899_DFLSTRL , 0x00 },
{ STB0899_SYNCSTR , 0x47 },
{ STB0899_SYNCDSTRM , 0x05 },
{ STB0899_SYNCDSTRL , 0x18 },
{ STB0899_CFGPDELSTATUS1 , 0x19 },
{ STB0899_CFGPDELSTATUS2 , 0x2b },
{ STB0899_BBFERRORM , 0x00 },
{ STB0899_BBFERRORL , 0x01 },
{ STB0899_UPKTERRORM , 0x00 },
{ STB0899_UPKTERRORL , 0x00 },
{ 0xffff , 0xff },
};
/* STB0899 demodulator configuration for the VP-1041: mixes the
 * board-specific S1 tables above (vp1041_*) with shared S2-path tables
 * (stb0899_s2_*), and wires the STB6100 tuner callbacks used by the
 * demod driver for zig-zag tuning. */
static struct stb0899_config vp1041_stb0899_config = {
.init_dev = vp1041_stb0899_s1_init_1,
.init_s2_demod = stb0899_s2_init_2,
.init_s1_demod = vp1041_stb0899_s1_init_3,
.init_s2_fec = stb0899_s2_init_4,
.init_tst = stb0899_s1_init_5,
.demod_address = 0x68, /* 0xd0 >> 1 */
.xtal_freq = 27000000,
.inversion = IQ_SWAP_ON,
.lo_clk = 76500000,
.hi_clk = 99000000,
/* DVB-S2 acquisition/tracking parameters: driver-provided defaults. */
.esno_ave = STB0899_DVBS2_ESNO_AVE,
.esno_quant = STB0899_DVBS2_ESNO_QUANT,
.avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE,
.avframes_fine = STB0899_DVBS2_AVFRAMES_FINE,
.miss_threshold = STB0899_DVBS2_MISS_THRESHOLD,
.uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ,
.uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK,
.uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF,
.sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT,
.btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS,
.btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
.crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS,
.ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER,
.tuner_get_frequency = stb6100_get_frequency,
.tuner_set_frequency = stb6100_set_frequency,
.tuner_set_bandwidth = stb6100_set_bandwidth,
.tuner_get_bandwidth = stb6100_get_bandwidth,
.tuner_set_rfsiggain = NULL,
};
/* STB6100 silicon tuner: I2C address 0x60, 27 MHz reference clock. */
static struct stb6100_config vp1041_stb6100_config = {
.tuner_address = 0x60,
.refclock = 27000000,
};
/*
 * vp1041_frontend_init - power up and attach the VP-1041 frontend stack
 * @mantis: Mantis bridge state (result is stored in mantis->fe)
 * @fe: unused by this board's init path
 *
 * Powers the frontend, soft-resets it, then attaches the STB0899
 * demodulator, the STB6100 tuner and the LNBP21 LNB supply in order.
 *
 * Return: 0 on success, -EIO if power-up fails, -EREMOTEIO if the
 * STB0899 demodulator is not found.
 */
static int vp1041_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
{
 struct i2c_adapter *adapter = &mantis->adapter;
 int ret;

 ret = mantis_frontend_power(mantis, POWER_ON);
 if (ret != 0) {
  dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
   adapter->name,
   ret);
  return -EIO;
 }

 mantis_frontend_soft_reset(mantis);
 msleep(250);

 mantis->fe = dvb_attach(stb0899_attach, &vp1041_stb0899_config, adapter);
 if (!mantis->fe)
  return -EREMOTEIO;

 dprintk(MANTIS_ERROR, 1,
  "found STB0899 DVB-S/DVB-S2 frontend @0x%02x",
  vp1041_stb0899_config.demod_address);

 /* Tuner first; the LNB supply is only probed once the tuner is up. */
 if (dvb_attach(stb6100_attach, mantis->fe, &vp1041_stb6100_config, adapter) &&
     !dvb_attach(lnbp21_attach, mantis->fe, adapter, 0, 0))
  dprintk(MANTIS_ERROR, 1, "No LNBP21 found!");

 dprintk(MANTIS_ERROR, 1, "Done!");
 return 0;
}
/* Board description consumed by the Mantis core driver. */
struct mantis_hwconfig vp1041_config = {
.model_name = MANTIS_MODEL_NAME,
.dev_type = MANTIS_DEV_TYPE,
.ts_size = MANTIS_TS_188,
.baud_rate = MANTIS_BAUD_9600, /* IR/serial settings */
.parity = MANTIS_PARITY_NONE,
.bytes = 0,
.frontend_init = vp1041_frontend_init,
.power = GPIF_A12, /* GPIO lines driving frontend power and reset */
.reset = GPIF_A13,
};
/*
* drivers/media/i2c/as3645a.c - AS3645A and LM3555 flash controllers driver
*
* Copyright (C) 2008-2011 Nokia Corporation
* Copyright (c) 2011, Intel Corporation.
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
* TODO:
* - Check hardware FSTROBE control when sensor driver add support for this
*
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <media/as3645a.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#define AS_TIMER_MS_TO_CODE(t) (((t) - 100) / 50)
#define AS_TIMER_CODE_TO_MS(c) (50 * (c) + 100)
/* Register definitions */
/* Read-only Design info register: Reset state: xxxx 0001 */
#define AS_DESIGN_INFO_REG 0x00
#define AS_DESIGN_INFO_FACTORY(x) (((x) >> 4))
#define AS_DESIGN_INFO_MODEL(x) ((x) & 0x0f)
/* Read-only Version control register: Reset state: 0000 0000
* for first engineering samples
*/
#define AS_VERSION_CONTROL_REG 0x01
#define AS_VERSION_CONTROL_RFU(x) (((x) >> 4))
#define AS_VERSION_CONTROL_VERSION(x) ((x) & 0x0f)
/* Read / Write (Indicator and timer register): Reset state: 0000 1111 */
#define AS_INDICATOR_AND_TIMER_REG 0x02
#define AS_INDICATOR_AND_TIMER_TIMEOUT_SHIFT 0
#define AS_INDICATOR_AND_TIMER_VREF_SHIFT 4
#define AS_INDICATOR_AND_TIMER_INDICATOR_SHIFT 6
/* Read / Write (Current set register): Reset state: 0110 1001 */
#define AS_CURRENT_SET_REG 0x03
#define AS_CURRENT_ASSIST_LIGHT_SHIFT 0
#define AS_CURRENT_LED_DET_ON (1 << 3)
#define AS_CURRENT_FLASH_CURRENT_SHIFT 4
/* Read / Write (Control register): Reset state: 1011 0100 */
#define AS_CONTROL_REG 0x04
#define AS_CONTROL_MODE_SETTING_SHIFT 0
#define AS_CONTROL_STROBE_ON (1 << 2)
#define AS_CONTROL_OUT_ON (1 << 3)
#define AS_CONTROL_EXT_TORCH_ON (1 << 4)
#define AS_CONTROL_STROBE_TYPE_EDGE (0 << 5)
#define AS_CONTROL_STROBE_TYPE_LEVEL (1 << 5)
#define AS_CONTROL_COIL_PEAK_SHIFT 6
/* Read only (D3 is read / write) (Fault and info): Reset state: 0000 x000 */
#define AS_FAULT_INFO_REG 0x05
#define AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT (1 << 1)
#define AS_FAULT_INFO_INDICATOR_LED (1 << 2)
#define AS_FAULT_INFO_LED_AMOUNT (1 << 3)
#define AS_FAULT_INFO_TIMEOUT (1 << 4)
#define AS_FAULT_INFO_OVER_TEMPERATURE (1 << 5)
#define AS_FAULT_INFO_SHORT_CIRCUIT (1 << 6)
#define AS_FAULT_INFO_OVER_VOLTAGE (1 << 7)
/* Boost register */
#define AS_BOOST_REG 0x0d
#define AS_BOOST_CURRENT_DISABLE (0 << 0)
#define AS_BOOST_CURRENT_ENABLE (1 << 0)
/* Password register is used to unlock boost register writing */
#define AS_PASSWORD_REG 0x0f
#define AS_PASSWORD_UNLOCK_VALUE 0x55
/* Chip operation modes, pre-shifted into the AS_CONTROL_REG
 * mode-setting field so they can be OR'ed directly into the register. */
enum as_mode {
AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT,
AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT,
AS_MODE_ASSIST = 2 << AS_CONTROL_MODE_SETTING_SHIFT,
AS_MODE_FLASH = 3 << AS_CONTROL_MODE_SETTING_SHIFT,
};
/*
* struct as3645a
*
* @subdev: V4L2 subdev
* @pdata: Flash platform data
* @power_lock: Protects power_count
* @power_count: Power reference count
* @led_mode: V4L2 flash LED mode
* @timeout: Flash timeout in microseconds
* @flash_current: Flash current (0=200mA ... 15=500mA). Maximum
* values are 400mA for two LEDs and 500mA for one LED.
* @assist_current: Torch/Assist light current (0=20mA, 1=40mA ... 7=160mA)
* @indicator_current: Indicator LED current (0=0mA, 1=2.5mA ... 4=10mA)
* @strobe_source: Flash strobe source (software or external)
*/
/* Per-device driver state; fields are documented in the kernel-doc
 * comment above. */
struct as3645a {
struct v4l2_subdev subdev;
const struct as3645a_platform_data *pdata;
struct mutex power_lock; /* protects power_count */
int power_count;
/* Controls */
struct v4l2_ctrl_handler ctrls;
enum v4l2_flash_led_mode led_mode;
unsigned int timeout; /* in microseconds, see as3645a_set_config() */
u8 flash_current;
u8 assist_current;
u8 indicator_current;
enum v4l2_flash_strobe_source strobe_source;
};
#define to_as3645a(sd) container_of(sd, struct as3645a, subdev)
/* Write one chip register over SMBus and trace the result.
 * Returns zero on success or a negative errno. */
static int as3645a_write(struct as3645a *flash, u8 addr, u8 val)
{
 struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
 int ret = i2c_smbus_write_byte_data(client, addr, val);

 dev_dbg(&client->dev, "Write Addr:%02X Val:%02X %s\n", addr, val,
  ret < 0 ? "fail" : "ok");

 return ret;
}
/* Read one chip register over SMBus and trace the result.
 * Returns the data byte on success or a negative errno. */
static int as3645a_read(struct as3645a *flash, u8 addr)
{
 struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
 int ret = i2c_smbus_read_byte_data(client, addr);

 dev_dbg(&client->dev, "Read Addr:%02X Val:%02X %s\n", addr, ret,
  ret < 0 ? "fail" : "ok");

 return ret;
}
/* -----------------------------------------------------------------------------
* Hardware configuration and trigger
*/
/*
 * as3645a_set_config - Set flash configuration registers
 * @flash: The flash
 *
 * Configure the hardware with flash, assist and indicator currents, as well as
 * flash timeout.
 *
 * Return 0 on success, or a negative error code if an I2C communication error
 * occurred.
 */
static int as3645a_set_config(struct as3645a *flash)
{
int ret;
u8 val;
/* Flash and assist currents share AS_CURRENT_SET_REG; keep LED
 * detection enabled. */
val = (flash->flash_current << AS_CURRENT_FLASH_CURRENT_SHIFT)
| (flash->assist_current << AS_CURRENT_ASSIST_LIGHT_SHIFT)
| AS_CURRENT_LED_DET_ON;
ret = as3645a_write(flash, AS_CURRENT_SET_REG, val);
if (ret < 0)
return ret;
/* flash->timeout is in microseconds; AS_TIMER_MS_TO_CODE() expects
 * milliseconds. Indicator current 0 means "off", so the register
 * field stores (current - 1) for non-zero settings. */
val = AS_TIMER_MS_TO_CODE(flash->timeout / 1000)
<< AS_INDICATOR_AND_TIMER_TIMEOUT_SHIFT;
val |= (flash->pdata->vref << AS_INDICATOR_AND_TIMER_VREF_SHIFT)
| ((flash->indicator_current ? flash->indicator_current - 1 : 0)
<< AS_INDICATOR_AND_TIMER_INDICATOR_SHIFT);
return as3645a_write(flash, AS_INDICATOR_AND_TIMER_REG, val);
}
/*
 * as3645a_set_control - Program the chip control register
 * @flash: The flash
 * @mode: Desired output mode
 * @on: Desired output state
 *
 * Build the control register value from the coil peak setting, the
 * requested mode and output state, enabling level-sensitive external
 * strobe when flash mode uses an external strobe source.
 *
 * Return 0 on success, or a negative error code if an I2C communication
 * error occurred.
 */
static int
as3645a_set_control(struct as3645a *flash, enum as_mode mode, bool on)
{
 u8 val = mode | (flash->pdata->peak << AS_CONTROL_COIL_PEAK_SHIFT);

 if (on)
  val |= AS_CONTROL_OUT_ON;

 if (flash->led_mode == V4L2_FLASH_LED_MODE_FLASH &&
     flash->strobe_source == V4L2_FLASH_STROBE_SOURCE_EXTERNAL)
  val |= AS_CONTROL_STROBE_TYPE_LEVEL | AS_CONTROL_STROBE_ON;

 return as3645a_write(flash, AS_CONTROL_REG, val);
}
/*
 * as3645a_set_output - Configure output and operation mode
 * @flash: Flash controller
 * @strobe: Strobe the flash (only valid in flash mode)
 *
 * Derive the chip operation mode and output state from the current LED
 * mode: indicator mode follows the indicator current, torch (assist)
 * mode is always on, and flash mode follows the strobe argument.
 *
 * The AS3645A can't control the indicator LED independently of the
 * flash/torch LED, so V4L2_FLASH_LED_MODE_NONE maps to indicator mode.
 */
static int as3645a_set_output(struct as3645a *flash, bool strobe)
{
 enum as_mode mode;
 bool enable;

 if (flash->led_mode == V4L2_FLASH_LED_MODE_NONE) {
  mode = AS_MODE_INDICATOR;
  enable = flash->indicator_current != 0;
 } else if (flash->led_mode == V4L2_FLASH_LED_MODE_TORCH) {
  mode = AS_MODE_ASSIST;
  enable = true;
 } else if (flash->led_mode == V4L2_FLASH_LED_MODE_FLASH) {
  mode = AS_MODE_FLASH;
  enable = strobe;
 } else {
  BUG();
 }

 /* Configure output parameters and operation mode. */
 return as3645a_set_control(flash, mode, enable);
}
/* -----------------------------------------------------------------------------
* V4L2 controls
*/
/* Return 1 if the LED output is currently on, 0 if off, or a negative
 * errno if reading the control register fails. */
static int as3645a_is_active(struct as3645a *flash)
{
 int reg = as3645a_read(flash, AS_CONTROL_REG);

 if (reg < 0)
  return reg;

 return (reg & AS_CONTROL_OUT_ON) ? 1 : 0;
}
/* Read and log the fault register. Returns the raw fault bits (>= 0)
 * or a negative errno on I2C failure. Reading clears the hardware
 * fault status, so callers get a one-shot snapshot. */
static int as3645a_read_fault(struct as3645a *flash)
{
struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
int rval;
/* NOTE: reading register clear fault status */
rval = as3645a_read(flash, AS_FAULT_INFO_REG);
if (rval < 0)
return rval;
if (rval & AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT)
dev_dbg(&client->dev, "Inductor Peak limit fault\n");
if (rval & AS_FAULT_INFO_INDICATOR_LED)
dev_dbg(&client->dev, "Indicator LED fault: "
"Short circuit or open loop\n");
/* LED_AMOUNT is informational (1 vs 2 LEDs), not a fault. */
dev_dbg(&client->dev, "%u connected LEDs\n",
rval & AS_FAULT_INFO_LED_AMOUNT ? 2 : 1);
if (rval & AS_FAULT_INFO_TIMEOUT)
dev_dbg(&client->dev, "Timeout fault\n");
if (rval & AS_FAULT_INFO_OVER_TEMPERATURE)
dev_dbg(&client->dev, "Over temperature fault\n");
if (rval & AS_FAULT_INFO_SHORT_CIRCUIT)
dev_dbg(&client->dev, "Short circuit fault\n");
if (rval & AS_FAULT_INFO_OVER_VOLTAGE)
dev_dbg(&client->dev, "Over voltage fault: "
"Indicates missing capacitor or open connection\n");
return rval;
}
/* g_volatile_ctrl handler: refresh the two volatile controls (fault
 * bits and strobe status) from the hardware. Returns 0 on success or
 * a negative errno from the underlying I2C read. */
static int as3645a_get_ctrl(struct v4l2_ctrl *ctrl)
{
struct as3645a *flash =
container_of(ctrl->handler, struct as3645a, ctrls);
struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
int value;
switch (ctrl->id) {
case V4L2_CID_FLASH_FAULT:
/* Translate raw chip fault bits to V4L2 fault flags. */
value = as3645a_read_fault(flash);
if (value < 0)
return value;
ctrl->cur.val = 0;
if (value & AS_FAULT_INFO_SHORT_CIRCUIT)
ctrl->cur.val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
if (value & AS_FAULT_INFO_OVER_TEMPERATURE)
ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
if (value & AS_FAULT_INFO_TIMEOUT)
ctrl->cur.val |= V4L2_FLASH_FAULT_TIMEOUT;
if (value & AS_FAULT_INFO_OVER_VOLTAGE)
ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_VOLTAGE;
if (value & AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT)
ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_CURRENT;
if (value & AS_FAULT_INFO_INDICATOR_LED)
ctrl->cur.val |= V4L2_FLASH_FAULT_INDICATOR;
break;
case V4L2_CID_FLASH_STROBE_STATUS:
/* Strobe status is only meaningful in flash mode. */
if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
ctrl->cur.val = 0;
break;
}
value = as3645a_is_active(flash);
if (value < 0)
return value;
ctrl->cur.val = value;
break;
}
dev_dbg(&client->dev, "G_CTRL %08x:%d\n", ctrl->id, ctrl->cur.val);
return 0;
}
/* s_ctrl handler: apply a control change to the hardware, or just cache
 * it when it does not affect the currently selected LED mode. Returns 0
 * on success, a negative errno on I2C failure, or -EBUSY when the
 * control cannot be changed in the current mode. */
static int as3645a_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct as3645a *flash =
container_of(ctrl->handler, struct as3645a, ctrls);
struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
int ret;
dev_dbg(&client->dev, "S_CTRL %08x:%d\n", ctrl->id, ctrl->val);
/* If a control that doesn't apply to the current mode is modified,
 * we store the value and return immediately. The setting will be
 * applied when the LED mode is changed. Otherwise we apply the setting
 * immediately.
 */
switch (ctrl->id) {
case V4L2_CID_FLASH_LED_MODE:
/* Mode changes are refused while the indicator is lit. */
if (flash->indicator_current)
return -EBUSY;
ret = as3645a_set_config(flash);
if (ret < 0)
return ret;
flash->led_mode = ctrl->val;
return as3645a_set_output(flash, false);
case V4L2_CID_FLASH_STROBE_SOURCE:
flash->strobe_source = ctrl->val;
/* Applies to flash mode only. */
if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
break;
return as3645a_set_output(flash, false);
case V4L2_CID_FLASH_STROBE:
if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
return -EBUSY;
return as3645a_set_output(flash, true);
case V4L2_CID_FLASH_STROBE_STOP:
if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
return -EBUSY;
return as3645a_set_output(flash, false);
case V4L2_CID_FLASH_TIMEOUT:
flash->timeout = ctrl->val;
/* Applies to flash mode only. */
if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
break;
return as3645a_set_config(flash);
case V4L2_CID_FLASH_INTENSITY:
/* Convert from the control's mA scale to the register code. */
flash->flash_current = (ctrl->val - AS3645A_FLASH_INTENSITY_MIN)
/ AS3645A_FLASH_INTENSITY_STEP;
/* Applies to flash mode only. */
if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
break;
return as3645a_set_config(flash);
case V4L2_CID_FLASH_TORCH_INTENSITY:
flash->assist_current =
(ctrl->val - AS3645A_TORCH_INTENSITY_MIN)
/ AS3645A_TORCH_INTENSITY_STEP;
/* Applies to torch mode only. */
if (flash->led_mode != V4L2_FLASH_LED_MODE_TORCH)
break;
return as3645a_set_config(flash);
case V4L2_CID_FLASH_INDICATOR_INTENSITY:
/* The indicator can only be changed while the LED mode is
 * NONE (indicator mode on the chip). */
if (flash->led_mode != V4L2_FLASH_LED_MODE_NONE)
return -EBUSY;
flash->indicator_current =
(ctrl->val - AS3645A_INDICATOR_INTENSITY_MIN)
/ AS3645A_INDICATOR_INTENSITY_STEP;
ret = as3645a_set_config(flash);
if (ret < 0)
return ret;
/* Only toggle the output when crossing the on/off boundary. */
if ((ctrl->val == 0) == (ctrl->cur.val == 0))
break;
return as3645a_set_output(flash, false);
}
return 0;
}
/* V4L2 control operations. */
static const struct v4l2_ctrl_ops as3645a_ctrl_ops = {
.g_volatile_ctrl = as3645a_get_ctrl,
.s_ctrl = as3645a_set_ctrl,
};
/* -----------------------------------------------------------------------------
* V4L2 subdev core operations
*/
/* Put the device into a known state: clear stale faults, program the
 * configuration registers and turn the output off. Returns 0 on
 * success, a negative errno on I2C failure, or -EIO when any fault bit
 * other than the informational LED-amount bit is set afterwards. */
static int as3645a_setup(struct as3645a *flash)
{
struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
int ret;
/* clear errors */
ret = as3645a_read(flash, AS_FAULT_INFO_REG);
if (ret < 0)
return ret;
dev_dbg(&client->dev, "Fault info: %02x\n", ret);
ret = as3645a_set_config(flash);
if (ret < 0)
return ret;
ret = as3645a_set_output(flash, false);
if (ret < 0)
return ret;
/* read status */
ret = as3645a_read_fault(flash);
if (ret < 0)
return ret;
dev_dbg(&client->dev, "AS_INDICATOR_AND_TIMER_REG: %02x\n",
as3645a_read(flash, AS_INDICATOR_AND_TIMER_REG));
dev_dbg(&client->dev, "AS_CURRENT_SET_REG: %02x\n",
as3645a_read(flash, AS_CURRENT_SET_REG));
dev_dbg(&client->dev, "AS_CONTROL_REG: %02x\n",
as3645a_read(flash, AS_CONTROL_REG));
/* Any remaining fault except the LED-amount info bit is fatal. */
return ret & ~AS_FAULT_INFO_LED_AMOUNT ? -EIO : 0;
}
/* Unlocked power transition helper. Before cutting power the chip is
 * forced to external-torch mode with the output off; after powering up
 * the defaults are reprogrammed via as3645a_setup(), rolling the power
 * rail back on failure. Caller must hold power_lock. */
static int __as3645a_set_power(struct as3645a *flash, int on)
{
int ret;
if (!on)
as3645a_set_control(flash, AS_MODE_EXT_TORCH, false);
if (flash->pdata->set_power) {
ret = flash->pdata->set_power(&flash->subdev, on);
if (ret < 0)
return ret;
}
if (!on)
return 0;
ret = as3645a_setup(flash);
if (ret < 0) {
/* Roll back: don't leave the rail on with a misconfigured chip. */
if (flash->pdata->set_power)
flash->pdata->set_power(&flash->subdev, 0);
}
return ret;
}
/* Reference-counted power control (V4L2 s_power). The hardware is only
 * touched on the 0 -> 1 and 1 -> 0 transitions of power_count. */
static int as3645a_set_power(struct v4l2_subdev *sd, int on)
{
struct as3645a *flash = to_as3645a(sd);
int ret = 0;
mutex_lock(&flash->power_lock);
/* power_count == 1 when turning off, == 0 when turning on. */
if (flash->power_count == !on) {
ret = __as3645a_set_power(flash, !!on);
if (ret < 0)
goto done;
}
flash->power_count += on ? 1 : -1;
WARN_ON(flash->power_count < 0);
done:
mutex_unlock(&flash->power_lock);
return ret;
}
/* Subdev .registered hook: power the chip up, verify its identity
 * (model/RFU from the design-info and version registers), disable the
 * boost current via the password-protected boost register, program
 * defaults, then power back down. Returns 0 on success, -ENODEV if the
 * chip is not an AS3645A, or a negative errno on I2C failure. */
static int as3645a_registered(struct v4l2_subdev *sd)
{
struct as3645a *flash = to_as3645a(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
int rval, man, model, rfu, version;
const char *vendor;
/* Power up the flash driver and read manufacturer ID, model ID, RFU
 * and version.
 */
rval = as3645a_set_power(&flash->subdev, 1);
if (rval < 0)
return rval;
rval = as3645a_read(flash, AS_DESIGN_INFO_REG);
if (rval < 0)
goto power_off;
man = AS_DESIGN_INFO_FACTORY(rval);
model = AS_DESIGN_INFO_MODEL(rval);
rval = as3645a_read(flash, AS_VERSION_CONTROL_REG);
if (rval < 0)
goto power_off;
rfu = AS_VERSION_CONTROL_RFU(rval);
version = AS_VERSION_CONTROL_VERSION(rval);
/* Verify the chip model and version. */
if (model != 0x01 || rfu != 0x00) {
dev_err(&client->dev, "AS3645A not detected "
"(model %d rfu %d)\n", model, rfu);
rval = -ENODEV;
goto power_off;
}
/* Factory code -> human-readable vendor name, for the log only. */
switch (man) {
case 1:
vendor = "AMS, Austria Micro Systems";
break;
case 2:
vendor = "ADI, Analog Devices Inc.";
break;
case 3:
vendor = "NSC, National Semiconductor";
break;
case 4:
vendor = "NXP";
break;
case 5:
vendor = "TI, Texas Instrument";
break;
default:
vendor = "Unknown";
}
dev_info(&client->dev, "Chip vendor: %s (%d) Version: %d\n", vendor,
man, version);
/* The boost register is write-locked; unlock it first. */
rval = as3645a_write(flash, AS_PASSWORD_REG, AS_PASSWORD_UNLOCK_VALUE);
if (rval < 0)
goto power_off;
rval = as3645a_write(flash, AS_BOOST_REG, AS_BOOST_CURRENT_DISABLE);
if (rval < 0)
goto power_off;
/* Setup default values. This makes sure that the chip is in a known
 * state, in case the power rail can't be controlled.
 */
rval = as3645a_setup(flash);
power_off:
as3645a_set_power(&flash->subdev, 0);
return rval;
}
/* Subdev open/close: take/release one power reference per file handle. */
static int as3645a_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
return as3645a_set_power(sd, 1);
}
static int as3645a_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
return as3645a_set_power(sd, 0);
}
/* V4L2 subdev operation tables. */
static const struct v4l2_subdev_core_ops as3645a_core_ops = {
.s_power = as3645a_set_power,
};
static const struct v4l2_subdev_ops as3645a_ops = {
.core = &as3645a_core_ops,
};
static const struct v4l2_subdev_internal_ops as3645a_internal_ops = {
.registered = as3645a_registered,
.open = as3645a_open,
.close = as3645a_close,
};
/* -----------------------------------------------------------------------------
* I2C driver
*/
#ifdef CONFIG_PM
/* System suspend: power the chip down, but only if it was in use
 * (power_count > 0); the reference count itself is left untouched so
 * resume can restore the previous state. */
static int as3645a_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct as3645a *flash = to_as3645a(subdev);
int rval;
if (flash->power_count == 0)
return 0;
rval = __as3645a_set_power(flash, 0);
dev_dbg(&client->dev, "Suspend %s\n", rval < 0 ? "failed" : "ok");
return rval;
}
/* System resume: re-apply power if the device was in use at suspend. */
static int as3645a_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct as3645a *flash = to_as3645a(subdev);
int rval;
if (flash->power_count == 0)
return 0;
rval = __as3645a_set_power(flash, 1);
dev_dbg(&client->dev, "Resume %s\n", rval < 0 ? "fail" : "ok");
return rval;
}
#else
#define as3645a_suspend NULL
#define as3645a_resume NULL
#endif /* CONFIG_PM */
/*
 * as3645a_init_controls - Create controls
 * @flash: The flash
 *
 * The number of LEDs reported in platform data is used to compute default
 * limits. Parameters passed through platform data can override those limits.
 *
 * Return 0 on success or the control handler's error code.
 */
static int as3645a_init_controls(struct as3645a *flash)
{
const struct as3645a_platform_data *pdata = flash->pdata;
struct v4l2_ctrl *ctrl;
int maximum;
v4l2_ctrl_handler_init(&flash->ctrls, 10);
/* V4L2_CID_FLASH_LED_MODE */
v4l2_ctrl_new_std_menu(&flash->ctrls, &as3645a_ctrl_ops,
V4L2_CID_FLASH_LED_MODE, 2, ~7,
V4L2_FLASH_LED_MODE_NONE);
/* V4L2_CID_FLASH_STROBE_SOURCE: the external entry is only exposed
 * when the board wires an external strobe line. */
v4l2_ctrl_new_std_menu(&flash->ctrls, &as3645a_ctrl_ops,
V4L2_CID_FLASH_STROBE_SOURCE,
pdata->ext_strobe ? 1 : 0,
pdata->ext_strobe ? ~3 : ~1,
V4L2_FLASH_STROBE_SOURCE_SOFTWARE);
flash->strobe_source = V4L2_FLASH_STROBE_SOURCE_SOFTWARE;
/* V4L2_CID_FLASH_STROBE */
v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
V4L2_CID_FLASH_STROBE, 0, 0, 0, 0);
/* V4L2_CID_FLASH_STROBE_STOP */
v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0);
/* V4L2_CID_FLASH_STROBE_STATUS: read from hardware on each get. */
ctrl = v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
V4L2_CID_FLASH_STROBE_STATUS, 0, 1, 1, 1);
if (ctrl != NULL)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
/* V4L2_CID_FLASH_TIMEOUT */
maximum = pdata->timeout_max;
v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
V4L2_CID_FLASH_TIMEOUT, AS3645A_FLASH_TIMEOUT_MIN,
maximum, AS3645A_FLASH_TIMEOUT_STEP, maximum);
flash->timeout = maximum;
/* V4L2_CID_FLASH_INTENSITY */
maximum = pdata->flash_max_current;
v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
V4L2_CID_FLASH_INTENSITY, AS3645A_FLASH_INTENSITY_MIN,
maximum, AS3645A_FLASH_INTENSITY_STEP, maximum);
flash->flash_current = (maximum - AS3645A_FLASH_INTENSITY_MIN)
/ AS3645A_FLASH_INTENSITY_STEP;
/* V4L2_CID_FLASH_TORCH_INTENSITY */
maximum = pdata->torch_max_current;
v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
V4L2_CID_FLASH_TORCH_INTENSITY,
AS3645A_TORCH_INTENSITY_MIN, maximum,
AS3645A_TORCH_INTENSITY_STEP,
AS3645A_TORCH_INTENSITY_MIN);
flash->assist_current = 0;
/* V4L2_CID_FLASH_INDICATOR_INTENSITY */
v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
V4L2_CID_FLASH_INDICATOR_INTENSITY,
AS3645A_INDICATOR_INTENSITY_MIN,
AS3645A_INDICATOR_INTENSITY_MAX,
AS3645A_INDICATOR_INTENSITY_STEP,
AS3645A_INDICATOR_INTENSITY_MIN);
flash->indicator_current = 0;
/* V4L2_CID_FLASH_FAULT: volatile, refreshed from the fault register. */
ctrl = v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
V4L2_CID_FLASH_FAULT, 0,
V4L2_FLASH_FAULT_OVER_VOLTAGE |
V4L2_FLASH_FAULT_TIMEOUT |
V4L2_FLASH_FAULT_OVER_TEMPERATURE |
V4L2_FLASH_FAULT_SHORT_CIRCUIT, 0, 0);
if (ctrl != NULL)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
flash->subdev.ctrl_handler = &flash->ctrls;
return flash->ctrls.error;
}
/* I2C probe: allocate state, register the V4L2 subdev and controls,
 * and set up the media entity. The chip itself is only touched later,
 * in the .registered hook. Returns 0 on success or a negative errno. */
static int as3645a_probe(struct i2c_client *client,
const struct i2c_device_id *devid)
{
struct as3645a *flash;
int ret;
/* This driver is platform-data only. */
if (client->dev.platform_data == NULL)
return -ENODEV;
flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL);
if (flash == NULL)
return -ENOMEM;
flash->pdata = client->dev.platform_data;
v4l2_i2c_subdev_init(&flash->subdev, client, &as3645a_ops);
flash->subdev.internal_ops = &as3645a_internal_ops;
flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
ret = as3645a_init_controls(flash);
if (ret < 0)
goto done;
ret = media_entity_init(&flash->subdev.entity, 0, NULL, 0);
if (ret < 0)
goto done;
flash->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH;
mutex_init(&flash->power_lock);
flash->led_mode = V4L2_FLASH_LED_MODE_NONE;
done:
/* On failure, free the controls; flash itself is devm-managed. */
if (ret < 0)
v4l2_ctrl_handler_free(&flash->ctrls);
return ret;
}
/* I2C remove: tear down in reverse order of probe. */
static int as3645a_remove(struct i2c_client *client)
{
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
struct as3645a *flash = to_as3645a(subdev);
v4l2_device_unregister_subdev(subdev);
v4l2_ctrl_handler_free(&flash->ctrls);
media_entity_cleanup(&flash->subdev.entity);
mutex_destroy(&flash->power_lock);
return 0;
}
/* Supported device IDs. */
static const struct i2c_device_id as3645a_id_table[] = {
{ AS3645A_NAME, 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, as3645a_id_table);
/* Legacy (non-runtime) PM hooks; NULL when CONFIG_PM is disabled. */
static const struct dev_pm_ops as3645a_pm_ops = {
.suspend = as3645a_suspend,
.resume = as3645a_resume,
};
static struct i2c_driver as3645a_i2c_driver = {
.driver = {
.name = AS3645A_NAME,
.pm = &as3645a_pm_ops,
},
.probe = as3645a_probe,
.remove = as3645a_remove,
.id_table = as3645a_id_table,
};
module_i2c_driver(as3645a_i2c_driver);
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_DESCRIPTION("LED flash driver for AS3645A, LM3555 and their clones");
MODULE_LICENSE("GPL");
/*
* drivers/media/i2c/as3645a.c - AS3645A and LM3555 flash controllers driver
*
* Copyright (C) 2008-2011 Nokia Corporation
* Copyright (c) 2011, Intel Corporation.
*
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
* TODO:
* - Check hardware FSTROBE control when sensor driver add support for this
*
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <media/as3645a.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#define AS_TIMER_MS_TO_CODE(t) (((t) - 100) / 50)
/* Convert a timeout register code back to milliseconds: 100 ms base plus
 * 50 ms per code step (presumably the inverse of AS_TIMER_MS_TO_CODE used
 * below, which is defined outside this chunk -- TODO confirm).
 */
#define AS_TIMER_CODE_TO_MS(c)		(50 * (c) + 100)

/* Register definitions */

/* Read-only Design info register: Reset state: xxxx 0001 */
#define AS_DESIGN_INFO_REG		0x00
#define AS_DESIGN_INFO_FACTORY(x)	(((x) >> 4))
#define AS_DESIGN_INFO_MODEL(x)		((x) & 0x0f)

/* Read-only Version control register: Reset state: 0000 0000
 * for first engineering samples
 */
#define AS_VERSION_CONTROL_REG		0x01
#define AS_VERSION_CONTROL_RFU(x)	(((x) >> 4))
#define AS_VERSION_CONTROL_VERSION(x)	((x) & 0x0f)

/* Read / Write (Indicator and timer register): Reset state: 0000 1111 */
#define AS_INDICATOR_AND_TIMER_REG		0x02
#define AS_INDICATOR_AND_TIMER_TIMEOUT_SHIFT	0
#define AS_INDICATOR_AND_TIMER_VREF_SHIFT	4
#define AS_INDICATOR_AND_TIMER_INDICATOR_SHIFT	6

/* Read / Write (Current set register): Reset state: 0110 1001 */
#define AS_CURRENT_SET_REG			0x03
#define AS_CURRENT_ASSIST_LIGHT_SHIFT		0
#define AS_CURRENT_LED_DET_ON			(1 << 3)
#define AS_CURRENT_FLASH_CURRENT_SHIFT		4

/* Read / Write (Control register): Reset state: 1011 0100 */
#define AS_CONTROL_REG				0x04
#define AS_CONTROL_MODE_SETTING_SHIFT		0
#define AS_CONTROL_STROBE_ON			(1 << 2)
#define AS_CONTROL_OUT_ON			(1 << 3)
#define AS_CONTROL_EXT_TORCH_ON			(1 << 4)
#define AS_CONTROL_STROBE_TYPE_EDGE		(0 << 5)
#define AS_CONTROL_STROBE_TYPE_LEVEL		(1 << 5)
#define AS_CONTROL_COIL_PEAK_SHIFT		6

/* Read only (D3 is read / write) (Fault and info): Reset state: 0000 x000 */
#define AS_FAULT_INFO_REG			0x05
#define AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT	(1 << 1)
#define AS_FAULT_INFO_INDICATOR_LED		(1 << 2)
#define AS_FAULT_INFO_LED_AMOUNT		(1 << 3)
#define AS_FAULT_INFO_TIMEOUT			(1 << 4)
#define AS_FAULT_INFO_OVER_TEMPERATURE		(1 << 5)
#define AS_FAULT_INFO_SHORT_CIRCUIT		(1 << 6)
#define AS_FAULT_INFO_OVER_VOLTAGE		(1 << 7)

/* Boost register */
#define AS_BOOST_REG				0x0d
#define AS_BOOST_CURRENT_DISABLE		(0 << 0)
#define AS_BOOST_CURRENT_ENABLE			(1 << 0)

/* Password register is used to unlock boost register writing */
#define AS_PASSWORD_REG				0x0f
#define AS_PASSWORD_UNLOCK_VALUE		0x55

/* Output modes programmed into the control register mode field
 * (AS_CONTROL_MODE_SETTING_SHIFT of AS_CONTROL_REG).
 */
enum as_mode {
	AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT,
	AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT,
	AS_MODE_ASSIST = 2 << AS_CONTROL_MODE_SETTING_SHIFT,
	AS_MODE_FLASH = 3 << AS_CONTROL_MODE_SETTING_SHIFT,
};
/*
 * struct as3645a - Driver state for one AS3645A flash controller
 *
 * @subdev: V4L2 subdev
 * @pdata: Flash platform data
 * @power_lock: Protects power_count
 * @power_count: Power reference count
 * @ctrls: V4L2 control handler holding the flash controls
 * @led_mode: V4L2 flash LED mode
 * @timeout: Flash timeout in microseconds
 * @flash_current: Flash current (0=200mA ... 15=500mA). Maximum
 *	values are 400mA for two LEDs and 500mA for one LED.
 * @assist_current: Torch/Assist light current (0=20mA, 1=40mA ... 7=160mA)
 * @indicator_current: Indicator LED current (0=0mA, 1=2.5mA ... 4=10mA)
 * @strobe_source: Flash strobe source (software or external)
 */
struct as3645a {
	struct v4l2_subdev subdev;
	const struct as3645a_platform_data *pdata;
	struct mutex power_lock;
	int power_count;

	/* Controls */
	struct v4l2_ctrl_handler ctrls;
	enum v4l2_flash_led_mode led_mode;
	unsigned int timeout;
	u8 flash_current;
	u8 assist_current;
	u8 indicator_current;
	enum v4l2_flash_strobe_source strobe_source;
};

/* Retrieve the driver state from the embedded V4L2 subdev. */
#define to_as3645a(sd) container_of(sd, struct as3645a, subdev)
/* Write one byte to a chip register over SMBus.
 * Return negative errno else zero on success.
 */
static int as3645a_write(struct as3645a *flash, u8 addr, u8 val)
{
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	int ret;

	ret = i2c_smbus_write_byte_data(client, addr, val);

	dev_dbg(&client->dev, "Write Addr:%02X Val:%02X %s\n", addr, val,
		ret < 0 ? "fail" : "ok");

	return ret;
}
/* Read one byte from a chip register over SMBus.
 * Return negative errno else a data byte received from the device.
 */
static int as3645a_read(struct as3645a *flash, u8 addr)
{
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	int ret;

	ret = i2c_smbus_read_byte_data(client, addr);

	dev_dbg(&client->dev, "Read Addr:%02X Val:%02X %s\n", addr, ret,
		ret < 0 ? "fail" : "ok");

	return ret;
}
/* -----------------------------------------------------------------------------
 * Hardware configuration and trigger
 */

/*
 * as3645a_set_config - Program the current and timing registers
 * @flash: The flash
 *
 * Push the cached flash, assist and indicator currents as well as the flash
 * timeout into the hardware.
 *
 * Return 0 on success, or a negative error code if an I2C communication error
 * occurred.
 */
static int as3645a_set_config(struct as3645a *flash)
{
	u8 reg;
	int ret;

	/* Current set register: flash and assist currents, LED detection on */
	reg = AS_CURRENT_LED_DET_ON
	    | (flash->flash_current << AS_CURRENT_FLASH_CURRENT_SHIFT)
	    | (flash->assist_current << AS_CURRENT_ASSIST_LIGHT_SHIFT);

	ret = as3645a_write(flash, AS_CURRENT_SET_REG, reg);
	if (ret < 0)
		return ret;

	/* Indicator/timer register: timeout code, vref, indicator current.
	 * A zero indicator current maps to register code 0 as well.
	 */
	reg = AS_TIMER_MS_TO_CODE(flash->timeout / 1000)
	    << AS_INDICATOR_AND_TIMER_TIMEOUT_SHIFT;
	reg |= flash->pdata->vref << AS_INDICATOR_AND_TIMER_VREF_SHIFT;
	if (flash->indicator_current)
		reg |= (flash->indicator_current - 1)
		    << AS_INDICATOR_AND_TIMER_INDICATOR_SHIFT;

	return as3645a_write(flash, AS_INDICATOR_AND_TIMER_REG, reg);
}
/*
 * as3645a_set_control - Program the control register
 * @flash: The flash
 * @mode: Desired output mode
 * @on: Desired output state
 *
 * Write the operation mode, output enable and coil peak current to the
 * hardware.
 *
 * Return 0 on success, or a negative error code if an I2C communication error
 * occurred.
 */
static int
as3645a_set_control(struct as3645a *flash, enum as_mode mode, bool on)
{
	u8 val;

	val = mode | (flash->pdata->peak << AS_CONTROL_COIL_PEAK_SHIFT);
	if (on)
		val |= AS_CONTROL_OUT_ON;

	/* In flash mode with an external trigger, arm the level-sensitive
	 * strobe input.
	 */
	if (flash->strobe_source == V4L2_FLASH_STROBE_SOURCE_EXTERNAL &&
	    flash->led_mode == V4L2_FLASH_LED_MODE_FLASH)
		val |= AS_CONTROL_STROBE_ON | AS_CONTROL_STROBE_TYPE_LEVEL;

	return as3645a_write(flash, AS_CONTROL_REG, val);
}
/*
 * as3645a_set_output - Configure output and operation mode
 * @flash: Flash controller
 * @strobe: Strobe the flash (only valid in flash mode)
 *
 * Turn the LEDs output on/off and set the operation mode based on the current
 * parameters.
 *
 * The AS3645A can't control the indicator LED independently of the flash/torch
 * LED. If the flash controller is in V4L2_FLASH_LED_MODE_NONE mode, set the
 * chip to indicator mode. Otherwise set it to assist light (torch) or flash
 * mode.
 *
 * In indicator and assist modes, turn the output on/off based on the indicator
 * and torch currents. In software strobe flash mode, turn the output on/off
 * based on the strobe parameter.
 */
static int as3645a_set_output(struct as3645a *flash, bool strobe)
{
	enum as_mode mode;
	bool on;

	if (flash->led_mode == V4L2_FLASH_LED_MODE_NONE) {
		/* Indicator mode: output follows the indicator current. */
		mode = AS_MODE_INDICATOR;
		on = flash->indicator_current != 0;
	} else if (flash->led_mode == V4L2_FLASH_LED_MODE_TORCH) {
		mode = AS_MODE_ASSIST;
		on = true;
	} else if (flash->led_mode == V4L2_FLASH_LED_MODE_FLASH) {
		mode = AS_MODE_FLASH;
		on = strobe;
	} else {
		BUG();
	}

	/* Configure output parameters and operation mode. */
	return as3645a_set_control(flash, mode, on);
}
/* -----------------------------------------------------------------------------
 * V4L2 controls
 */

/* Return 1 if the output stage is currently enabled, 0 if not, or a
 * negative errno on I2C read failure.
 */
static int as3645a_is_active(struct as3645a *flash)
{
	int reg;

	reg = as3645a_read(flash, AS_CONTROL_REG);
	if (reg < 0)
		return reg;

	return (reg & AS_CONTROL_OUT_ON) ? 1 : 0;
}
/* Read the fault register, logging every reported fault condition.
 * Returns the raw fault byte (>= 0) or a negative errno.
 */
static int as3645a_read_fault(struct as3645a *flash)
{
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	int rval;

	/* NOTE: reading register clear fault status */
	rval = as3645a_read(flash, AS_FAULT_INFO_REG);
	if (rval < 0)
		return rval;

	if (rval & AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT)
		dev_dbg(&client->dev, "Inductor Peak limit fault\n");

	if (rval & AS_FAULT_INFO_INDICATOR_LED)
		dev_dbg(&client->dev, "Indicator LED fault: "
			"Short circuit or open loop\n");

	/* This bit reports topology (1 vs 2 LEDs), not a fault. */
	dev_dbg(&client->dev, "%u connected LEDs\n",
		rval & AS_FAULT_INFO_LED_AMOUNT ? 2 : 1);

	if (rval & AS_FAULT_INFO_TIMEOUT)
		dev_dbg(&client->dev, "Timeout fault\n");

	if (rval & AS_FAULT_INFO_OVER_TEMPERATURE)
		dev_dbg(&client->dev, "Over temperature fault\n");

	if (rval & AS_FAULT_INFO_SHORT_CIRCUIT)
		dev_dbg(&client->dev, "Short circuit fault\n");

	if (rval & AS_FAULT_INFO_OVER_VOLTAGE)
		dev_dbg(&client->dev, "Over voltage fault: "
			"Indicates missing capacitor or open connection\n");

	return rval;
}
/* Read back a volatile control's current value from the hardware.
 * Handles the fault bitmap and the strobe status; other controls are
 * non-volatile and never reach this handler.
 */
static int as3645a_get_ctrl(struct v4l2_ctrl *ctrl)
{
	struct as3645a *flash =
		container_of(ctrl->handler, struct as3645a, ctrls);
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	int value;

	switch (ctrl->id) {
	case V4L2_CID_FLASH_FAULT:
		value = as3645a_read_fault(flash);
		if (value < 0)
			return value;

		/* Translate chip fault bits to the V4L2 fault bitmap. */
		ctrl->cur.val = 0;

		if (value & AS_FAULT_INFO_SHORT_CIRCUIT)
			ctrl->cur.val |= V4L2_FLASH_FAULT_SHORT_CIRCUIT;
		if (value & AS_FAULT_INFO_OVER_TEMPERATURE)
			ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_TEMPERATURE;
		if (value & AS_FAULT_INFO_TIMEOUT)
			ctrl->cur.val |= V4L2_FLASH_FAULT_TIMEOUT;
		if (value & AS_FAULT_INFO_OVER_VOLTAGE)
			ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_VOLTAGE;
		if (value & AS_FAULT_INFO_INDUCTOR_PEAK_LIMIT)
			ctrl->cur.val |= V4L2_FLASH_FAULT_OVER_CURRENT;
		if (value & AS_FAULT_INFO_INDICATOR_LED)
			ctrl->cur.val |= V4L2_FLASH_FAULT_INDICATOR;

		break;

	case V4L2_CID_FLASH_STROBE_STATUS:
		/* The strobe status is only meaningful in flash mode. */
		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH) {
			ctrl->cur.val = 0;
			break;
		}

		value = as3645a_is_active(flash);
		if (value < 0)
			return value;

		ctrl->cur.val = value;
		break;
	}

	dev_dbg(&client->dev, "G_CTRL %08x:%d\n", ctrl->id, ctrl->cur.val);

	return 0;
}
/* Apply a control value to the cached state and, where the control is
 * relevant to the current LED mode, to the hardware.
 */
static int as3645a_set_ctrl(struct v4l2_ctrl *ctrl)
{
	struct as3645a *flash =
		container_of(ctrl->handler, struct as3645a, ctrls);
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	int ret;

	dev_dbg(&client->dev, "S_CTRL %08x:%d\n", ctrl->id, ctrl->val);

	/* If a control that doesn't apply to the current mode is modified,
	 * we store the value and return immediately. The setting will be
	 * applied when the LED mode is changed. Otherwise we apply the setting
	 * immediately.
	 */

	switch (ctrl->id) {
	case V4L2_CID_FLASH_LED_MODE:
		/* The indicator LED shares the output stage: refuse a mode
		 * change while it is lit.
		 */
		if (flash->indicator_current)
			return -EBUSY;

		ret = as3645a_set_config(flash);
		if (ret < 0)
			return ret;

		flash->led_mode = ctrl->val;
		return as3645a_set_output(flash, false);

	case V4L2_CID_FLASH_STROBE_SOURCE:
		flash->strobe_source = ctrl->val;

		/* Applies to flash mode only. */
		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
			break;

		return as3645a_set_output(flash, false);

	case V4L2_CID_FLASH_STROBE:
		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
			return -EBUSY;

		return as3645a_set_output(flash, true);

	case V4L2_CID_FLASH_STROBE_STOP:
		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
			return -EBUSY;

		return as3645a_set_output(flash, false);

	case V4L2_CID_FLASH_TIMEOUT:
		flash->timeout = ctrl->val;

		/* Applies to flash mode only. */
		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
			break;

		return as3645a_set_config(flash);

	case V4L2_CID_FLASH_INTENSITY:
		/* Convert the mA control value to the register code. */
		flash->flash_current = (ctrl->val - AS3645A_FLASH_INTENSITY_MIN)
				     / AS3645A_FLASH_INTENSITY_STEP;

		/* Applies to flash mode only. */
		if (flash->led_mode != V4L2_FLASH_LED_MODE_FLASH)
			break;

		return as3645a_set_config(flash);

	case V4L2_CID_FLASH_TORCH_INTENSITY:
		flash->assist_current =
			(ctrl->val - AS3645A_TORCH_INTENSITY_MIN)
			/ AS3645A_TORCH_INTENSITY_STEP;

		/* Applies to torch mode only. */
		if (flash->led_mode != V4L2_FLASH_LED_MODE_TORCH)
			break;

		return as3645a_set_config(flash);

	case V4L2_CID_FLASH_INDICATOR_INTENSITY:
		if (flash->led_mode != V4L2_FLASH_LED_MODE_NONE)
			return -EBUSY;

		flash->indicator_current =
			(ctrl->val - AS3645A_INDICATOR_INTENSITY_MIN)
			/ AS3645A_INDICATOR_INTENSITY_STEP;

		ret = as3645a_set_config(flash);
		if (ret < 0)
			return ret;

		/* Only toggle the output when the current crosses zero in
		 * either direction.
		 */
		if ((ctrl->val == 0) == (ctrl->cur.val == 0))
			break;

		return as3645a_set_output(flash, false);
	}

	return 0;
}
/* Control operations: get is only invoked for volatile controls. */
static const struct v4l2_ctrl_ops as3645a_ctrl_ops = {
	.g_volatile_ctrl = as3645a_get_ctrl,
	.s_ctrl = as3645a_set_ctrl,
};
/* -----------------------------------------------------------------------------
 * V4L2 subdev core operations
 */

/* Put device into known state: clear pending faults, program the cached
 * configuration and output mode, then verify the chip reports no faults
 * other than the (informational) LED-amount bit.
 */
static int as3645a_setup(struct as3645a *flash)
{
	struct i2c_client *client = v4l2_get_subdevdata(&flash->subdev);
	int ret;

	/* clear errors */
	ret = as3645a_read(flash, AS_FAULT_INFO_REG);
	if (ret < 0)
		return ret;

	dev_dbg(&client->dev, "Fault info: %02x\n", ret);

	ret = as3645a_set_config(flash);
	if (ret < 0)
		return ret;

	ret = as3645a_set_output(flash, false);
	if (ret < 0)
		return ret;

	/* read status */
	ret = as3645a_read_fault(flash);
	if (ret < 0)
		return ret;

	dev_dbg(&client->dev, "AS_INDICATOR_AND_TIMER_REG: %02x\n",
		as3645a_read(flash, AS_INDICATOR_AND_TIMER_REG));

	dev_dbg(&client->dev, "AS_CURRENT_SET_REG: %02x\n",
		as3645a_read(flash, AS_CURRENT_SET_REG));

	dev_dbg(&client->dev, "AS_CONTROL_REG: %02x\n",
		as3645a_read(flash, AS_CONTROL_REG));

	/* Any remaining fault bit (ignoring LED amount) means the setup
	 * failed.
	 */
	return ret & ~AS_FAULT_INFO_LED_AMOUNT ? -EIO : 0;
}
/* Unlocked power switch helper: powers the chip up or down via the
 * platform callback and (re)initializes it on power-up.
 * Caller must hold flash->power_lock (or be in a single-threaded path).
 */
static int __as3645a_set_power(struct as3645a *flash, int on)
{
	int ret;

	/* Park the chip in external torch mode before cutting power. */
	if (!on)
		as3645a_set_control(flash, AS_MODE_EXT_TORCH, false);

	if (flash->pdata->set_power) {
		ret = flash->pdata->set_power(&flash->subdev, on);
		if (ret < 0)
			return ret;
	}

	if (!on)
		return 0;

	/* Re-program the chip; undo the power-up on failure. */
	ret = as3645a_setup(flash);
	if (ret < 0 && flash->pdata->set_power)
		flash->pdata->set_power(&flash->subdev, 0);

	return ret;
}
/* Reference-counted power handling for the s_power subdev operation.
 * The hardware is only touched on the 0 <-> 1 transitions of the count.
 */
static int as3645a_set_power(struct v4l2_subdev *sd, int on)
{
	struct as3645a *flash = to_as3645a(sd);
	int ret = 0;

	mutex_lock(&flash->power_lock);

	/* power_count == !on catches exactly the two edges: 0 -> 1 when
	 * powering on, 1 -> 0 when powering off.
	 */
	if (flash->power_count == !on) {
		ret = __as3645a_set_power(flash, !!on);
		if (ret < 0)
			goto done;
	}

	flash->power_count += on ? 1 : -1;
	WARN_ON(flash->power_count < 0);

done:
	mutex_unlock(&flash->power_lock);
	return ret;
}
/* Subdev .registered callback: identify the chip, disable the boost
 * converter and bring the device to a known default state. Leaves the
 * chip powered off on return.
 */
static int as3645a_registered(struct v4l2_subdev *sd)
{
	struct as3645a *flash = to_as3645a(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int rval, man, model, rfu, version;
	const char *vendor;

	/* Power up the flash driver and read manufacturer ID, model ID, RFU
	 * and version.
	 */
	rval = as3645a_set_power(&flash->subdev, 1);
	if (rval < 0)
		return rval;

	rval = as3645a_read(flash, AS_DESIGN_INFO_REG);
	if (rval < 0)
		goto power_off;

	man = AS_DESIGN_INFO_FACTORY(rval);
	model = AS_DESIGN_INFO_MODEL(rval);

	rval = as3645a_read(flash, AS_VERSION_CONTROL_REG);
	if (rval < 0)
		goto power_off;

	rfu = AS_VERSION_CONTROL_RFU(rval);
	version = AS_VERSION_CONTROL_VERSION(rval);

	/* Verify the chip model and version. */
	if (model != 0x01 || rfu != 0x00) {
		dev_err(&client->dev, "AS3645A not detected "
			"(model %d rfu %d)\n", model, rfu);
		rval = -ENODEV;
		goto power_off;
	}

	/* Map the factory code to a human-readable vendor name. */
	switch (man) {
	case 1:
		vendor = "AMS, Austria Micro Systems";
		break;
	case 2:
		vendor = "ADI, Analog Devices Inc.";
		break;
	case 3:
		vendor = "NSC, National Semiconductor";
		break;
	case 4:
		vendor = "NXP";
		break;
	case 5:
		vendor = "TI, Texas Instrument";
		break;
	default:
		vendor = "Unknown";
	}

	dev_info(&client->dev, "Chip vendor: %s (%d) Version: %d\n", vendor,
		 man, version);

	/* The boost register is write-protected behind the password
	 * register; unlock it, then keep the boost converter disabled.
	 */
	rval = as3645a_write(flash, AS_PASSWORD_REG, AS_PASSWORD_UNLOCK_VALUE);
	if (rval < 0)
		goto power_off;

	rval = as3645a_write(flash, AS_BOOST_REG, AS_BOOST_CURRENT_DISABLE);
	if (rval < 0)
		goto power_off;

	/* Setup default values. This makes sure that the chip is in a known
	 * state, in case the power rail can't be controlled.
	 */
	rval = as3645a_setup(flash);

power_off:
	as3645a_set_power(&flash->subdev, 0);

	return rval;
}
/* Subdev node open: take a power reference. */
static int as3645a_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	return as3645a_set_power(sd, 1);
}

/* Subdev node close: drop the power reference taken in open. */
static int as3645a_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	return as3645a_set_power(sd, 0);
}
/* Core subdev operations: only power control is needed. */
static const struct v4l2_subdev_core_ops as3645a_core_ops = {
	.s_power		= as3645a_set_power,
};

static const struct v4l2_subdev_ops as3645a_ops = {
	.core = &as3645a_core_ops,
};

/* Internal operations used by the V4L2 framework, not by bridge drivers. */
static const struct v4l2_subdev_internal_ops as3645a_internal_ops = {
	.registered = as3645a_registered,
	.open = as3645a_open,
	.close = as3645a_close,
};
/* -----------------------------------------------------------------------------
 * I2C driver
 */

#ifdef CONFIG_PM

/* System suspend: power the chip down, but only if a user currently
 * holds a power reference.
 */
static int as3645a_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
	struct as3645a *flash = to_as3645a(subdev);
	int rval;

	if (flash->power_count == 0)
		return 0;

	rval = __as3645a_set_power(flash, 0);

	dev_dbg(&client->dev, "Suspend %s\n", rval < 0 ? "failed" : "ok");

	return rval;
}

/* System resume: restore power if users held a reference at suspend time. */
static int as3645a_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
	struct as3645a *flash = to_as3645a(subdev);
	int rval;

	if (flash->power_count == 0)
		return 0;

	rval = __as3645a_set_power(flash, 1);

	dev_dbg(&client->dev, "Resume %s\n", rval < 0 ? "fail" : "ok");

	return rval;
}

#else

#define as3645a_suspend	NULL
#define as3645a_resume	NULL

#endif /* CONFIG_PM */
/*
 * as3645a_init_controls - Create controls
 * @flash: The flash
 *
 * The number of LEDs reported in platform data is used to compute default
 * limits. Parameters passed through platform data can override those limits.
 *
 * Returns 0 on success or the control handler's accumulated error code.
 */
static int as3645a_init_controls(struct as3645a *flash)
{
	const struct as3645a_platform_data *pdata = flash->pdata;
	struct v4l2_ctrl *ctrl;
	int maximum;

	v4l2_ctrl_handler_init(&flash->ctrls, 10);

	/* V4L2_CID_FLASH_LED_MODE: max 2, menu mask ~7 enables entries
	 * 0..2 (none/flash/torch).
	 */
	v4l2_ctrl_new_std_menu(&flash->ctrls, &as3645a_ctrl_ops,
			       V4L2_CID_FLASH_LED_MODE, 2, ~7,
			       V4L2_FLASH_LED_MODE_NONE);

	/* V4L2_CID_FLASH_STROBE_SOURCE: only expose the external source
	 * when platform data says the strobe line is wired up.
	 */
	v4l2_ctrl_new_std_menu(&flash->ctrls, &as3645a_ctrl_ops,
			       V4L2_CID_FLASH_STROBE_SOURCE,
			       pdata->ext_strobe ? 1 : 0,
			       pdata->ext_strobe ? ~3 : ~1,
			       V4L2_FLASH_STROBE_SOURCE_SOFTWARE);

	flash->strobe_source = V4L2_FLASH_STROBE_SOURCE_SOFTWARE;

	/* V4L2_CID_FLASH_STROBE (button control, no value range) */
	v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
			  V4L2_CID_FLASH_STROBE, 0, 0, 0, 0);

	/* V4L2_CID_FLASH_STROBE_STOP (button control) */
	v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
			  V4L2_CID_FLASH_STROBE_STOP, 0, 0, 0, 0);

	/* V4L2_CID_FLASH_STROBE_STATUS: volatile, read from hardware */
	ctrl = v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
				 V4L2_CID_FLASH_STROBE_STATUS, 0, 1, 1, 1);
	if (ctrl != NULL)
		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;

	/* V4L2_CID_FLASH_TIMEOUT: default to the platform maximum */
	maximum = pdata->timeout_max;

	v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
			  V4L2_CID_FLASH_TIMEOUT, AS3645A_FLASH_TIMEOUT_MIN,
			  maximum, AS3645A_FLASH_TIMEOUT_STEP, maximum);

	flash->timeout = maximum;

	/* V4L2_CID_FLASH_INTENSITY: default to the platform maximum;
	 * cache the corresponding register code.
	 */
	maximum = pdata->flash_max_current;

	v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
			  V4L2_CID_FLASH_INTENSITY, AS3645A_FLASH_INTENSITY_MIN,
			  maximum, AS3645A_FLASH_INTENSITY_STEP, maximum);

	flash->flash_current = (maximum - AS3645A_FLASH_INTENSITY_MIN)
			     / AS3645A_FLASH_INTENSITY_STEP;

	/* V4L2_CID_FLASH_TORCH_INTENSITY: default to minimum */
	maximum = pdata->torch_max_current;

	v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
			  V4L2_CID_FLASH_TORCH_INTENSITY,
			  AS3645A_TORCH_INTENSITY_MIN, maximum,
			  AS3645A_TORCH_INTENSITY_STEP,
			  AS3645A_TORCH_INTENSITY_MIN);

	flash->assist_current = 0;

	/* V4L2_CID_FLASH_INDICATOR_INTENSITY: default off */
	v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
			  V4L2_CID_FLASH_INDICATOR_INTENSITY,
			  AS3645A_INDICATOR_INTENSITY_MIN,
			  AS3645A_INDICATOR_INTENSITY_MAX,
			  AS3645A_INDICATOR_INTENSITY_STEP,
			  AS3645A_INDICATOR_INTENSITY_MIN);

	flash->indicator_current = 0;

	/* V4L2_CID_FLASH_FAULT: volatile read-only bitmap */
	ctrl = v4l2_ctrl_new_std(&flash->ctrls, &as3645a_ctrl_ops,
				 V4L2_CID_FLASH_FAULT, 0,
				 V4L2_FLASH_FAULT_OVER_VOLTAGE |
				 V4L2_FLASH_FAULT_TIMEOUT |
				 V4L2_FLASH_FAULT_OVER_TEMPERATURE |
				 V4L2_FLASH_FAULT_SHORT_CIRCUIT, 0, 0);
	if (ctrl != NULL)
		ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;

	flash->subdev.ctrl_handler = &flash->ctrls;

	return flash->ctrls.error;
}
/* I2C probe: allocate driver state, register the V4L2 subdev and its
 * controls and media entity. Hardware access is deferred to the
 * .registered callback.
 */
static int as3645a_probe(struct i2c_client *client,
			 const struct i2c_device_id *devid)
{
	struct as3645a *flash;
	int ret;

	if (client->dev.platform_data == NULL)
		return -ENODEV;

	/* devm allocation: freed automatically on probe failure or unbind */
	flash = devm_kzalloc(&client->dev, sizeof(*flash), GFP_KERNEL);
	if (flash == NULL)
		return -ENOMEM;

	flash->pdata = client->dev.platform_data;

	v4l2_i2c_subdev_init(&flash->subdev, client, &as3645a_ops);
	flash->subdev.internal_ops = &as3645a_internal_ops;
	flash->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;

	ret = as3645a_init_controls(flash);
	if (ret < 0)
		goto done;

	/* The flash has no media pads. */
	ret = media_entity_init(&flash->subdev.entity, 0, NULL, 0);
	if (ret < 0)
		goto done;

	flash->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH;

	mutex_init(&flash->power_lock);

	flash->led_mode = V4L2_FLASH_LED_MODE_NONE;

done:
	/* Control handler cleanup is safe even if init itself failed. */
	if (ret < 0)
		v4l2_ctrl_handler_free(&flash->ctrls);

	return ret;
}
/* I2C remove: tear down the subdev, controls, media entity and lock.
 * The as3645a structure itself is devm-allocated and freed by the core.
 */
static int as3645a_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct as3645a *flash = to_as3645a(sd);

	v4l2_device_unregister_subdev(sd);
	v4l2_ctrl_handler_free(&flash->ctrls);
	media_entity_cleanup(&flash->subdev.entity);
	mutex_destroy(&flash->power_lock);

	return 0;
}
/* Supported device names for I2C core matching. */
static const struct i2c_device_id as3645a_id_table[] = {
	{ AS3645A_NAME, 0 },
	{ },
};
MODULE_DEVICE_TABLE(i2c, as3645a_id_table);

/* suspend/resume are NULL when CONFIG_PM is disabled (see above). */
static const struct dev_pm_ops as3645a_pm_ops = {
	.suspend = as3645a_suspend,
	.resume = as3645a_resume,
};

static struct i2c_driver as3645a_i2c_driver = {
	.driver	= {
		.name = AS3645A_NAME,
		.pm   = &as3645a_pm_ops,
	},
	.probe	= as3645a_probe,
	.remove	= as3645a_remove,
	.id_table = as3645a_id_table,
};

module_i2c_driver(as3645a_i2c_driver);

MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_DESCRIPTION("LED flash driver for AS3645A, LM3555 and their clones");
MODULE_LICENSE("GPL");
| gpl-2.0 |
crowell/gbadev.kernel | drivers/ata/pata_macio.c | 2530 | 40942 | /*
* Libata based driver for Apple "macio" family of PATA controllers
*
* Copyright 2008/2009 Benjamin Herrenschmidt, IBM Corp
* <benh@kernel.crashing.org>
*
* Some bits and pieces from drivers/ide/ppc/pmac.c
*
*/
#undef DEBUG
#undef DEBUG_DMA
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <asm/macio.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/mediabay.h>
/* DMA-path debug helper: compiles away to a no-op (while still
 * type-checking its arguments) unless DEBUG_DMA is defined.
 */
#ifdef DEBUG_DMA
#define dev_dbgdma(dev, format, arg...)		\
	dev_printk(KERN_DEBUG , dev , format , ## arg)
#else
#define dev_dbgdma(dev, format, arg...)		\
	({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
#endif

#define DRV_NAME	"pata_macio"
#define DRV_VERSION	"0.9"
/* Models of macio ATA controller */
enum {
	controller_ohare,	/* OHare based */
	controller_heathrow,	/* Heathrow/Paddington */
	controller_kl_ata3,	/* KeyLargo ATA-3 */
	controller_kl_ata4,	/* KeyLargo ATA-4 */
	controller_un_ata6,	/* UniNorth2 ATA-6 */
	controller_k2_ata6,	/* K2 ATA-6 */
	controller_sh_ata6,	/* Shasta ATA-6 */
};

/* Human-readable names, indexed by the controller enum above. */
static const char* macio_ata_names[] = {
	"OHare ATA",		/* OHare based */
	"Heathrow ATA",		/* Heathrow/Paddington */
	"KeyLargo ATA-3",	/* KeyLargo ATA-3 (MDMA only) */
	"KeyLargo ATA-4",	/* KeyLargo ATA-4 (UDMA/66) */
	"UniNorth ATA-6",	/* UniNorth2 ATA-6 (UDMA/100) */
	"K2 ATA-6",		/* K2 ATA-6 (UDMA/100) */
	"Shasta ATA-6",		/* Shasta ATA-6 (UDMA/133) */
};
/*
 * Extra registers, both 32-bit little-endian
 */
#define IDE_TIMING_CONFIG	0x200
#define IDE_INTERRUPT		0x300

/* Kauai (U2) ATA has different register setup */
#define IDE_KAUAI_PIO_CONFIG	0x200
#define IDE_KAUAI_ULTRA_CONFIG	0x210
#define IDE_KAUAI_POLL_CONFIG	0x220

/*
 * Timing configuration register definitions
 */

/* Number of IDE_SYSCLK_NS ticks, argument is in nanoseconds */
#define SYSCLK_TICKS(t)		(((t) + IDE_SYSCLK_NS - 1) / IDE_SYSCLK_NS)
#define SYSCLK_TICKS_66(t)	(((t) + IDE_SYSCLK_66_NS - 1) / IDE_SYSCLK_66_NS)
#define IDE_SYSCLK_NS		30	/* 33Mhz cell */
#define IDE_SYSCLK_66_NS	15	/* 66Mhz cell */

/* 133Mhz cell, found in shasta.
 * See comments about 100 Mhz Uninorth 2...
 * Note that PIO_MASK and MDMA_MASK seem to overlap, that's just
 * weird and I don't know why .. at this stage
 */
#define TR_133_PIOREG_PIO_MASK		0xff000fff
#define TR_133_PIOREG_MDMA_MASK		0x00fff800
#define TR_133_UDMAREG_UDMA_MASK	0x0003ffff
#define TR_133_UDMAREG_UDMA_EN		0x00000001

/* 100Mhz cell, found in Uninorth 2 and K2. It appears as a pci device
 * (106b/0033) on uninorth or K2 internal PCI bus and it's clock is
 * controlled like gem or fw. It appears to be an evolution of keylargo
 * ATA4 with a timing register extended to 2x32bits registers (one
 * for PIO & MWDMA and one for UDMA, and a similar DBDMA channel.
 * It has it's own local feature control register as well.
 *
 * After scratching my mind over the timing values, at least for PIO
 * and MDMA, I think I've figured the format of the timing register,
 * though I use pre-calculated tables for UDMA as usual...
 */
#define TR_100_PIO_ADDRSETUP_MASK	0xff000000 /* Size of field unknown */
#define TR_100_PIO_ADDRSETUP_SHIFT	24
#define TR_100_MDMA_MASK		0x00fff000
#define TR_100_MDMA_RECOVERY_MASK	0x00fc0000
#define TR_100_MDMA_RECOVERY_SHIFT	18
#define TR_100_MDMA_ACCESS_MASK		0x0003f000
#define TR_100_MDMA_ACCESS_SHIFT	12
#define TR_100_PIO_MASK			0xff000fff
#define TR_100_PIO_RECOVERY_MASK	0x00000fc0
#define TR_100_PIO_RECOVERY_SHIFT	6
#define TR_100_PIO_ACCESS_MASK		0x0000003f
#define TR_100_PIO_ACCESS_SHIFT		0
#define TR_100_UDMAREG_UDMA_MASK	0x0000ffff
#define TR_100_UDMAREG_UDMA_EN		0x00000001

/* 66Mhz cell, found in KeyLargo. Can do ultra mode 0 to 2 on
 * 40 connector cable and to 4 on 80 connector one.
 * Clock unit is 15ns (66Mhz)
 *
 * 3 Values can be programmed:
 *  - Write data setup, which appears to match the cycle time. They
 *    also call it DIOW setup.
 *  - Ready to pause time (from spec)
 *  - Address setup. That one is weird. I don't see where exactly
 *    it fits in UDMA cycles, I got it's name from an obscure piece
 *    of commented out code in Darwin. They leave it to 0, we do as
 *    well, despite a comment that would lead to think it has a
 *    min value of 45ns.
 * Apple also add 60ns to the write data setup (or cycle time ?) on
 * reads.
 */
#define TR_66_UDMA_MASK			0xfff00000
#define TR_66_UDMA_EN			0x00100000 /* Enable Ultra mode for DMA */
#define TR_66_PIO_ADDRSETUP_MASK	0xe0000000 /* Address setup */
#define TR_66_PIO_ADDRSETUP_SHIFT	29
#define TR_66_UDMA_RDY2PAUS_MASK	0x1e000000 /* Ready 2 pause time */
#define TR_66_UDMA_RDY2PAUS_SHIFT	25
#define TR_66_UDMA_WRDATASETUP_MASK	0x01e00000 /* Write data setup time */
#define TR_66_UDMA_WRDATASETUP_SHIFT	21
#define TR_66_MDMA_MASK			0x000ffc00
#define TR_66_MDMA_RECOVERY_MASK	0x000f8000
#define TR_66_MDMA_RECOVERY_SHIFT	15
#define TR_66_MDMA_ACCESS_MASK		0x00007c00
#define TR_66_MDMA_ACCESS_SHIFT		10
#define TR_66_PIO_MASK			0xe00003ff
#define TR_66_PIO_RECOVERY_MASK		0x000003e0
#define TR_66_PIO_RECOVERY_SHIFT	5
#define TR_66_PIO_ACCESS_MASK		0x0000001f
#define TR_66_PIO_ACCESS_SHIFT		0

/* 33Mhz cell, found in OHare, Heathrow (& Paddington) and KeyLargo
 * Can do pio & mdma modes, clock unit is 30ns (33Mhz)
 *
 * The access time and recovery time can be programmed. Some older
 * Darwin code base limit OHare to 150ns cycle time. I decided to do
 * the same here for safety against broken old hardware ;)
 * The HalfTick bit, when set, adds half a clock (15ns) to the access
 * time and removes one from recovery. It's not supported on KeyLargo
 * implementation afaik. The E bit appears to be set for PIO mode 0 and
 * is used to reach long timings used in this mode.
 */
#define TR_33_MDMA_MASK			0x003ff800
#define TR_33_MDMA_RECOVERY_MASK	0x001f0000
#define TR_33_MDMA_RECOVERY_SHIFT	16
#define TR_33_MDMA_ACCESS_MASK		0x0000f800
#define TR_33_MDMA_ACCESS_SHIFT		11
#define TR_33_MDMA_HALFTICK		0x00200000
#define TR_33_PIO_MASK			0x000007ff
#define TR_33_PIO_E			0x00000400
#define TR_33_PIO_RECOVERY_MASK		0x000003e0
#define TR_33_PIO_RECOVERY_SHIFT	5
#define TR_33_PIO_ACCESS_MASK		0x0000001f
#define TR_33_PIO_ACCESS_SHIFT		0

/*
 * Interrupt register definitions. Only present on newer cells
 * (Keylargo and later afaik) so we don't use it.
 */
#define IDE_INTR_DMA			0x80000000
#define IDE_INTR_DEVICE			0x40000000

/*
 * FCR Register on Kauai. Not sure what bit 0x4 is ...
 */
#define KAUAI_FCR_UATA_MAGIC		0x00000004
#define KAUAI_FCR_UATA_RESET_N		0x00000002
#define KAUAI_FCR_UATA_ENABLE		0x00000001

/* Allow up to 256 DBDMA commands per xfer */
#define MAX_DCMDS		256

/* Don't let a DMA segment go all the way to 64K */
#define MAX_DBDMA_SEG		0xff00

/*
 * Wait 1s for disk to answer on IDE bus after a hard reset
 * of the device (via GPIO/FCR).
 *
 * Some devices seem to "pollute" the bus even after dropping
 * the BSY bit (typically some combo drives slave on the UDMA
 * bus) after a hard reset. Since we hard reset all drives on
 * KeyLargo ATA66, we have to keep that delay around. I may end
 * up not hard resetting anymore on these and keep the delay only
 * for older interfaces instead (we have to reset when coming
 * from MacOS...) --BenH.
 */
#define IDE_WAKEUP_DELAY_MS	1000
struct pata_macio_timing;

/* Per-controller driver state. */
struct pata_macio_priv {
	int				kind;		/* controller_* enum value */
	int				aapl_bus_id;	/* Apple bus ID from the device tree */
	int				mediabay : 1;	/* interface lives in a media bay
							 * NOTE(review): signed 1-bit bitfield
							 * holds 0/-1; only tested for truth --
							 * verify no caller compares against 1 */
	struct device_node		*node;
	struct macio_dev		*mdev;
	struct pci_dev			*pdev;
	struct device			*dev;
	int				irq;
	u32				treg[2][2];	/* cached timing regs [device][word] */
	void __iomem			*tfregs;
	void __iomem			*kauai_fcr;
	struct dbdma_cmd *		dma_table_cpu;
	dma_addr_t			dma_table_dma;
	struct ata_host			*host;
	const struct pata_macio_timing	*timings;	/* table for this controller kind */
};
/* Previous variants of this driver used to calculate timings
 * for various variants of the chip and use tables for others.
 *
 * Not only was this confusing, but in addition, it isn't clear
 * whether our calculation code was correct. It didn't entirely
 * match the darwin code and whatever documentation I could find
 * on these cells
 *
 * I decided to entirely rely on a table instead for this version
 * of the driver. Also, because I don't really care about derated
 * modes and really old HW other than making it work, I'm not going
 * to calculate / snoop timing values for something else than the
 * standard modes.
 */
struct pata_macio_timing {
	int	mode;	/* XFER_* transfer mode, -1 terminates a table */
	u32	reg1;	/* Bits to set in first timing reg */
	u32	reg2;	/* Bits to set in second timing reg */
};
/* Pre-calculated timing tables, one per controller family. Each table is
 * terminated by a mode value of -1. reg2 is only used by the Kauai-style
 * (100/133 MHz) cells, which keep UDMA timings in a second register.
 */
static const struct pata_macio_timing pata_macio_ohare_timings[] = {
	{ XFER_PIO_0,		0x00000526,	0, },
	{ XFER_PIO_1,		0x00000085,	0, },
	{ XFER_PIO_2,		0x00000025,	0, },
	{ XFER_PIO_3,		0x00000025,	0, },
	{ XFER_PIO_4,		0x00000025,	0, },
	{ XFER_MW_DMA_0,	0x00074000,	0, },
	{ XFER_MW_DMA_1,	0x00221000,	0, },
	{ XFER_MW_DMA_2,	0x00211000,	0, },
	{ -1, 0, 0 }
};

static const struct pata_macio_timing pata_macio_heathrow_timings[] = {
	{ XFER_PIO_0,		0x00000526,	0, },
	{ XFER_PIO_1,		0x00000085,	0, },
	{ XFER_PIO_2,		0x00000025,	0, },
	{ XFER_PIO_3,		0x00000025,	0, },
	{ XFER_PIO_4,		0x00000025,	0, },
	{ XFER_MW_DMA_0,	0x00074000,	0, },
	{ XFER_MW_DMA_1,	0x00221000,	0, },
	{ XFER_MW_DMA_2,	0x00211000,	0, },
	{ -1, 0, 0 }
};

static const struct pata_macio_timing pata_macio_kl33_timings[] = {
	{ XFER_PIO_0,		0x00000526,	0, },
	{ XFER_PIO_1,		0x00000085,	0, },
	{ XFER_PIO_2,		0x00000025,	0, },
	{ XFER_PIO_3,		0x00000025,	0, },
	{ XFER_PIO_4,		0x00000025,	0, },
	{ XFER_MW_DMA_0,	0x00084000,	0, },
	{ XFER_MW_DMA_1,	0x00021800,	0, },
	{ XFER_MW_DMA_2,	0x00011800,	0, },
	{ -1, 0, 0 }
};

static const struct pata_macio_timing pata_macio_kl66_timings[] = {
	{ XFER_PIO_0,		0x0000038c,	0, },
	{ XFER_PIO_1,		0x0000020a,	0, },
	{ XFER_PIO_2,		0x00000127,	0, },
	{ XFER_PIO_3,		0x000000c6,	0, },
	{ XFER_PIO_4,		0x00000065,	0, },
	{ XFER_MW_DMA_0,	0x00084000,	0, },
	{ XFER_MW_DMA_1,	0x00029800,	0, },
	{ XFER_MW_DMA_2,	0x00019400,	0, },
	{ XFER_UDMA_0,		0x19100000,	0, },
	{ XFER_UDMA_1,		0x14d00000,	0, },
	{ XFER_UDMA_2,		0x10900000,	0, },
	{ XFER_UDMA_3,		0x0c700000,	0, },
	{ XFER_UDMA_4,		0x0c500000,	0, },
	{ -1, 0, 0 }
};

static const struct pata_macio_timing pata_macio_kauai_timings[] = {
	{ XFER_PIO_0,		0x08000a92,	0, },
	{ XFER_PIO_1,		0x0800060f,	0, },
	{ XFER_PIO_2,		0x0800038b,	0, },
	{ XFER_PIO_3,		0x05000249,	0, },
	{ XFER_PIO_4,		0x04000148,	0, },
	{ XFER_MW_DMA_0,	0x00618000,	0, },
	{ XFER_MW_DMA_1,	0x00209000,	0, },
	{ XFER_MW_DMA_2,	0x00148000,	0, },
	{ XFER_UDMA_0,		0,		0x000070c1, },
	{ XFER_UDMA_1,		0,		0x00005d81, },
	{ XFER_UDMA_2,		0,		0x00004a61, },
	{ XFER_UDMA_3,		0,		0x00003a51, },
	{ XFER_UDMA_4,		0,		0x00002a31, },
	{ XFER_UDMA_5,		0,		0x00002921, },
	{ -1, 0, 0 }
};

static const struct pata_macio_timing pata_macio_shasta_timings[] = {
	{ XFER_PIO_0,		0x0a000c97,	0, },
	{ XFER_PIO_1,		0x07000712,	0, },
	{ XFER_PIO_2,		0x040003cd,	0, },
	{ XFER_PIO_3,		0x0500028b,	0, },
	{ XFER_PIO_4,		0x0400010a,	0, },
	{ XFER_MW_DMA_0,	0x00820800,	0, },
	{ XFER_MW_DMA_1,	0x0028b000,	0, },
	{ XFER_MW_DMA_2,	0x001ca000,	0, },
	{ XFER_UDMA_0,		0,		0x00035901, },
	{ XFER_UDMA_1,		0,		0x000348b1, },
	{ XFER_UDMA_2,		0,		0x00033881, },
	{ XFER_UDMA_3,		0,		0x00033861, },
	{ XFER_UDMA_4,		0,		0x00033841, },
	{ XFER_UDMA_5,		0,		0x00033031, },
	{ XFER_UDMA_6,		0,		0x00033021, },
	{ -1, 0, 0 }
};
static const struct pata_macio_timing *pata_macio_find_timing(
struct pata_macio_priv *priv,
int mode)
{
int i;
for (i = 0; priv->timings[i].mode > 0; i++) {
if (priv->timings[i].mode == mode)
return &priv->timings[i];
}
return NULL;
}
/* Write the cached timing values for @device to the controller.
 * Kauai-style ATA-6 cells split PIO/MDMA and UDMA timings over two
 * registers; older cells use a single timing register.
 */
static void pata_macio_apply_timings(struct ata_port *ap, unsigned int device)
{
	struct pata_macio_priv *priv = ap->private_data;
	void __iomem *rbase = ap->ioaddr.cmd_addr;

	switch (priv->kind) {
	case controller_sh_ata6:
	case controller_un_ata6:
	case controller_k2_ata6:
		writel(priv->treg[device][0], rbase + IDE_KAUAI_PIO_CONFIG);
		writel(priv->treg[device][1], rbase + IDE_KAUAI_ULTRA_CONFIG);
		break;
	default:
		writel(priv->treg[device][0], rbase + IDE_TIMING_CONFIG);
		break;
	}
}
/* Select a device on the port, then reload that device's timings since
 * the timing registers are shared between both devices on the cable.
 */
static void pata_macio_dev_select(struct ata_port *ap, unsigned int device)
{
	ata_sff_dev_select(ap, device);

	pata_macio_apply_timings(ap, device);
}
/* Compute and apply the timing register values for @adev after a mode
 * change. PIO values always go into treg[devno][0]; DMA values may use
 * both words (UDMA on Kauai-style cells lives in treg[devno][1]).
 */
static void pata_macio_set_timings(struct ata_port *ap,
				   struct ata_device *adev)
{
	struct pata_macio_priv *priv = ap->private_data;
	const struct pata_macio_timing *t;

	dev_dbg(priv->dev, "Set timings: DEV=%d,PIO=0x%x (%s),DMA=0x%x (%s)\n",
		adev->devno,
		adev->pio_mode,
		ata_mode_string(ata_xfer_mode2mask(adev->pio_mode)),
		adev->dma_mode,
		ata_mode_string(ata_xfer_mode2mask(adev->dma_mode)));

	/* First clear timings */
	priv->treg[adev->devno][0] = priv->treg[adev->devno][1] = 0;

	/* Now get the PIO timings */
	t = pata_macio_find_timing(priv, adev->pio_mode);
	if (t == NULL) {
		dev_warn(priv->dev, "Invalid PIO timing requested: 0x%x\n",
			 adev->pio_mode);
		t = pata_macio_find_timing(priv, XFER_PIO_0);
	}
	BUG_ON(t == NULL);

	/* PIO timings only ever use the first treg */
	priv->treg[adev->devno][0] |= t->reg1;

	/* Now get DMA timings */
	t = pata_macio_find_timing(priv, adev->dma_mode);
	if (t == NULL || (t->reg1 == 0 && t->reg2 == 0)) {
		dev_dbg(priv->dev, "DMA timing not set yet, using MW_DMA_0\n");
		t = pata_macio_find_timing(priv, XFER_MW_DMA_0);
	}
	BUG_ON(t == NULL);

	/* DMA timings can use both tregs */
	priv->treg[adev->devno][0] |= t->reg1;
	priv->treg[adev->devno][1] |= t->reg2;

	dev_dbg(priv->dev, " -> %08x %08x\n",
		priv->treg[adev->devno][0],
		priv->treg[adev->devno][1]);

	/* Apply to hardware */
	pata_macio_apply_timings(ap, adev->devno);
}
/*
* Blast some well known "safe" values to the timing registers at init or
* wakeup from sleep time, before we do real calculation
*/
/* Load conservative, known-good values into the cached timing registers
 * depending on the controller cell; the second register only exists on
 * the ATA6-capable cells and stays zero elsewhere.
 */
static void pata_macio_default_timings(struct pata_macio_priv *priv)
{
	unsigned int reg1, reg2;

	switch (priv->kind) {
	case controller_sh_ata6:
		reg1 = 0x0a820c97;
		reg2 = 0x00033031;
		break;
	case controller_un_ata6:
	case controller_k2_ata6:
		reg1 = 0x08618a92;
		reg2 = 0x00002921;
		break;
	case controller_kl_ata4:
		reg1 = 0x0008438c;
		reg2 = 0;
		break;
	case controller_kl_ata3:
		reg1 = 0x00084526;
		reg2 = 0;
		break;
	case controller_heathrow:
	case controller_ohare:
	default:
		reg1 = 0x00074526;
		reg2 = 0;
		break;
	}

	/* Same defaults for master and slave */
	priv->treg[0][0] = priv->treg[1][0] = reg1;
	priv->treg[0][1] = priv->treg[1][1] = reg2;
}
/* Cable detection hook: derive the cable type from the device-tree.
 *
 * Fixes vs. previous version: the root node reference obtained with
 * of_find_node_by_path() is now released with of_node_put(), and the
 * "model" property pointer is NULL-checked before strncmp() to avoid
 * an oops on trees that lack a model property.
 */
static int pata_macio_cable_detect(struct ata_port *ap)
{
	struct pata_macio_priv *priv = ap->private_data;

	/* Get cable type from device-tree */
	if (priv->kind == controller_kl_ata4 ||
	    priv->kind == controller_un_ata6 ||
	    priv->kind == controller_k2_ata6 ||
	    priv->kind == controller_sh_ata6) {
		const char* cable = of_get_property(priv->node, "cable-type",
						    NULL);
		struct device_node *root = of_find_node_by_path("/");
		const char *model = of_get_property(root, "model", NULL);

		/* Drop the reference taken by of_find_node_by_path() */
		of_node_put(root);

		if (cable && !strncmp(cable, "80-", 3)) {
			/* Some drives fail to detect 80c cable in PowerBook
			 * These machine use proprietary short IDE cable
			 * anyway
			 */
			if (model && !strncmp(model, "PowerBook", 9))
				return ATA_CBL_PATA40_SHORT;
			else
				return ATA_CBL_PATA80;
		}
	}

	/* G5's seem to have incorrect cable type in device-tree.
	 * Let's assume they always have a 80 conductor cable, this seem to
	 * be always the case unless the user mucked around
	 */
	if (of_device_is_compatible(priv->node, "K2-UATA") ||
	    of_device_is_compatible(priv->node, "shasta-ata"))
		return ATA_CBL_PATA80;

	/* Anything else is 40 connectors */
	return ATA_CBL_PATA40;
}
/* qc_prep hook: translate the scatter/gather list of a queued command
 * into a DBDMA descriptor chain. Each s/g entry may be split into
 * several descriptors of at most MAX_DBDMA_SEG bytes; the chain ends
 * with a converted LAST command followed by a DBDMA_STOP descriptor.
 * Descriptor fields are written little-endian via st_le16/st_le32.
 */
static void pata_macio_qc_prep(struct ata_queued_cmd *qc)
{
	unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct pata_macio_priv *priv = ap->private_data;
	struct scatterlist *sg;
	struct dbdma_cmd *table;
	unsigned int si, pi;

	dev_dbgdma(priv->dev, "%s: qc %p flags %lx, write %d dev %d\n",
		   __func__, qc, qc->flags, write, qc->dev->devno);

	/* Nothing to build for non-DMA-mapped commands */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	table = (struct dbdma_cmd *) priv->dma_table_cpu;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* table overflow should never happen */
			BUG_ON (pi++ >= MAX_DCMDS);

			/* Split each s/g entry into MAX_DBDMA_SEG chunks */
			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
			st_le16(&table->command, write ? OUTPUT_MORE: INPUT_MORE);
			st_le16(&table->req_count, len);
			st_le32(&table->phy_addr, addr);
			table->cmd_dep = 0;
			table->xfer_status = 0;
			table->res_count = 0;
			addr += len;
			sg_len -= len;
			++table;
		}
	}

	/* Should never happen according to Tejun */
	BUG_ON(!pi);

	/* Convert the last command to an input/output */
	table--;
	st_le16(&table->command, write ? OUTPUT_LAST: INPUT_LAST);
	table++;

	/* Add the stop command to the end of the list */
	memset(table, 0, sizeof(struct dbdma_cmd));
	st_le16(&table->command, DBDMA_STOP);

	dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi);
}
static void pata_macio_freeze(struct ata_port *ap)
{
struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
if (dma_regs) {
unsigned int timeout = 1000000;
/* Make sure DMA controller is stopped */
writel((RUN|PAUSE|FLUSH|WAKE|DEAD) << 16, &dma_regs->control);
while (--timeout && (readl(&dma_regs->status) & RUN))
udelay(1);
}
ata_sff_freeze(ap);
}
/* bmdma_setup hook: point the DBDMA engine at the prepared command list
 * and issue the taskfile command. On the KeyLargo 66MHz cell a timing
 * tweak is applied for UDMA reads before issuing the command.
 */
static void pata_macio_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_macio_priv *priv = ap->private_data;
	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
	int dev = qc->dev->devno;

	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);

	/* Make sure DMA commands updates are visible */
	writel(priv->dma_table_dma, &dma_regs->cmdptr);

	/* On KeyLargo 66Mhz cell, we need to add 60ns to wrDataSetup on
	 * UDMA reads
	 */
	if (priv->kind == controller_kl_ata4 &&
	    (priv->treg[dev][0] & TR_66_UDMA_EN)) {
		void __iomem *rbase = ap->ioaddr.cmd_addr;
		u32 reg = priv->treg[dev][0];

		if (!(qc->tf.flags & ATA_TFLAG_WRITE))
			reg += 0x00800000;
		writel(reg, rbase + IDE_TIMING_CONFIG);
	}

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}
/* bmdma_start hook: set the RUN bit in the DBDMA control register
 * (upper halfword is the write mask) and post the write immediately
 * with a read-back.
 */
static void pata_macio_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_macio_priv *priv = ap->private_data;
	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;

	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);

	writel((RUN << 16) | RUN, &dma_regs->control);

	/* Make sure it gets to the controller right now */
	(void)readl(&dma_regs->control);
}
/* bmdma_stop hook: stop the DBDMA engine and poll (up to ~1s) until the
 * RUN status bit clears.
 */
static void pata_macio_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pata_macio_priv *priv = ap->private_data;
	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
	unsigned int timeout = 1000000;

	dev_dbgdma(priv->dev, "%s: qc %p\n", __func__, qc);

	/* Stop the DMA engine and wait for it to full halt */
	writel (((RUN|WAKE|DEAD) << 16), &dma_regs->control);
	while (--timeout && (readl(&dma_regs->status) & RUN))
		udelay(1);
}
/* bmdma_status hook: translate the DBDMA status register into the
 * ATA_DMA_* status bits libata expects, flushing the DBDMA fifo when
 * the channel is still active at interrupt time.
 */
static u8 pata_macio_bmdma_status(struct ata_port *ap)
{
	struct pata_macio_priv *priv = ap->private_data;
	struct dbdma_regs __iomem *dma_regs = ap->ioaddr.bmdma_addr;
	u32 dstat, rstat = ATA_DMA_INTR;
	unsigned long timeout = 0;

	dstat = readl(&dma_regs->status);

	dev_dbgdma(priv->dev, "%s: dstat=%x\n", __func__, dstat);

	/* We have two things to deal with here:
	 *
	 * - The dbdma won't stop if the command was started
	 * but completed with an error without transferring all
	 * datas. This happens when bad blocks are met during
	 * a multi-block transfer.
	 *
	 * - The dbdma fifo hasn't yet finished flushing to
	 * to system memory when the disk interrupt occurs.
	 *
	 */

	/* First check for errors */
	if ((dstat & (RUN|DEAD)) != RUN)
		rstat |= ATA_DMA_ERR;

	/* If ACTIVE is cleared, the STOP command has been hit and
	 * the transfer is complete. If not, we have to flush the
	 * channel.
	 */
	if ((dstat & ACTIVE) == 0)
		return rstat;

	dev_dbgdma(priv->dev, "%s: DMA still active, flushing...\n", __func__);

	/* If dbdma didn't execute the STOP command yet, the
	 * active bit is still set. We consider that we aren't
	 * sharing interrupts (which is hopefully the case with
	 * those controllers) and so we just try to flush the
	 * channel for pending data in the fifo
	 */
	udelay(1);
	writel((FLUSH << 16) | FLUSH, &dma_regs->control);
	for (;;) {
		udelay(1);
		dstat = readl(&dma_regs->status);
		if ((dstat & FLUSH) == 0)
			break;
		/* Give up after ~1ms of polling and flag a DMA error */
		if (++timeout > 1000) {
			dev_warn(priv->dev, "timeout flushing DMA\n");
			rstat |= ATA_DMA_ERR;
			break;
		}
	}
	return rstat;
}
/* port_start is when we allocate the DMA command list */
/* port_start is when we allocate the DMA command list */
static int pata_macio_port_start(struct ata_port *ap)
{
	struct pata_macio_priv *priv = ap->private_data;

	/* No DMA registers mapped: PIO-only port, nothing to allocate */
	if (ap->ioaddr.bmdma_addr == NULL)
		return 0;

	/* Allocate space for the DBDMA commands.
	 *
	 * The +2 is +1 for the stop command and +1 to allow for
	 * aligning the start address to a multiple of 16 bytes.
	 */
	priv->dma_table_cpu =
		dmam_alloc_coherent(priv->dev,
				    (MAX_DCMDS + 2) * sizeof(struct dbdma_cmd),
				    &priv->dma_table_dma, GFP_KERNEL);
	if (priv->dma_table_cpu == NULL) {
		/* Degrade gracefully to PIO rather than failing the port */
		dev_err(priv->dev, "Unable to allocate DMA command list\n");
		ap->ioaddr.bmdma_addr = NULL;
		ap->mwdma_mask = 0;
		ap->udma_mask = 0;
	}
	return 0;
}
/* sff_irq_clear hook: this hardware needs no explicit interrupt ack,
 * so only a debug trace is emitted.
 */
static void pata_macio_irq_clear(struct ata_port *ap)
{
	struct pata_macio_priv *priv = ap->private_data;

	/* Nothing to do here */
	dev_dbgdma(priv->dev, "%s\n", __func__);
}
/* Enable (and optionally reset) the IDE cell via platform feature calls.
 * Called at probe time (@resume == 0) and on resume (@resume == 1).
 * Media-bay hosted controllers are managed by the media-bay code and are
 * left alone here.
 */
static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
{
	dev_dbg(priv->dev, "Enabling & resetting... \n");

	if (priv->mediabay)
		return;

	if (priv->kind == controller_ohare && !resume) {
		/* The code below is having trouble on some ohare machines
		 * (timing related ?). Until I can put my hand on one of these
		 * units, I keep the old way
		 */
		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node, 0, 1);
	} else {
		int rc;

 		/* Reset and enable controller */
		rc = ppc_md.feature_call(PMAC_FTR_IDE_RESET,
					 priv->node, priv->aapl_bus_id, 1);
		ppc_md.feature_call(PMAC_FTR_IDE_ENABLE,
				    priv->node, priv->aapl_bus_id, 1);
		msleep(10);
		/* Only bother waiting if there's a reset control */
		if (rc == 0) {
			ppc_md.feature_call(PMAC_FTR_IDE_RESET,
					    priv->node, priv->aapl_bus_id, 0);
			msleep(IDE_WAKEUP_DELAY_MS);
		}
	}

	/* If resuming a PCI device, restore the config space here */
	if (priv->pdev && resume) {
		int rc;

		pci_restore_state(priv->pdev);
		rc = pcim_enable_device(priv->pdev);
		if (rc)
			dev_printk(KERN_ERR, &priv->pdev->dev,
				   "Failed to enable device after resume (%d)\n", rc);
		else
			pci_set_master(priv->pdev);
	}

	/* On Kauai, initialize the FCR. We don't perform a reset, doesn't really
	 * seem necessary and speeds up the boot process
	 */
	if (priv->kauai_fcr)
		writel(KAUAI_FCR_UATA_MAGIC |
		       KAUAI_FCR_UATA_RESET_N |
		       KAUAI_FCR_UATA_ENABLE, priv->kauai_fcr);
}
/* Hook the standard slave config to fixup some HW related alignment
* restrictions
*/
/* Hook the standard slave config to fixup some HW related alignment
 * restrictions
 */
static int pata_macio_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pata_macio_priv *priv = ap->private_data;
	struct ata_device *dev;
	u16 cmd;
	int rc;

	/* First call original */
	rc = ata_scsi_slave_config(sdev);
	if (rc)
		return rc;

	/* This is lifted from sata_nv */
	dev = &ap->link.device[sdev->id];

	/* OHare has issues with non cache aligned DMA on some chipsets */
	if (priv->kind == controller_ohare) {
		/* Force 32-byte alignment and padding of requests */
		blk_queue_update_dma_alignment(sdev->request_queue, 31);
		blk_queue_update_dma_pad(sdev->request_queue, 31);

		/* Tell the world about it */
		ata_dev_printk(dev, KERN_INFO, "OHare alignment limits applied\n");
		return 0;
	}

	/* We only have issues with ATAPI */
	if (dev->class != ATA_DEV_ATAPI)
		return 0;

	/* Shasta and K2 seem to have "issues" with reads ... */
	if (priv->kind == controller_sh_ata6 || priv->kind == controller_k2_ata6) {
		/* Allright these are bad, apply restrictions */
		blk_queue_update_dma_alignment(sdev->request_queue, 15);
		blk_queue_update_dma_pad(sdev->request_queue, 15);

		/* We enable MWI and hack cache line size directly here, this
		 * is specific to this chipset and not normal values, we happen
		 * to somewhat know what we are doing here (which is basically
		 * to do the same Apple does and pray they did not get it wrong :-)
		 */
		BUG_ON(!priv->pdev);
		pci_write_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, 0x08);
		pci_read_config_word(priv->pdev, PCI_COMMAND, &cmd);
		pci_write_config_word(priv->pdev, PCI_COMMAND,
				      cmd | PCI_COMMAND_INVALIDATE);

		/* Tell the world about it */
		ata_dev_printk(dev, KERN_INFO,
			       "K2/Shasta alignment limits applied\n");
	}

	return 0;
}
#ifdef CONFIG_PM
/* Common suspend path shared by the macio and PCI variants: suspend the
 * libata host, quiesce the cell, and power it down via FCR / feature
 * calls as appropriate for the hardware flavour.
 */
static int pata_macio_do_suspend(struct pata_macio_priv *priv, pm_message_t mesg)
{
	int rc;

	/* First, core libata suspend to do most of the work */
	rc = ata_host_suspend(priv->host, mesg);
	if (rc)
		return rc;

	/* Restore to default timings */
	pata_macio_default_timings(priv);

	/* Mask interrupt. Not strictly necessary but old driver did
	 * it and I'd rather not change that here */
	disable_irq(priv->irq);

	/* The media bay will handle itself just fine */
	if (priv->mediabay)
		return 0;

	/* Kauai has bus control FCRs directly here */
	if (priv->kauai_fcr) {
		u32 fcr = readl(priv->kauai_fcr);
		fcr &= ~(KAUAI_FCR_UATA_RESET_N | KAUAI_FCR_UATA_ENABLE);
		writel(fcr, priv->kauai_fcr);
	}

	/* For PCI, save state and disable DMA. No need to call
	 * pci_set_power_state(), the HW doesn't do D states that
	 * way, the platform code will take care of suspending the
	 * ASIC properly
	 */
	if (priv->pdev) {
		pci_save_state(priv->pdev);
		pci_disable_device(priv->pdev);
	}

	/* Disable the bus on older machines and the cell on kauai */
	ppc_md.feature_call(PMAC_FTR_IDE_ENABLE, priv->node,
			    priv->aapl_bus_id, 0);

	return 0;
}
/* Common resume path shared by the macio and PCI variants: re-enable
 * the hardware, reprogram timings, unmask the IRQ and resume the
 * libata host. Always returns 0.
 */
static int pata_macio_do_resume(struct pata_macio_priv *priv)
{
	/* Reset and re-enable the HW */
	pata_macio_reset_hw(priv, 1);

	/* Sanitize drive timings */
	pata_macio_apply_timings(priv->host->ports[0], 0);

	/* We want our IRQ back ! */
	enable_irq(priv->irq);

	/* Let the libata core take it from there */
	ata_host_resume(priv->host);

	return 0;
}
#endif /* CONFIG_PM */
/* SCSI host template: standard ATA base template plus the DBDMA s/g
 * limit and our slave_configure hook for alignment restrictions.
 */
static struct scsi_host_template pata_macio_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MAX_DCMDS,
	/* We may not need that strict one */
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= pata_macio_slave_config,
};

/* libata port operations: inherit generic BMDMA behaviour and override
 * everything that touches the DBDMA engine or the timing registers.
 */
static struct ata_port_operations pata_macio_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.freeze			= pata_macio_freeze,
	.set_piomode		= pata_macio_set_timings,
	.set_dmamode		= pata_macio_set_timings,
	.cable_detect		= pata_macio_cable_detect,
	.sff_dev_select		= pata_macio_dev_select,
	.qc_prep		= pata_macio_qc_prep,
	.bmdma_setup		= pata_macio_bmdma_setup,
	.bmdma_start		= pata_macio_bmdma_start,
	.bmdma_stop		= pata_macio_bmdma_stop,
	.bmdma_status		= pata_macio_bmdma_status,
	.port_start		= pata_macio_port_start,
	.sff_irq_clear		= pata_macio_irq_clear,
};
/* Collect the controller flavour, timing table and Apple bus ID from
 * the device-tree node. Pure software setup, no hardware access.
 *
 * Fix: compare the "AAPL,bus-id" property pointer idiomatically (!bidp)
 * instead of against the integer literal 0.
 */
static void __devinit pata_macio_invariants(struct pata_macio_priv *priv)
{
	const int *bidp;

	/* Identify the type of controller */
	if (of_device_is_compatible(priv->node, "shasta-ata")) {
		priv->kind = controller_sh_ata6;
		priv->timings = pata_macio_shasta_timings;
	} else if (of_device_is_compatible(priv->node, "kauai-ata")) {
		priv->kind = controller_un_ata6;
		priv->timings = pata_macio_kauai_timings;
	} else if (of_device_is_compatible(priv->node, "K2-UATA")) {
		priv->kind = controller_k2_ata6;
		priv->timings = pata_macio_kauai_timings;
	} else if (of_device_is_compatible(priv->node, "keylargo-ata")) {
		if (strcmp(priv->node->name, "ata-4") == 0) {
			priv->kind = controller_kl_ata4;
			priv->timings = pata_macio_kl66_timings;
		} else {
			priv->kind = controller_kl_ata3;
			priv->timings = pata_macio_kl33_timings;
		}
	} else if (of_device_is_compatible(priv->node, "heathrow-ata")) {
		priv->kind = controller_heathrow;
		priv->timings = pata_macio_heathrow_timings;
	} else {
		priv->kind = controller_ohare;
		priv->timings = pata_macio_ohare_timings;
	}

	/* XXX FIXME --- setup priv->mediabay here */

	/* Get Apple bus ID (for clock and ASIC control) */
	bidp = of_get_property(priv->node, "AAPL,bus-id", NULL);
	priv->aapl_bus_id = bidp ? *bidp : 0;

	/* Fixup missing Apple bus ID in case of media-bay */
	if (priv->mediabay && !bidp)
		priv->aapl_bus_id = 1;
}
/* Fill in the libata I/O address table. Taskfile registers on this
 * hardware are spaced 16 bytes apart (hence the << 4), the control /
 * alt-status register lives at a fixed offset of 0x160, and @dma (may
 * be NULL) points at the DBDMA register block.
 */
static void __devinit pata_macio_setup_ios(struct ata_ioports *ioaddr,
					   void __iomem * base,
					   void __iomem * dma)
{
	/* cmd_addr is the base of regs for that port */
	ioaddr->cmd_addr	= base;

	/* taskfile registers */
	ioaddr->data_addr	= base + (ATA_REG_DATA    << 4);
	ioaddr->error_addr	= base + (ATA_REG_ERR     << 4);
	ioaddr->feature_addr	= base + (ATA_REG_FEATURE << 4);
	ioaddr->nsect_addr	= base + (ATA_REG_NSECT   << 4);
	ioaddr->lbal_addr	= base + (ATA_REG_LBAL    << 4);
	ioaddr->lbam_addr	= base + (ATA_REG_LBAM    << 4);
	ioaddr->lbah_addr	= base + (ATA_REG_LBAH    << 4);
	ioaddr->device_addr	= base + (ATA_REG_DEVICE  << 4);
	ioaddr->status_addr	= base + (ATA_REG_STATUS  << 4);
	ioaddr->command_addr	= base + (ATA_REG_CMD     << 4);
	ioaddr->altstatus_addr	= base + 0x160;
	ioaddr->ctl_addr	= base + 0x160;
	ioaddr->bmdma_addr	= dma;
}
/* Derive the supported PIO/MWDMA/UDMA mode masks for @pinfo from the
 * controller's timing table. The mode's high nibble selects the class
 * (0x0x PIO, 0x2x MWDMA, 0x4x UDMA), the low nibble the bit position.
 */
static void __devinit pmac_macio_calc_timing_masks(struct pata_macio_priv *priv,
						   struct ata_port_info *pinfo)
{
	const struct pata_macio_timing *t;

	pinfo->pio_mask		= 0;
	pinfo->mwdma_mask	= 0;
	pinfo->udma_mask	= 0;

	for (t = priv->timings; t->mode > 0; t++) {
		unsigned int mask = 1U << (t->mode & 0x0f);
		unsigned int class = t->mode & 0xf0;

		if (class == 0x00)		/* PIO */
			pinfo->pio_mask |= (mask >> 8);
		else if (class == 0x20)		/* MWDMA */
			pinfo->mwdma_mask |= mask;
		else if (class == 0x40)		/* UDMA */
			pinfo->udma_mask |= mask;
	}

	dev_dbg(priv->dev, "Supported masks: PIO=%lx, MWDMA=%lx, UDMA=%lx\n",
		pinfo->pio_mask, pinfo->mwdma_mask, pinfo->udma_mask);
}
/* Core probe logic shared by the macio and PCI attach paths: gather
 * invariants, map registers, reset the hardware and activate the
 * libata host. @dmaregs and @fcregs may be 0 when absent.
 * Returns 0 on success, negative errno on failure.
 */
static int __devinit pata_macio_common_init(struct pata_macio_priv	*priv,
					    resource_size_t		tfregs,
					    resource_size_t		dmaregs,
					    resource_size_t		fcregs,
					    unsigned long		irq)
{
	struct ata_port_info		pinfo;
	const struct ata_port_info	*ppi[] = { &pinfo, NULL };
	void __iomem			*dma_regs = NULL;

	/* Fill up privates with various invariants collected from the
	 * device-tree
	 */
	pata_macio_invariants(priv);

	/* Make sure we have sane initial timings in the cache */
	pata_macio_default_timings(priv);

	/* Not sure what the real max is but we know it's less than 64K, let's
	 * use 64K minus 256
	 */
	dma_set_max_seg_size(priv->dev, MAX_DBDMA_SEG);

	/* Allocate libata host for 1 port */
	memset(&pinfo, 0, sizeof(struct ata_port_info));
	pmac_macio_calc_timing_masks(priv, &pinfo);
	pinfo.flags		= ATA_FLAG_SLAVE_POSS;
	pinfo.port_ops		= &pata_macio_ops;
	pinfo.private_data	= priv;

	priv->host = ata_host_alloc_pinfo(priv->dev, ppi, 1);
	if (priv->host == NULL) {
		dev_err(priv->dev, "Failed to allocate ATA port structure\n");
		return -ENOMEM;
	}

	/* Setup the private data in host too */
	priv->host->private_data = priv;

	/* Map base registers */
	priv->tfregs = devm_ioremap(priv->dev, tfregs, 0x100);
	if (priv->tfregs == NULL) {
		dev_err(priv->dev, "Failed to map ATA ports\n");
		return -ENOMEM;
	}
	priv->host->iomap = &priv->tfregs;

	/* Map DMA regs */
	if (dmaregs != 0) {
		dma_regs = devm_ioremap(priv->dev, dmaregs,
					sizeof(struct dbdma_regs));
		/* DMA mapping failure is non-fatal: port falls back to PIO */
		if (dma_regs == NULL)
			dev_warn(priv->dev, "Failed to map ATA DMA registers\n");
	}

	/* If chip has local feature control, map those regs too */
	if (fcregs != 0) {
		priv->kauai_fcr = devm_ioremap(priv->dev, fcregs, 4);
		if (priv->kauai_fcr == NULL) {
			dev_err(priv->dev, "Failed to map ATA FCR register\n");
			return -ENOMEM;
		}
	}

	/* Setup port data structure */
	pata_macio_setup_ios(&priv->host->ports[0]->ioaddr,
			     priv->tfregs, dma_regs);
	priv->host->ports[0]->private_data = priv;

	/* hard-reset the controller */
	pata_macio_reset_hw(priv, 0);
	pata_macio_apply_timings(priv->host->ports[0], 0);

	/* Enable bus master if necessary */
	if (priv->pdev && dma_regs)
		pci_set_master(priv->pdev);

	dev_info(priv->dev, "Activating pata-macio chipset %s, Apple bus ID %d\n",
		 macio_ata_names[priv->kind], priv->aapl_bus_id);

	/* Start it up */
	priv->irq = irq;
	return ata_host_activate(priv->host, irq, ata_bmdma_interrupt, 0,
				 &pata_macio_sht);
}
/* macio bus probe: validate the device-tree resources, allocate the
 * private structure and hand off to the common init path. Resources
 * are devm/macio managed, so error paths simply return.
 */
static int __devinit pata_macio_attach(struct macio_dev *mdev,
				       const struct of_device_id *match)
{
	struct pata_macio_priv	*priv;
	resource_size_t		tfregs, dmaregs = 0;
	unsigned long		irq;
	int			rc;

	/* Check for broken device-trees */
	if (macio_resource_count(mdev) == 0) {
		dev_err(&mdev->ofdev.dev,
			"No addresses for controller\n");
		return -ENXIO;
	}

	/* Enable managed resources */
	macio_enable_devres(mdev);

	/* Allocate and init private data structure */
	priv = devm_kzalloc(&mdev->ofdev.dev,
			    sizeof(struct pata_macio_priv), GFP_KERNEL);
	if (priv == NULL) {
		dev_err(&mdev->ofdev.dev,
			"Failed to allocate private memory\n");
		return -ENOMEM;
	}
	priv->node = of_node_get(mdev->ofdev.dev.of_node);
	priv->mdev = mdev;
	priv->dev = &mdev->ofdev.dev;

	/* Request memory resource for taskfile registers */
	if (macio_request_resource(mdev, 0, "pata-macio")) {
		dev_err(&mdev->ofdev.dev,
			"Cannot obtain taskfile resource\n");
		return -EBUSY;
	}
	tfregs = macio_resource_start(mdev, 0);

	/* Request resources for DMA registers if any */
	if (macio_resource_count(mdev) >= 2) {
		/* DMA resource failure is non-fatal: dmaregs stays 0 (PIO) */
		if (macio_request_resource(mdev, 1, "pata-macio-dma"))
			dev_err(&mdev->ofdev.dev,
				"Cannot obtain DMA resource\n");
		else
			dmaregs = macio_resource_start(mdev, 1);
	}

	/*
	 * Fixup missing IRQ for some old implementations with broken
	 * device-trees.
	 *
	 * This is a bit bogus, it should be fixed in the device-tree itself,
	 * via the existing macio fixups, based on the type of interrupt
	 * controller in the machine. However, I have no test HW for this case,
	 * and this trick works well enough on those old machines...
	 */
	if (macio_irq_count(mdev) == 0) {
		dev_warn(&mdev->ofdev.dev,
			 "No interrupts for controller, using 13\n");
		irq = irq_create_mapping(NULL, 13);
	} else
		irq = macio_irq(mdev, 0);

	/* Prevent media bay callbacks until fully registered */
	lock_media_bay(priv->mdev->media_bay);

	/* Get register addresses and call common initialization */
	rc = pata_macio_common_init(priv,
				    tfregs,		/* Taskfile regs */
				    dmaregs,		/* DBDMA regs */
				    0,			/* Feature control */
				    irq);
	unlock_media_bay(priv->mdev->media_bay);

	return rc;
}
/* macio bus remove: detach the libata host while holding the media-bay
 * lock so the media-bay event callback cannot race against teardown.
 */
static int __devexit pata_macio_detach(struct macio_dev *mdev)
{
	struct ata_host *host = macio_get_drvdata(mdev);
	struct pata_macio_priv *priv = host->private_data;

	lock_media_bay(priv->mdev->media_bay);

	/* Make sure the mediabay callback doesn't try to access
	 * dead stuff
	 */
	priv->host->private_data = NULL;

	ata_host_detach(host);

	unlock_media_bay(priv->mdev->media_bay);

	return 0;
}
#ifdef CONFIG_PM
/* macio bus PM wrappers: forward to the shared suspend/resume paths */
static int pata_macio_suspend(struct macio_dev *mdev, pm_message_t mesg)
{
	struct ata_host *host = macio_get_drvdata(mdev);

	return pata_macio_do_suspend(host->private_data, mesg);
}

static int pata_macio_resume(struct macio_dev *mdev)
{
	struct ata_host *host = macio_get_drvdata(mdev);

	return pata_macio_do_resume(host->private_data);
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PMAC_MEDIABAY
/* Media-bay event callback: translate bay plug/unplug notifications
 * into libata error-handler events. On plug, report a hotplug and
 * freeze the port; on unplug, mark all devices for detach and abort.
 */
static void pata_macio_mb_event(struct macio_dev* mdev, int mb_state)
{
	struct ata_host *host = macio_get_drvdata(mdev);
	struct ata_port *ap;
	struct ata_eh_info *ehi;
	struct ata_device *dev;
	unsigned long flags;

	/* Host may already be torn down (see pata_macio_detach) */
	if (!host || !host->private_data)
		return;
	ap = host->ports[0];
	spin_lock_irqsave(ap->lock, flags);
	ehi = &ap->link.eh_info;
	if (mb_state == MB_CD) {
		ata_ehi_push_desc(ehi, "mediabay plug");
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	} else {
		ata_ehi_push_desc(ehi, "mediabay unplug");
		ata_for_each_dev(dev, &ap->link, ALL)
			dev->flags |= ATA_DFLAG_DETACH;
		ata_port_abort(ap);
	}
	spin_unlock_irqrestore(ap->lock, flags);

}
#endif /* CONFIG_PMAC_MEDIABAY */
/* PCI probe path (Kauai-style cells behind a PCI device): locate the
 * OF node, enable the device, map BAR 0 and hand off to the common
 * init with the fixed register offsets inside the BAR.
 */
static int __devinit pata_macio_pci_attach(struct pci_dev *pdev,
					   const struct pci_device_id *id)
{
	struct pata_macio_priv	*priv;
	struct device_node	*np;
	resource_size_t		rbase;

	/* We cannot use a MacIO controller without its OF device node */
	np = pci_device_to_OF_node(pdev);
	if (np == NULL) {
		dev_err(&pdev->dev,
			"Cannot find OF device node for controller\n");
		return -ENODEV;
	}

	/* Check that it can be enabled */
	if (pcim_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot enable controller PCI device\n");
		return -ENXIO;
	}

	/* Allocate and init private data structure */
	priv = devm_kzalloc(&pdev->dev,
			    sizeof(struct pata_macio_priv), GFP_KERNEL);
	if (priv == NULL) {
		dev_err(&pdev->dev,
			"Failed to allocate private memory\n");
		return -ENOMEM;
	}
	priv->node = of_node_get(np);
	priv->pdev = pdev;
	priv->dev = &pdev->dev;

	/* Get MMIO regions */
	if (pci_request_regions(pdev, "pata-macio")) {
		dev_err(&pdev->dev,
			"Cannot obtain PCI resources\n");
		return -EBUSY;
	}

	/* Get register addresses and call common initialization */
	rbase = pci_resource_start(pdev, 0);
	if (pata_macio_common_init(priv,
				   rbase + 0x2000,	/* Taskfile regs */
				   rbase + 0x1000,	/* DBDMA regs */
				   rbase,		/* Feature control */
				   pdev->irq))
		return -ENXIO;

	return 0;
}
/* PCI remove path: simply detach the libata host (resources are
 * managed by devm/pcim).
 */
static void __devexit pata_macio_pci_detach(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	ata_host_detach(host);
}
#ifdef CONFIG_PM
/* PCI PM wrappers: forward to the shared suspend/resume paths */
static int pata_macio_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	return pata_macio_do_suspend(host->private_data, mesg);
}

static int pata_macio_pci_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	return pata_macio_do_resume(host->private_data);
}
#endif /* CONFIG_PM */
/* OF match table: historical device-trees use several names/types for
 * the IDE cell, so match on all of them.
 */
static struct of_device_id pata_macio_match[] =
{
	{
	.name 		= "IDE",
	},
	{
	.name 		= "ATA",
	},
	{
	.type		= "ide",
	},
	{
	.type		= "ata",
	},
	{},
};

/* Driver for the macio-bus hosted (non-PCI) IDE cells */
static struct macio_driver pata_macio_driver =
{
	.driver = {
		.name 		= "pata-macio",
		.owner		= THIS_MODULE,
		.of_match_table	= pata_macio_match,
	},
	.probe		= pata_macio_attach,
	.remove		= pata_macio_detach,
#ifdef CONFIG_PM
	.suspend	= pata_macio_suspend,
	.resume		= pata_macio_resume,
#endif
#ifdef CONFIG_PMAC_MEDIABAY
	.mediabay_event	= pata_macio_mb_event,
#endif
};

/* PCI IDs of the Apple PCI-attached IDE cells handled by this driver */
static const struct pci_device_id pata_macio_pci_match[] = {
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_UNI_N_ATA),	0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID_ATA100),	0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_K2_ATA100),	0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_SH_ATA),	0 },
	{ PCI_VDEVICE(APPLE, PCI_DEVICE_ID_APPLE_IPID2_ATA),	0 },
	{},
};

/* Driver for the PCI-attached flavour of the same hardware */
static struct pci_driver pata_macio_pci_driver = {
	.name		= "pata-pci-macio",
	.id_table	= pata_macio_pci_match,
	.probe		= pata_macio_pci_attach,
	.remove		= pata_macio_pci_detach,
#ifdef CONFIG_PM
	.suspend	= pata_macio_pci_suspend,
	.resume		= pata_macio_pci_resume,
#endif
	.driver = {
		.owner		= THIS_MODULE,
	},
};
MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);
/* Module init: only meaningful on PowerMac machines. Registers the PCI
 * driver first, then the macio driver; on macio registration failure
 * the PCI driver is unregistered again.
 */
static int __init pata_macio_init(void)
{
	int rc;

	if (!machine_is(powermac))
		return -ENODEV;

	rc = pci_register_driver(&pata_macio_pci_driver);
	if (rc)
		return rc;

	rc = macio_register_driver(&pata_macio_driver);
	if (rc)
		pci_unregister_driver(&pata_macio_pci_driver);

	return rc;
}
/* Module exit: unregister both drivers (reverse of init order) */
static void __exit pata_macio_exit(void)
{
	macio_unregister_driver(&pata_macio_driver);
	pci_unregister_driver(&pata_macio_pci_driver);
}
module_init(pata_macio_init);
module_exit(pata_macio_exit);
MODULE_AUTHOR("Benjamin Herrenschmidt");
MODULE_DESCRIPTION("Apple MacIO PATA driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
TeamHackYU/SKernel_Yu | drivers/gpu/drm/nouveau/nv50_fence.c | 2530 | 3621 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include <core/object.h>
#include <core/class.h>
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nv10_fence.h"
#include "nv50_display.h"
/* Per-channel fence context setup for NV50: allocates the context,
 * wires the NV10/NV17 fence callbacks, and creates DMA objects for the
 * fence buffer and for each CRTC's display sync semaphore block.
 * Returns 0 on success, negative errno on failure (context is torn
 * down via nv10_fence_context_del on error).
 */
static int
nv50_fence_context_new(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->drm->dev;
	struct nv10_fence_priv *priv = chan->drm->fence;
	struct nv10_fence_chan *fctx;
	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
	struct nouveau_object *object;
	u32 start = mem->start * PAGE_SIZE;
	u32 limit = start + mem->size - 1;
	int ret, i;

	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return -ENOMEM;

	/* Reuse the NV10/NV17 fence implementation for emit/read/sync */
	nouveau_fence_context_new(&fctx->base);
	fctx->base.emit = nv10_fence_emit;
	fctx->base.read = nv10_fence_read;
	fctx->base.sync = nv17_fence_sync;

	/* DMA object covering the shared fence buffer */
	ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
				 NvSema, 0x003d,
				 &(struct nv_dma_class) {
					.flags = NV_DMA_TARGET_VRAM |
						 NV_DMA_ACCESS_RDWR,
					.start = start,
					.limit = limit,
				 }, sizeof(struct nv_dma_class),
				 &object);

	/* dma objects for display sync channel semaphore blocks */
	/* note: loop condition also short-circuits on a prior error */
	for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
		u32 start = bo->bo.mem.start * PAGE_SIZE;
		u32 limit = start + bo->bo.mem.size - 1;

		ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
					 NvEvoSema0 + i, 0x003d,
					 &(struct nv_dma_class) {
						.flags = NV_DMA_TARGET_VRAM |
							 NV_DMA_ACCESS_RDWR,
						.start = start,
						.limit = limit,
					 }, sizeof(struct nv_dma_class),
					 &object);
	}

	if (ret)
		nv10_fence_context_del(chan);
	return ret;
}
/* Create the NV50 fence state: allocates the priv structure, installs
 * the callbacks (NV10/NV17 helpers plus the NV50 context constructor),
 * and pins/maps a 4KiB VRAM buffer used for fence values. The first
 * fence slot is initialized to zero. Returns 0 or a negative errno.
 */
int
nv50_fence_create(struct nouveau_drm *drm)
{
	struct nv10_fence_priv *priv;
	int ret = 0;

	priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base.dtor = nv10_fence_destroy;
	priv->base.resume = nv17_fence_resume;
	priv->base.context_new = nv50_fence_context_new;
	priv->base.context_del = nv10_fence_context_del;
	spin_lock_init(&priv->lock);

	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
			     0, 0x0000, NULL, &priv->bo);
	if (!ret) {
		/* Pin then map; unwind each step on failure */
		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
		if (!ret) {
			ret = nouveau_bo_map(priv->bo);
			if (ret)
				nouveau_bo_unpin(priv->bo);
		}
		if (ret)
			nouveau_bo_ref(NULL, &priv->bo);
	}

	if (ret) {
		nv10_fence_destroy(drm);
		return ret;
	}

	nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
	return ret;
}
| gpl-2.0 |
Evervolv/android_kernel_samsung_tuna | arch/arm/mach-pxa/colibri-pxa300.c | 2530 | 4546 | /*
* arch/arm/mach-pxa/colibri-pxa300.c
*
* Support for Toradex PXA300/310 based Colibri module
*
* Daniel Mack <daniel@caiaq.de>
* Matthias Meier <matthias.j.meier@gmx.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <asm/mach-types.h>
#include <asm/sizes.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <mach/pxa300.h>
#include <mach/colibri.h>
#include <mach/ohci.h>
#include <mach/pxafb.h>
#include <mach/audio.h>
#include "generic.h"
#include "devices.h"
#ifdef CONFIG_MACH_COLIBRI_EVALBOARD
/* MFP pin configuration for the Colibri PXA300 evaluation board:
 * MMC card slot, USB host controller and I2C bus.
 */
static mfp_cfg_t colibri_pxa300_evalboard_pin_config[] __initdata = {
	/* MMC */
	GPIO7_MMC1_CLK,
	GPIO14_MMC1_CMD,
	GPIO3_MMC1_DAT0,
	GPIO4_MMC1_DAT1,
	GPIO5_MMC1_DAT2,
	GPIO6_MMC1_DAT3,
	GPIO13_GPIO,	/* GPIO13_COLIBRI_PXA300_SD_DETECT */

	/* UHC */
	GPIO0_2_USBH_PEN,
	GPIO1_2_USBH_PWR,
	GPIO77_USB_P3_1,
	GPIO78_USB_P3_2,
	GPIO79_USB_P3_3,
	GPIO80_USB_P3_4,
	GPIO81_USB_P3_5,
	GPIO82_USB_P3_6,

	/* I2C */
	GPIO21_I2C_SCL,
	GPIO22_I2C_SDA,
};
#else
/* No evalboard support configured: empty pin table */
static mfp_cfg_t colibri_pxa300_evalboard_pin_config[] __initdata = {};
#endif
#if defined(CONFIG_AX88796)
#define COLIBRI_ETH_IRQ_GPIO	mfp_to_gpio(GPIO26_GPIO)

/*
 * Asix AX88796 Ethernet
 */
static struct ax_plat_data colibri_asix_platdata = {
	.flags		= 0, /* defined later */
	.wordlength	= 2,
};

/* MMIO window on chip-select 2 plus the falling-edge GPIO interrupt */
static struct resource colibri_asix_resource[] = {
	[0] = {
		.start = PXA3xx_CS2_PHYS,
		.end   = PXA3xx_CS2_PHYS + (0x20 * 2) - 1,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = gpio_to_irq(COLIBRI_ETH_IRQ_GPIO),
		.end   = gpio_to_irq(COLIBRI_ETH_IRQ_GPIO),
		.flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING,
	}
};

static struct platform_device asix_device = {
	.name		= "ax88796",
	.id		= 0,
	.num_resources 	= ARRAY_SIZE(colibri_asix_resource),
	.resource	= colibri_asix_resource,
	.dev		= {
		.platform_data = &colibri_asix_platdata
	}
};

/* Pin setup for the AX88796: chip select and pulled-up IRQ line */
static mfp_cfg_t colibri_pxa300_eth_pin_config[] __initdata = {
	GPIO1_nCS2,			/* AX88796 chip select */
	GPIO26_GPIO | MFP_PULL_HIGH	/* AX88796 IRQ */
};

/* Configure pins and register the AX88796 platform device */
static void __init colibri_pxa300_init_eth(void)
{
	colibri_pxa3xx_init_eth(&colibri_asix_platdata);
	pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa300_eth_pin_config));
	platform_device_register(&asix_device);
}
#else
static inline void __init colibri_pxa300_init_eth(void) {}
#endif /* CONFIG_AX88796 */
#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
/*
 * MFP routing for the PXA LCD controller: 18 data lines plus the
 * frame/line/pixel clocks, bias and vsync control signals.
 */
static mfp_cfg_t colibri_pxa300_lcd_pin_config[] __initdata = {
GPIO54_LCD_LDD_0,
GPIO55_LCD_LDD_1,
GPIO56_LCD_LDD_2,
GPIO57_LCD_LDD_3,
GPIO58_LCD_LDD_4,
GPIO59_LCD_LDD_5,
GPIO60_LCD_LDD_6,
GPIO61_LCD_LDD_7,
GPIO62_LCD_LDD_8,
GPIO63_LCD_LDD_9,
GPIO64_LCD_LDD_10,
GPIO65_LCD_LDD_11,
GPIO66_LCD_LDD_12,
GPIO67_LCD_LDD_13,
GPIO68_LCD_LDD_14,
GPIO69_LCD_LDD_15,
GPIO70_LCD_LDD_16,
GPIO71_LCD_LDD_17,
GPIO62_LCD_CS_N,
GPIO72_LCD_FCLK,
GPIO73_LCD_LCLK,
GPIO74_LCD_PCLK,
GPIO75_LCD_BIAS,
GPIO76_LCD_VSYNC,
};
/* Route the LCD pins; the framebuffer device itself is registered by
 * colibri_pxa3xx_init_lcd() from colibri_pxa300_init(). */
static void __init colibri_pxa300_init_lcd(void)
{
pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa300_lcd_pin_config));
}
#else
static inline void colibri_pxa300_init_lcd(void) {}
#endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */
#if defined(CONFIG_SND_AC97_CODEC) || defined(CONFIG_SND_AC97_CODEC_MODULE)
/* AC97 link pins (PXA310 only): sysclk, reset, serial data in/out,
 * frame sync and bit clock. */
static mfp_cfg_t colibri_pxa310_ac97_pin_config[] __initdata = {
GPIO24_AC97_SYSCLK,
GPIO23_AC97_nACRESET,
GPIO25_AC97_SDATA_IN_0,
GPIO27_AC97_SDATA_OUT,
GPIO28_AC97_SYNC,
GPIO29_AC97_BITCLK
};
/* Route AC97 pins and register the controller, but only on PXA310
 * modules -- checked at runtime since PXA300 and PXA310 share this
 * board file. */
static inline void __init colibri_pxa310_init_ac97(void)
{
/* no AC97 codec on Colibri PXA300 */
if (!cpu_is_pxa310())
return;
pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa310_ac97_pin_config));
pxa_set_ac97_info(NULL);
}
#else
static inline void colibri_pxa310_init_ac97(void) {}
#endif
/*
 * Board init entry point, referenced from the MACHINE_START record
 * below: brings up Ethernet, NAND, the LCD (pin routing plus the
 * shared colibri_pxa3xx framebuffer registration, with GPIO39 as the
 * backlight/enable pin) and AC97, then applies the carrier-board pin
 * configuration and runs the shared evalboard init.
 */
void __init colibri_pxa300_init(void)
{
colibri_pxa300_init_eth();
colibri_pxa3xx_init_nand();
colibri_pxa300_init_lcd();
colibri_pxa3xx_init_lcd(mfp_to_gpio(GPIO39_GPIO));
colibri_pxa310_init_ac97();
/* Evalboard init */
pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa300_evalboard_pin_config));
colibri_evalboard_init();
}
/* Machine record: boot parameters live 0x100 into SDRAM; IRQ/timer/io
 * setup is the generic PXA3xx code. */
MACHINE_START(COLIBRI300, "Toradex Colibri PXA300")
.boot_params = COLIBRI_SDRAM_BASE + 0x100,
.init_machine = colibri_pxa300_init,
.map_io = pxa3xx_map_io,
.init_irq = pxa3xx_init_irq,
.timer = &pxa_timer,
MACHINE_END
| gpl-2.0 |
Evil-Green/Lonas_KL | drivers/media/video/adv7175.c | 3042 | 10859 | /*
* adv7175 - adv7175a video encoder driver version 0.0.3
*
* Copyright (C) 1998 Dave Perks <dperks@ibm.net>
* Copyright (C) 1999 Wolfgang Scherr <scherr@net4you.net>
* Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
* - some corrections for Pinnacle Systems Inc. DC10plus card.
*
* Changes by Ronald Bultje <rbultje@ronald.bitfreak.net>
* - moved over to linux>=2.4.x i2c protocol (9/9/2002)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
MODULE_DESCRIPTION("Analog Devices ADV7175 video encoder driver");
MODULE_AUTHOR("Dave Perks");
MODULE_LICENSE("GPL");
#define I2C_ADV7175 0xd4
#define I2C_ADV7176 0x54
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
/* ----------------------------------------------------------------------- */
/* Per-device state: embedded v4l2 subdev plus the currently selected
 * TV norm and input routing (index into inputs[] below). */
struct adv7175 {
struct v4l2_subdev sd;
v4l2_std_id norm;
int input;
};
/* Recover our state struct from the embedded subdev pointer. */
static inline struct adv7175 *to_adv7175(struct v4l2_subdev *sd)
{
return container_of(sd, struct adv7175, sd);
}
/* Human-readable names for the three routing inputs (see s_routing). */
static char *inputs[] = { "pass_through", "play_back", "color_bar" };
/* ----------------------------------------------------------------------- */
/* Write one register over SMBus; returns 0 or a negative errno. */
static inline int adv7175_write(struct v4l2_subdev *sd, u8 reg, u8 value)
{
	return i2c_smbus_write_byte_data(v4l2_get_subdevdata(sd), reg, value);
}
/* Read one register over SMBus; returns the byte or a negative errno. */
static inline int adv7175_read(struct v4l2_subdev *sd, u8 reg)
{
	return i2c_smbus_read_byte_data(v4l2_get_subdevdata(sd), reg);
}
/*
 * Write a block of (reg, value) byte pairs to the chip.
 *
 * @data: flat array of register/value pairs
 * @len:  length of @data in bytes (so len/2 pairs)
 *
 * If the adapter supports raw I2C, consecutive registers are batched
 * into a single i2c_master_send() using the chip's register
 * autoincrement (up to 31 values per transfer).  Otherwise each pair
 * is written individually via SMBus.  Returns the last transfer's
 * result: >= 0 on success, negative errno on failure.
 */
static int adv7175_write_block(struct v4l2_subdev *sd,
const u8 *data, unsigned int len)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = -1;
u8 reg;
/* the adv7175 has an autoincrement function, use it if
 * the adapter understands raw I2C */
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
/* do raw I2C, not smbus compatible */
u8 block_data[32];
int block_len;
while (len >= 2) {
block_len = 0;
/* first byte of the message is the start register */
block_data[block_len++] = reg = data[0];
do {
block_data[block_len++] = data[1];
reg++;
len -= 2;
data += 2;
/* keep batching while registers stay consecutive and
 * the 32-byte message buffer has room */
} while (len >= 2 && data[0] == reg && block_len < 32);
ret = i2c_master_send(client, block_data, block_len);
if (ret < 0)
break;
}
} else {
/* do some slow I2C emulation kind of thing */
while (len >= 2) {
reg = *data++;
ret = adv7175_write(sd, reg, *data++);
if (ret < 0)
break;
len -= 2;
}
}
return ret;
}
/*
 * Program the subcarrier frequency registers (0x02-0x05).
 * For some reason pass-through NTSC needs a different value in the
 * first frequency byte to remain stable; the rest are common.
 */
static void set_subcarrier_freq(struct v4l2_subdev *sd, int pass_through)
{
	adv7175_write(sd, 0x02, pass_through ? 0x00 : 0x55);
	adv7175_write(sd, 0x03, 0x55);
	adv7175_write(sd, 0x04, 0x55);
	adv7175_write(sd, 0x05, 0x25);
}
/* ----------------------------------------------------------------------- */
/* Output filter: S-Video Composite */
#define MR050 0x11 /* 0x09 */
#define MR060 0x14 /* 0x0c */
/* ----------------------------------------------------------------------- */
#define TR0MODE 0x46
#define TR0RST 0x80
#define TR1CAPT 0x80
#define TR1PLAY 0x00
/*
 * Register init tables: flat (reg, value) pairs consumed by
 * adv7175_write_block().  init_common programs the full register set
 * (PAL defaults); init_pal / init_ntsc only rewrite mode register 0
 * and the subcarrier frequency/phase when switching standards.
 */
static const unsigned char init_common[] = {
0x00, MR050, /* MR0, PAL enabled */
0x01, 0x00, /* MR1 */
0x02, 0x0c, /* subc. freq. */
0x03, 0x8c, /* subc. freq. */
0x04, 0x79, /* subc. freq. */
0x05, 0x26, /* subc. freq. */
0x06, 0x40, /* subc. phase */
0x07, TR0MODE, /* TR0, 16bit */
0x08, 0x21, /* */
0x09, 0x00, /* */
0x0a, 0x00, /* */
0x0b, 0x00, /* */
0x0c, TR1CAPT, /* TR1 */
0x0d, 0x4f, /* MR2 */
0x0e, 0x00, /* */
0x0f, 0x00, /* */
0x10, 0x00, /* */
0x11, 0x00, /* */
};
static const unsigned char init_pal[] = {
0x00, MR050, /* MR0, PAL enabled */
0x01, 0x00, /* MR1 */
0x02, 0x0c, /* subc. freq. */
0x03, 0x8c, /* subc. freq. */
0x04, 0x79, /* subc. freq. */
0x05, 0x26, /* subc. freq. */
0x06, 0x40, /* subc. phase */
};
static const unsigned char init_ntsc[] = {
0x00, MR060, /* MR0, NTSC enabled */
0x01, 0x00, /* MR1 */
0x02, 0x55, /* subc. freq. */
0x03, 0x55, /* subc. freq. */
0x04, 0x55, /* subc. freq. */
0x05, 0x25, /* subc. freq. */
0x06, 0x1a, /* subc. phase */
};
/* .init core op: load the full common register set, then pulse the
 * TR0 reset bit to latch it.  @val is unused.  Always returns 0. */
static int adv7175_init(struct v4l2_subdev *sd, u32 val)
{
/* This is just for testing!!! */
adv7175_write_block(sd, init_common, sizeof(init_common));
adv7175_write(sd, 0x07, TR0MODE | TR0RST);
adv7175_write(sd, 0x07, TR0MODE);
return 0;
}
/*
 * .s_std_output video op: switch the encoder's output TV standard.
 * Loads the NTSC or PAL subcarrier table, toggles genlock depending
 * on the current input, and pulses TR0 reset to apply.  SECAM is
 * approximated with PAL timing and genlock disabled.  Returns
 * -EINVAL for any other standard; on success records the new norm.
 */
static int adv7175_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct adv7175 *encoder = to_adv7175(sd);
if (std & V4L2_STD_NTSC) {
adv7175_write_block(sd, init_ntsc, sizeof(init_ntsc));
if (encoder->input == 0)
adv7175_write(sd, 0x0d, 0x4f); /* Enable genlock */
adv7175_write(sd, 0x07, TR0MODE | TR0RST);
adv7175_write(sd, 0x07, TR0MODE);
} else if (std & V4L2_STD_PAL) {
adv7175_write_block(sd, init_pal, sizeof(init_pal));
if (encoder->input == 0)
adv7175_write(sd, 0x0d, 0x4f); /* Enable genlock */
adv7175_write(sd, 0x07, TR0MODE | TR0RST);
adv7175_write(sd, 0x07, TR0MODE);
} else if (std & V4L2_STD_SECAM) {
/* This is an attempt to convert
 * SECAM->PAL (typically it does not work
 * due to genlock: when the decoder is in SECAM
 * and the encoder is in PAL the subcarrier can
 * not be synchronized with the horizontal
 * frequency) */
adv7175_write_block(sd, init_pal, sizeof(init_pal));
if (encoder->input == 0)
adv7175_write(sd, 0x0d, 0x49); /* Disable genlock */
adv7175_write(sd, 0x07, TR0MODE | TR0RST);
adv7175_write(sd, 0x07, TR0MODE);
} else {
v4l2_dbg(1, debug, sd, "illegal norm: %llx\n",
(unsigned long long)std);
return -EINVAL;
}
v4l2_dbg(1, debug, sd, "switched to %llx\n", (unsigned long long)std);
encoder->norm = std;
return 0;
}
/*
 * .s_routing video op: select the encoder's video source.
 * @input: 0 = decoder pass-through, 1 = ZR36060 play-back,
 *         2 = internal color bars (see inputs[]).
 * @output/@config are unused.  Each case reprograms the mode, timing
 * and genlock registers and pulses TR0 reset; NTSC additionally
 * needs its subcarrier frequency adjusted per mode (see
 * set_subcarrier_freq).  Returns -EINVAL for any other input.
 */
static int adv7175_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
struct adv7175 *encoder = to_adv7175(sd);
/* RJ: input = 0: input is from decoder
input = 1: input is from ZR36060
input = 2: color bar */
switch (input) {
case 0:
adv7175_write(sd, 0x01, 0x00);
if (encoder->norm & V4L2_STD_NTSC)
set_subcarrier_freq(sd, 1);
adv7175_write(sd, 0x0c, TR1CAPT); /* TR1 */
if (encoder->norm & V4L2_STD_SECAM)
adv7175_write(sd, 0x0d, 0x49); /* Disable genlock */
else
adv7175_write(sd, 0x0d, 0x4f); /* Enable genlock */
adv7175_write(sd, 0x07, TR0MODE | TR0RST);
adv7175_write(sd, 0x07, TR0MODE);
/*udelay(10);*/
break;
case 1:
adv7175_write(sd, 0x01, 0x00);
if (encoder->norm & V4L2_STD_NTSC)
set_subcarrier_freq(sd, 0);
adv7175_write(sd, 0x0c, TR1PLAY); /* TR1 */
adv7175_write(sd, 0x0d, 0x49);
adv7175_write(sd, 0x07, TR0MODE | TR0RST);
adv7175_write(sd, 0x07, TR0MODE);
/* udelay(10); */
break;
case 2:
adv7175_write(sd, 0x01, 0x80);
if (encoder->norm & V4L2_STD_NTSC)
set_subcarrier_freq(sd, 0);
adv7175_write(sd, 0x0d, 0x49);
adv7175_write(sd, 0x07, TR0MODE | TR0RST);
adv7175_write(sd, 0x07, TR0MODE);
/* udelay(10); */
break;
default:
v4l2_dbg(1, debug, sd, "illegal input: %d\n", input);
return -EINVAL;
}
v4l2_dbg(1, debug, sd, "switched to %s\n", inputs[input]);
encoder->input = input;
return 0;
}
/* .g_chip_ident core op: report this chip as an ADV7175 (revision 0). */
static int adv7175_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
	return v4l2_chip_ident_i2c_client(v4l2_get_subdevdata(sd), chip,
					  V4L2_IDENT_ADV7175, 0);
}
/* .s_power core op: mode register 1 -- 0x00 powers the outputs up,
 * 0x78 puts the part in low-power mode.  Always returns 0. */
static int adv7175_s_power(struct v4l2_subdev *sd, int on)
{
	adv7175_write(sd, 0x01, on ? 0x00 : 0x78);
	return 0;
}
/* ----------------------------------------------------------------------- */
/* v4l2 subdev operation tables wiring the handlers above into the
 * core and video op groups. */
static const struct v4l2_subdev_core_ops adv7175_core_ops = {
.g_chip_ident = adv7175_g_chip_ident,
.init = adv7175_init,
.s_power = adv7175_s_power,
};
static const struct v4l2_subdev_video_ops adv7175_video_ops = {
.s_std_output = adv7175_s_std_output,
.s_routing = adv7175_s_routing,
};
static const struct v4l2_subdev_ops adv7175_ops = {
.core = &adv7175_core_ops,
.video = &adv7175_video_ops,
};
/* ----------------------------------------------------------------------- */
/*
 * I2C probe: verify SMBus byte-data support, allocate and register
 * the subdev, then program the common register table (NTSC default
 * norm, input 0).  Failures of the initial register writes are only
 * logged -- probe still returns 0 and the device stays registered.
 */
static int adv7175_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int i;
struct adv7175 *encoder;
struct v4l2_subdev *sd;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
encoder = kzalloc(sizeof(struct adv7175), GFP_KERNEL);
if (encoder == NULL)
return -ENOMEM;
sd = &encoder->sd;
v4l2_i2c_subdev_init(sd, client, &adv7175_ops);
encoder->norm = V4L2_STD_NTSC;
encoder->input = 0;
i = adv7175_write_block(sd, init_common, sizeof(init_common));
if (i >= 0) {
/* pulse reset, then read register 0x12 for the revision bit */
i = adv7175_write(sd, 0x07, TR0MODE | TR0RST);
i = adv7175_write(sd, 0x07, TR0MODE);
i = adv7175_read(sd, 0x12);
v4l2_dbg(1, debug, sd, "revision %d\n", i & 1);
}
if (i < 0)
v4l2_dbg(1, debug, sd, "init error 0x%x\n", i);
return 0;
}
/* I2C remove: unregister the subdev and free the state allocated in
 * probe (container of the subdev). */
static int adv7175_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
kfree(to_adv7175(sd));
return 0;
}
/* ----------------------------------------------------------------------- */
/* Device-id table: the driver also binds the register-compatible
 * ADV7176 under the same ops. */
static const struct i2c_device_id adv7175_id[] = {
{ "adv7175", 0 },
{ "adv7176", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7175_id);
static struct i2c_driver adv7175_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "adv7175",
},
.probe = adv7175_probe,
.remove = adv7175_remove,
.id_table = adv7175_id,
};
/* Module entry/exit: plain i2c driver registration boilerplate. */
static __init int init_adv7175(void)
{
return i2c_add_driver(&adv7175_driver);
}
static __exit void exit_adv7175(void)
{
i2c_del_driver(&adv7175_driver);
}
module_init(init_adv7175);
module_exit(exit_adv7175);
| gpl-2.0 |
Stane1983/kernel-amlogic-mx | net/sunrpc/auth_gss/gss_krb5_crypto.c | 3298 | 24981 | /*
* linux/net/sunrpc/gss_krb5_crypto.c
*
* Copyright (c) 2000-2008 The Regents of the University of Michigan.
* All rights reserved.
*
* Andy Adamson <andros@umich.edu>
* Bruce Fields <bfields@umich.edu>
*/
/*
* Copyright (C) 1998 by the FundsXpress, INC.
*
* All rights reserved.
*
* Export of this software from the United States of America may require
* a specific license from the United States Government. It is the
* responsibility of any person or organization contemplating export to
* obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of FundsXpress. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. FundsXpress makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
/*
 * Encrypt @length bytes from @in to @out with @tfm in CBC mode.
 * @iv may be NULL, in which case an all-zero IV is used; it is copied
 * into a local buffer so the caller's IV is never modified.
 * @length must be a multiple of the cipher block size.
 * Returns 0 on success or a negative/-EINVAL style error code.
 */
u32
krb5_encrypt(
struct crypto_blkcipher *tfm,
void * iv,
void * in,
void * out,
int length)
{
u32 ret = -EINVAL;
struct scatterlist sg[1];
u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
if (length % crypto_blkcipher_blocksize(tfm) != 0)
goto out;
if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
crypto_blkcipher_ivsize(tfm));
goto out;
}
if (iv)
memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
/* encrypt in place in the output buffer */
memcpy(out, in, length);
sg_init_one(sg, out, length);
ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
out:
dprintk("RPC: krb5_encrypt returns %d\n", ret);
return ret;
}
/*
 * Decrypt @length bytes from @in to @out with @tfm in CBC mode.
 * Mirror image of krb5_encrypt(): NULL @iv means all-zero IV, the IV
 * is copied locally, and @length must be block-size aligned.
 * Returns 0 on success or an error code.
 */
u32
krb5_decrypt(
struct crypto_blkcipher *tfm,
void * iv,
void * in,
void * out,
int length)
{
u32 ret = -EINVAL;
struct scatterlist sg[1];
u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
if (length % crypto_blkcipher_blocksize(tfm) != 0)
goto out;
if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
crypto_blkcipher_ivsize(tfm));
goto out;
}
if (iv)
memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
/* decrypt in place in the output buffer */
memcpy(out, in, length);
sg_init_one(sg, out, length);
ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
out:
dprintk("RPC: gss_k5decrypt returns %d\n",ret);
return ret;
}
/* xdr_process_buf() callback: feed one scatterlist fragment into the
 * hash whose descriptor is passed through the opaque @data pointer. */
static int
checksummer(struct scatterlist *sg, void *data)
{
	struct hash_desc *hdesc = data;

	return crypto_hash_update(hdesc, sg, sg->length);
}
/*
 * Map a GSS key-usage value to the 4-byte little-endian MS usage salt
 * required by the arcfour-hmac-md5 enctype.  Only SIGN and SEAL have
 * defined mappings; anything else is rejected with -EINVAL.
 */
static int
arcfour_hmac_md5_usage_to_salt(unsigned int usage, u8 salt[4])
{
	unsigned int ms_usage;
	int i;

	switch (usage) {
	case KG_USAGE_SIGN:
		ms_usage = 15;
		break;
	case KG_USAGE_SEAL:
		ms_usage = 13;
		break;
	default:
		return -EINVAL;
	}
	/* serialize ms_usage least-significant byte first */
	for (i = 0; i < 4; i++)
		salt[i] = (ms_usage >> (8 * i)) & 0xff;
	return 0;
}
/*
 * Checksum for the arcfour-hmac-md5 enctype:
 * HMAC-MD5(cksumkey, MD5(salt || header || body)).
 * The 4-byte salt is derived from @usage; @header is the first
 * @hdrlen bytes of the token header and @body is hashed starting at
 * @body_offset.  The result is copied into @cksumout (truncated to
 * the enctype's checksum length).  Returns 0 or GSS_S_FAILURE.
 */
static u32
make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
struct xdr_buf *body, int body_offset, u8 *cksumkey,
unsigned int usage, struct xdr_netobj *cksumout)
{
struct hash_desc desc;
struct scatterlist sg[1];
int err;
u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
u8 rc4salt[4];
struct crypto_hash *md5;
struct crypto_hash *hmac_md5;
if (cksumkey == NULL)
return GSS_S_FAILURE;
if (cksumout->len < kctx->gk5e->cksumlength) {
dprintk("%s: checksum buffer length, %u, too small for %s\n",
__func__, cksumout->len, kctx->gk5e->name);
return GSS_S_FAILURE;
}
if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
dprintk("%s: invalid usage value %u\n", __func__, usage);
return GSS_S_FAILURE;
}
md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(md5))
return GSS_S_FAILURE;
hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(hmac_md5)) {
crypto_free_hash(md5);
return GSS_S_FAILURE;
}
/* inner digest: MD5(salt || header || body) */
desc.tfm = md5;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_init(&desc);
if (err)
goto out;
sg_init_one(sg, rc4salt, 4);
err = crypto_hash_update(&desc, sg, 4);
if (err)
goto out;
sg_init_one(sg, header, hdrlen);
err = crypto_hash_update(&desc, sg, hdrlen);
if (err)
goto out;
err = xdr_process_buf(body, body_offset, body->len - body_offset,
checksummer, &desc);
if (err)
goto out;
err = crypto_hash_final(&desc, checksumdata);
if (err)
goto out;
/* outer digest: HMAC-MD5 over the inner MD5 result */
desc.tfm = hmac_md5;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_init(&desc);
if (err)
goto out;
err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
if (err)
goto out;
sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
checksumdata);
if (err)
goto out;
memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
cksumout->len = kctx->gk5e->cksumlength;
out:
crypto_free_hash(md5);
crypto_free_hash(hmac_md5);
return err ? GSS_S_FAILURE : 0;
}
/*
* checksum the plaintext data and hdrlen bytes of the token header
* The checksum is performed over the first 8 bytes of the
* gss token header and then over the data body
*/
/*
 * Compute the token checksum over @hdrlen bytes of @header followed
 * by @body starting at @body_offset, using the enctype's checksum
 * algorithm.  Dispatches arcfour-hmac-md5 to its special routine.
 * For RSA_MD5 the digest is additionally encrypted with the sequence
 * key and right-truncated; HMAC_SHA1_DES3 is copied (truncated)
 * directly.  Result goes into @cksumout.  Returns 0 or GSS_S_FAILURE.
 */
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
struct xdr_buf *body, int body_offset, u8 *cksumkey,
unsigned int usage, struct xdr_netobj *cksumout)
{
struct hash_desc desc;
struct scatterlist sg[1];
int err;
u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
unsigned int checksumlen;
if (kctx->gk5e->ctype == CKSUMTYPE_HMAC_MD5_ARCFOUR)
return make_checksum_hmac_md5(kctx, header, hdrlen,
body, body_offset,
cksumkey, usage, cksumout);
if (cksumout->len < kctx->gk5e->cksumlength) {
dprintk("%s: checksum buffer length, %u, too small for %s\n",
__func__, cksumout->len, kctx->gk5e->name);
return GSS_S_FAILURE;
}
desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(desc.tfm))
return GSS_S_FAILURE;
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
checksumlen = crypto_hash_digestsize(desc.tfm);
/* keyed checksums set the key; plain digests (NULL key) do not */
if (cksumkey != NULL) {
err = crypto_hash_setkey(desc.tfm, cksumkey,
kctx->gk5e->keylength);
if (err)
goto out;
}
err = crypto_hash_init(&desc);
if (err)
goto out;
sg_init_one(sg, header, hdrlen);
err = crypto_hash_update(&desc, sg, hdrlen);
if (err)
goto out;
err = xdr_process_buf(body, body_offset, body->len - body_offset,
checksummer, &desc);
if (err)
goto out;
err = crypto_hash_final(&desc, checksumdata);
if (err)
goto out;
switch (kctx->gk5e->ctype) {
case CKSUMTYPE_RSA_MD5:
/* encrypt the digest, keep only the trailing cksumlength bytes */
err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
checksumdata, checksumlen);
if (err)
goto out;
memcpy(cksumout->data,
checksumdata + checksumlen - kctx->gk5e->cksumlength,
kctx->gk5e->cksumlength);
break;
case CKSUMTYPE_HMAC_SHA1_DES3:
memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
break;
default:
BUG();
break;
}
cksumout->len = kctx->gk5e->cksumlength;
out:
crypto_free_hash(desc.tfm);
return err ? GSS_S_FAILURE : 0;
}
/*
* checksum the plaintext data and hdrlen bytes of the token header
* Per rfc4121, sec. 4.2.4, the checksum is performed over the data
* body then over the first 16 octets of the MIC token
* Inclusion of the header data in the calculation of the
* checksum is optional.
*/
/*
 * RFC 4121 (new-token-format) checksum: keyed hash over @body first,
 * then optionally over @hdrlen bytes of @header (header last, per
 * sec. 4.2.4).  Requires a keyed checksum type and a non-NULL key.
 * The digest is truncated to the enctype's checksum length and
 * stored in @cksumout.  Returns 0 or GSS_S_FAILURE.
 */
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
struct xdr_buf *body, int body_offset, u8 *cksumkey,
unsigned int usage, struct xdr_netobj *cksumout)
{
struct hash_desc desc;
struct scatterlist sg[1];
int err;
u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
unsigned int checksumlen;
if (kctx->gk5e->keyed_cksum == 0) {
dprintk("%s: expected keyed hash for %s\n",
__func__, kctx->gk5e->name);
return GSS_S_FAILURE;
}
if (cksumkey == NULL) {
dprintk("%s: no key supplied for %s\n",
__func__, kctx->gk5e->name);
return GSS_S_FAILURE;
}
desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(desc.tfm))
return GSS_S_FAILURE;
checksumlen = crypto_hash_digestsize(desc.tfm);
desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
if (err)
goto out;
err = crypto_hash_init(&desc);
if (err)
goto out;
/* body first ... */
err = xdr_process_buf(body, body_offset, body->len - body_offset,
checksummer, &desc);
if (err)
goto out;
/* ... then the (optional) token header */
if (header != NULL) {
sg_init_one(sg, header, hdrlen);
err = crypto_hash_update(&desc, sg, hdrlen);
if (err)
goto out;
}
err = crypto_hash_final(&desc, checksumdata);
if (err)
goto out;
cksumout->len = kctx->gk5e->cksumlength;
switch (kctx->gk5e->ctype) {
case CKSUMTYPE_HMAC_SHA1_96_AES128:
case CKSUMTYPE_HMAC_SHA1_96_AES256:
/* note that this truncates the hash */
memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
break;
default:
BUG();
break;
}
out:
crypto_free_hash(desc.tfm);
return err ? GSS_S_FAILURE : 0;
}
/* State threaded through encryptor() across xdr_process_buf()
 * fragments: the running CBC IV, the cipher descriptor, the current
 * position in the output buffer, the plaintext source pages, and up
 * to 4 in/out scatterlist fragments with their accumulated length. */
struct encryptor_desc {
u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
struct blkcipher_desc desc;
int pos;
struct xdr_buf *outbuf;
struct page **pages;
struct scatterlist infrags[4];
struct scatterlist outfrags[4];
int fragno;
int fraglen;
};
/*
 * xdr_process_buf() callback used by gss_encrypt_xdr_buf(): collects
 * scatterlist fragments, and whenever a whole number of cipher
 * blocks has accumulated, encrypts them in one call -- reading from
 * the substituted plaintext pages and writing into the xdr_buf's own
 * pages.  Any sub-block remainder is carried over into the next
 * invocation so CBC chaining stays contiguous.
 */
static int
encryptor(struct scatterlist *sg, void *data)
{
struct encryptor_desc *desc = data;
struct xdr_buf *outbuf = desc->outbuf;
struct page *in_page;
int thislen = desc->fraglen + sg->length;
int fraglen, ret;
int page_pos;
/* Worst case is 4 fragments: head, end of page 1, start
 * of page 2, tail. Anything more is a bug. */
BUG_ON(desc->fragno > 3);
page_pos = desc->pos - outbuf->head[0].iov_len;
if (page_pos >= 0 && page_pos < outbuf->page_len) {
/* pages are not in place: */
int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
in_page = desc->pages[i];
} else {
in_page = sg_page(sg);
}
sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
sg->offset);
sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
sg->offset);
desc->fragno++;
desc->fraglen += sg->length;
desc->pos += sg->length;
/* fraglen = bytes beyond the last whole cipher block */
fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
thislen -= fraglen;
if (thislen == 0)
return 0;
sg_mark_end(&desc->infrags[desc->fragno - 1]);
sg_mark_end(&desc->outfrags[desc->fragno - 1]);
ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
desc->infrags, thislen);
if (ret)
return ret;
sg_init_table(desc->infrags, 4);
sg_init_table(desc->outfrags, 4);
if (fraglen) {
/* carry the unencrypted tail into the next round */
sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
sg->offset + sg->length - fraglen);
desc->infrags[0] = desc->outfrags[0];
sg_assign_page(&desc->infrags[0], in_page);
desc->fragno = 1;
desc->fraglen = fraglen;
} else {
desc->fragno = 0;
desc->fraglen = 0;
}
return 0;
}
/*
 * CBC-encrypt @buf in place from @offset to the end, reading the
 * plaintext from the substituted @pages (see encryptor()).  The
 * length past @offset must be a whole number of cipher blocks.
 * Returns 0 or a crypto-layer error.
 */
int
gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
int offset, struct page **pages)
{
	struct encryptor_desc desc;

	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	/* zero the whole state in one go: IV, fragment counters, ... */
	memset(&desc, 0, sizeof(desc));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	return xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
}
/* State threaded through decryptor(): running CBC IV, cipher
 * descriptor, and up to 4 accumulated scatterlist fragments (in-place
 * decryption needs only one fragment list). */
struct decryptor_desc {
u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
struct blkcipher_desc desc;
struct scatterlist frags[4];
int fragno;
int fraglen;
};
/*
 * xdr_process_buf() callback used by gss_decrypt_xdr_buf(): the
 * in-place mirror of encryptor().  Accumulates fragments, decrypts
 * whole cipher blocks as soon as they are available, and carries any
 * sub-block remainder over to the next invocation.
 */
static int
decryptor(struct scatterlist *sg, void *data)
{
struct decryptor_desc *desc = data;
int thislen = desc->fraglen + sg->length;
int fraglen, ret;
/* Worst case is 4 fragments: head, end of page 1, start
 * of page 2, tail. Anything more is a bug. */
BUG_ON(desc->fragno > 3);
sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
sg->offset);
desc->fragno++;
desc->fraglen += sg->length;
/* fraglen = bytes beyond the last whole cipher block */
fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
thislen -= fraglen;
if (thislen == 0)
return 0;
sg_mark_end(&desc->frags[desc->fragno - 1]);
ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
desc->frags, thislen);
if (ret)
return ret;
sg_init_table(desc->frags, 4);
if (fraglen) {
/* carry the undecrypted tail into the next round */
sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
sg->offset + sg->length - fraglen);
desc->fragno = 1;
desc->fraglen = fraglen;
} else {
desc->fragno = 0;
desc->fraglen = 0;
}
return 0;
}
/*
 * CBC-decrypt @buf in place from @offset to the end.  The length
 * past @offset must be a whole number of cipher blocks.  Returns 0
 * or a crypto-layer error.
 */
int
gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
int offset)
{
	struct decryptor_desc desc;

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	/* zero the whole state in one go: IV, fragment counters, ... */
	memset(&desc, 0, sizeof(desc));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	sg_init_table(desc.frags, 4);

	return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}
/*
* This function makes the assumption that it was ultimately called
* from gss_wrap().
*
* The client auth_gss code moves any existing tail data into a
* separate page before calling gss_wrap.
* The server svcauth_gss code ensures that both the head and the
* tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
*
* Even with that guarantee, this function may be called more than
* once in the processing of gss_wrap(). The best we can do is
* verify at compile-time (see GSS_KRB5_SLACK_CHECK) that the
* largest expected shift will fit within RPC_MAX_AUTH_SIZE.
* At run-time we can verify that a single invocation of this
* function doesn't attempt to use more the RPC_MAX_AUTH_SIZE.
*/
/*
 * Shift the head iovec's contents at @base right by @shiftlen bytes,
 * growing head (and the buffer) by that amount to open a gap for
 * e.g. the confounder.  Relies on the caller-provided slack space
 * described in the comment above; verified against RPC_MAX_AUTH_SIZE
 * both at compile time and at run time.  Always returns 0.
 */
int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
{
u8 *p;
if (shiftlen == 0)
return 0;
BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
p = buf->head[0].iov_base + base;
/* regions overlap, so memmove rather than memcpy */
memmove(p + shiftlen, p, buf->head[0].iov_len - base);
buf->head[0].iov_len += shiftlen;
buf->len += shiftlen;
return 0;
}
/*
 * Handle the final (up to two blocks) CTS portion of an AES
 * encryption/decryption: copy the trailing bytes out of @buf into a
 * stack buffer, run the cipher over them with the chained @iv, and
 * write the result back.  For encryption the cleartext is read from
 * the caller's @pages (temporarily swapped in), while the result is
 * written to the xdr_buf's own pages.  @encrypt selects direction.
 * Returns 0 or an error.
 */
static u32
gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
u32 offset, u8 *iv, struct page **pages, int encrypt)
{
u32 ret;
struct scatterlist sg[1];
struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
u8 data[crypto_blkcipher_blocksize(cipher) * 2];
struct page **save_pages;
u32 len = buf->len - offset;
BUG_ON(len > crypto_blkcipher_blocksize(cipher) * 2);
/*
 * For encryption, we want to read from the cleartext
 * page cache pages, and write the encrypted data to
 * the supplied xdr_buf pages.
 */
save_pages = buf->pages;
if (encrypt)
buf->pages = pages;
ret = read_bytes_from_xdr_buf(buf, offset, data, len);
buf->pages = save_pages;
if (ret)
goto out;
sg_init_one(sg, data, len);
if (encrypt)
ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
else
ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);
if (ret)
goto out;
ret = write_bytes_to_xdr_buf(buf, offset, data, len);
out:
return ret;
}
/*
 * RFC 4121 AES wrap: encrypt the message in @buf starting at @offset
 * (which points at the GSS token header).  Steps: insert the random
 * confounder after the header, append any filler and a plaintext
 * copy of the token header to the tail, HMAC the plaintext (using
 * the caller's real page-cache @pages swapped in), then CBC-encrypt
 * all whole blocks except the last two with the auxiliary cipher and
 * finish with CTS via gss_krb5_cts_crypt(), chaining the IV across
 * the two stages.  Finally the HMAC is accounted for in the buffer
 * length.  Key/usage selection depends on whether we are the
 * initiator or the acceptor.  Returns 0 or GSS_S_FAILURE.
 */
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
struct xdr_buf *buf, int ec, struct page **pages)
{
u32 err;
struct xdr_netobj hmac;
u8 *cksumkey;
u8 *ecptr;
struct crypto_blkcipher *cipher, *aux_cipher;
int blocksize;
struct page **save_pages;
int nblocks, nbytes;
struct encryptor_desc desc;
u32 cbcbytes;
unsigned int usage;
if (kctx->initiate) {
cipher = kctx->initiator_enc;
aux_cipher = kctx->initiator_enc_aux;
cksumkey = kctx->initiator_integ;
usage = KG_USAGE_INITIATOR_SEAL;
} else {
cipher = kctx->acceptor_enc;
aux_cipher = kctx->acceptor_enc_aux;
cksumkey = kctx->acceptor_integ;
usage = KG_USAGE_ACCEPTOR_SEAL;
}
blocksize = crypto_blkcipher_blocksize(cipher);
/* hide the gss token header and insert the confounder */
offset += GSS_KRB5_TOK_HDR_LEN;
if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
return GSS_S_FAILURE;
gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
offset -= GSS_KRB5_TOK_HDR_LEN;
if (buf->tail[0].iov_base != NULL) {
ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
} else {
/* no tail yet: carve one out right after the head */
buf->tail[0].iov_base = buf->head[0].iov_base
+ buf->head[0].iov_len;
buf->tail[0].iov_len = 0;
ecptr = buf->tail[0].iov_base;
}
/* 'ec' filler bytes, value 'X' */
memset(ecptr, 'X', ec);
buf->tail[0].iov_len += ec;
buf->len += ec;
/* copy plaintext gss token header after filler (if any) */
memcpy(ecptr + ec, buf->head[0].iov_base + offset,
GSS_KRB5_TOK_HDR_LEN);
buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
buf->len += GSS_KRB5_TOK_HDR_LEN;
/* Do the HMAC */
hmac.len = GSS_KRB5_MAX_CKSUM_LEN;
hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
/*
 * When we are called, pages points to the real page cache
 * data -- which we can't go and encrypt! buf->pages points
 * to scratch pages which we are going to send off to the
 * client/server. Swap in the plaintext pages to calculate
 * the hmac.
 */
save_pages = buf->pages;
buf->pages = pages;
err = make_checksum_v2(kctx, NULL, 0, buf,
offset + GSS_KRB5_TOK_HDR_LEN,
cksumkey, usage, &hmac);
buf->pages = save_pages;
if (err)
return GSS_S_FAILURE;
/* everything except the final two blocks goes through plain CBC */
nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
nblocks = (nbytes + blocksize - 1) / blocksize;
cbcbytes = 0;
if (nblocks > 2)
cbcbytes = (nblocks - 2) * blocksize;
memset(desc.iv, 0, sizeof(desc.iv));
if (cbcbytes) {
desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
desc.fragno = 0;
desc.fraglen = 0;
desc.pages = pages;
desc.outbuf = buf;
desc.desc.info = desc.iv;
desc.desc.flags = 0;
desc.desc.tfm = aux_cipher;
sg_init_table(desc.infrags, 4);
sg_init_table(desc.outfrags, 4);
err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
cbcbytes, encryptor, &desc);
if (err)
goto out_err;
}
/* Make sure IV carries forward from any CBC results. */
err = gss_krb5_cts_crypt(cipher, buf,
offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
desc.iv, pages, 1);
if (err) {
err = GSS_S_FAILURE;
goto out_err;
}
/* Now update buf to account for HMAC */
buf->tail[0].iov_len += kctx->gk5e->cksumlength;
buf->len += kctx->gk5e->cksumlength;
out_err:
if (err)
err = GSS_S_FAILURE;
return err;
}
/*
 * RFC 4121 AES unwrap: decrypt the message in @buf starting at
 * @offset.  Builds a sub-buffer covering everything between the
 * token header and the trailing HMAC, CBC-decrypts all but the last
 * two blocks with the auxiliary cipher, finishes with CTS (IV
 * chained across the stages), recomputes the HMAC over the resulting
 * plaintext and compares it with the one in the packet.  On success
 * reports how many leading (confounder) and trailing (checksum)
 * bytes the caller should skip.  Returns 0, GSS_S_BAD_SIG on HMAC
 * mismatch, or GSS_S_FAILURE.
 */
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
u32 *headskip, u32 *tailskip)
{
struct xdr_buf subbuf;
u32 ret = 0;
u8 *cksum_key;
struct crypto_blkcipher *cipher, *aux_cipher;
struct xdr_netobj our_hmac_obj;
u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
int nblocks, blocksize, cbcbytes;
struct decryptor_desc desc;
unsigned int usage;
/* note: roles are mirrored relative to the encrypt path */
if (kctx->initiate) {
cipher = kctx->acceptor_enc;
aux_cipher = kctx->acceptor_enc_aux;
cksum_key = kctx->acceptor_integ;
usage = KG_USAGE_ACCEPTOR_SEAL;
} else {
cipher = kctx->initiator_enc;
aux_cipher = kctx->initiator_enc_aux;
cksum_key = kctx->initiator_integ;
usage = KG_USAGE_INITIATOR_SEAL;
}
blocksize = crypto_blkcipher_blocksize(cipher);
/* create a segment skipping the header and leaving out the checksum */
xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
(buf->len - offset - GSS_KRB5_TOK_HDR_LEN -
kctx->gk5e->cksumlength));
nblocks = (subbuf.len + blocksize - 1) / blocksize;
cbcbytes = 0;
if (nblocks > 2)
cbcbytes = (nblocks - 2) * blocksize;
memset(desc.iv, 0, sizeof(desc.iv));
if (cbcbytes) {
desc.fragno = 0;
desc.fraglen = 0;
desc.desc.info = desc.iv;
desc.desc.flags = 0;
desc.desc.tfm = aux_cipher;
sg_init_table(desc.frags, 4);
ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
if (ret)
goto out_err;
}
/* Make sure IV carries forward from any CBC results. */
ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
if (ret)
goto out_err;
/* Calculate our hmac over the plaintext data */
our_hmac_obj.len = sizeof(our_hmac);
our_hmac_obj.data = our_hmac;
ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
cksum_key, usage, &our_hmac_obj);
if (ret)
goto out_err;
/* Get the packet's hmac value */
ret = read_bytes_from_xdr_buf(buf, buf->len - kctx->gk5e->cksumlength,
pkt_hmac, kctx->gk5e->cksumlength);
if (ret)
goto out_err;
if (memcmp(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
ret = GSS_S_BAD_SIG;
goto out_err;
}
*headskip = kctx->gk5e->conflen;
*tailskip = kctx->gk5e->cksumlength;
out_err:
if (ret && ret != GSS_S_BAD_SIG)
ret = GSS_S_FAILURE;
return ret;
}
/*
* Compute Kseq given the initial session key and the checksum.
* Set the key of the given cipher.
*/
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
		       unsigned char *cksum)
{
	/*
	 * Derive Kseq in two HMAC passes and install it as @cipher's key:
	 *   1. HMAC(Ksess, 0x00000000)          -> intermediate Kseq
	 *   2. HMAC(intermediate Kseq, cksum[8]) -> final Kseq
	 * Returns 0 or a negative errno.
	 */
	struct crypto_hash *hmac;
	struct hash_desc desc;
	struct scatterlist sg[1];
	u8 Kseq[GSS_KRB5_MAX_KEYLEN];
	u32 zeroconstant = 0;
	int err;

	dprintk("%s: entered\n", __func__);

	hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc.tfm = hmac;
	desc.flags = 0;

	err = crypto_hash_init(&desc);
	if (err)
		goto out_err;

	/* Compute intermediate Kseq from session key */
	err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_init_table(sg, 1);
	/* Hash the 4-byte little "zero constant" mandated by the profile. */
	sg_set_buf(sg, &zeroconstant, 4);

	err = crypto_hash_digest(&desc, sg, 4, Kseq);
	if (err)
		goto out_err;

	/* Compute final Kseq from the checksum and intermediate Kseq */
	err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	/* Only the first 8 bytes of the token checksum feed the second pass. */
	sg_set_buf(sg, cksum, 8);

	err = crypto_hash_digest(&desc, sg, 8, Kseq);
	if (err)
		goto out_err;

	err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	crypto_free_hash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}
/*
* Compute Kcrypt given the initial session key and the plaintext seqnum.
* Set the key of cipher kctx->enc.
*/
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
		       s32 seqnum)
{
	/*
	 * Derive Kcrypt in two HMAC passes and install it as @cipher's key:
	 *   1. HMAC(Ksess XOR 0xf0.., 0x00000000)       -> intermediate Kcrypt
	 *   2. HMAC(intermediate Kcrypt, seqnum[4, BE]) -> final Kcrypt
	 * Returns 0 or a negative errno.
	 */
	struct crypto_hash *hmac;
	struct hash_desc desc;
	struct scatterlist sg[1];
	u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
	u8 zeroconstant[4] = {0};
	u8 seqnumarray[4];
	int err, i;

	/* Fix: seqnum is s32, so print it with %d, not %u. */
	dprintk("%s: entered, seqnum %d\n", __func__, seqnum);

	hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hmac)) {
		dprintk("%s: error %ld, allocating hash '%s'\n",
			__func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
		return PTR_ERR(hmac);
	}

	desc.tfm = hmac;
	desc.flags = 0;

	err = crypto_hash_init(&desc);
	if (err)
		goto out_err;

	/* Compute intermediate Kcrypt from session key */
	for (i = 0; i < kctx->gk5e->keylength; i++)
		Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;

	err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	sg_init_table(sg, 1);
	sg_set_buf(sg, zeroconstant, 4);

	err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
	if (err)
		goto out_err;

	/* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
	err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	/* Serialize the sequence number big-endian, as the wire format needs. */
	seqnumarray[0] = (unsigned char) ((seqnum >> 24) & 0xff);
	seqnumarray[1] = (unsigned char) ((seqnum >> 16) & 0xff);
	seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
	seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);

	sg_set_buf(sg, seqnumarray, 4);

	err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
	if (err)
		goto out_err;

	err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
	if (err)
		goto out_err;

	err = 0;

out_err:
	crypto_free_hash(hmac);
	dprintk("%s: returning %d\n", __func__, err);
	return err;
}
| gpl-2.0 |
ViciousAOSP/platform_kernel_ViciousKernel | drivers/input/misc/pcf8574_keypad.c | 3298 | 4856 | /*
* Driver for a keypad w/16 buttons connected to a PCF8574 I2C I/O expander
*
* Copyright 2005-2008 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#define DRV_NAME "pcf8574_keypad"
static const unsigned char pcf8574_kp_btncode[] = {
[0] = KEY_RESERVED,
[1] = KEY_ENTER,
[2] = KEY_BACKSLASH,
[3] = KEY_0,
[4] = KEY_RIGHTBRACE,
[5] = KEY_C,
[6] = KEY_9,
[7] = KEY_8,
[8] = KEY_7,
[9] = KEY_B,
[10] = KEY_6,
[11] = KEY_5,
[12] = KEY_4,
[13] = KEY_A,
[14] = KEY_3,
[15] = KEY_2,
[16] = KEY_1
};
struct kp_data {
unsigned short btncode[ARRAY_SIZE(pcf8574_kp_btncode)];
struct input_dev *idev;
struct i2c_client *client;
char name[64];
char phys[32];
unsigned char laststate;
};
static short read_state(struct kp_data *lp)
{
	/*
	 * Scan the 4x4 key matrix behind the PCF8574 and return a keycode
	 * index in the range used by pcf8574_kp_btncode.
	 */
	unsigned char x, y, a, b;

	/* Drive columns high (0xF0), read back the row nibble, inverted. */
	i2c_smbus_write_byte(lp->client, 240);
	x = 0xF & (~(i2c_smbus_read_byte(lp->client) >> 4));

	/* Drive rows high (0x0F), read back the column nibble, inverted. */
	i2c_smbus_write_byte(lp->client, 15);
	y = 0xF & (~i2c_smbus_read_byte(lp->client));

	/* Convert the one-hot row/column bits to bit positions a and b. */
	for (a = 0; x > 0; a++)
		x = x >> 1;
	for (b = 0; y > 0; b++)
		y = y >> 1;

	/*
	 * NOTE(review): with no key pressed (x == y == 0) this evaluates to
	 * -4; the IRQ handler's bounds check treats that as "no key", but
	 * confirm against the hardware's idle levels.
	 */
	return ((a - 1) * 4) + b;
}
static irqreturn_t pcf8574_kp_irq_handler(int irq, void *dev_id)
{
	/*
	 * Threaded IRQ handler: report a key press when the matrix state
	 * changes to a valid index, or a release of the previous key when
	 * it changes to an out-of-range ("no key") value.
	 */
	struct kp_data *lp = dev_id;
	unsigned char nextstate = read_state(lp);

	if (lp->laststate != nextstate) {
		int key_down = nextstate < ARRAY_SIZE(lp->btncode);
		/* On release, the code of the key that WAS held is reported. */
		unsigned short keycode = key_down ?
			lp->btncode[nextstate] : lp->btncode[lp->laststate];

		input_report_key(lp->idev, keycode, key_down);
		input_sync(lp->idev);

		lp->laststate = nextstate;
	}

	return IRQ_HANDLED;
}
static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	/*
	 * Bind to a PCF8574 expander: verify the chip answers, allocate
	 * driver and input-device state, install the keymap, request the
	 * (level-triggered, threaded) IRQ and register the input device.
	 * Error paths unwind in strict reverse order of acquisition.
	 */
	int i, ret;
	struct input_dev *idev;
	struct kp_data *lp;

	/* A failed write means no chip at this address -- bail out early. */
	if (i2c_smbus_write_byte(client, 240) < 0) {
		dev_err(&client->dev, "probe: write fail\n");
		return -ENODEV;
	}

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;

	idev = input_allocate_device();
	if (!idev) {
		dev_err(&client->dev, "Can't allocate input device\n");
		ret = -ENOMEM;
		goto fail_allocate;
	}

	lp->idev = idev;
	lp->client = client;

	idev->evbit[0] = BIT_MASK(EV_KEY);
	idev->keycode = lp->btncode;
	idev->keycodesize = sizeof(lp->btncode[0]);
	idev->keycodemax = ARRAY_SIZE(lp->btncode);

	/* Copy the default keymap and advertise each code as available. */
	for (i = 0; i < ARRAY_SIZE(pcf8574_kp_btncode); i++) {
		lp->btncode[i] = pcf8574_kp_btncode[i];
		/* NOTE(review): "& KEY_MAX" masks rather than range-checks;
		 * harmless for this table, but verify if the map changes. */
		__set_bit(lp->btncode[i] & KEY_MAX, idev->keybit);
	}

	sprintf(lp->name, DRV_NAME);
	sprintf(lp->phys, "kp_data/input0");

	idev->name = lp->name;
	idev->phys = lp->phys;
	idev->id.bustype = BUS_I2C;
	idev->id.vendor = 0x0001;
	idev->id.product = 0x0001;
	idev->id.version = 0x0100;

	/* Seed laststate so the first interrupt reports a real transition. */
	lp->laststate = read_state(lp);

	ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   DRV_NAME, lp);
	if (ret) {
		dev_err(&client->dev, "IRQ %d is not free\n", client->irq);
		goto fail_free_device;
	}

	ret = input_register_device(idev);
	if (ret) {
		dev_err(&client->dev, "input_register_device() failed\n");
		goto fail_free_irq;
	}

	i2c_set_clientdata(client, lp);
	return 0;

 fail_free_irq:
	free_irq(client->irq, lp);
 fail_free_device:
	input_free_device(idev);
 fail_allocate:
	kfree(lp);

	return ret;
}
/* Tear down in reverse probe order: IRQ first so no handler can touch
 * the input device or kp state while they are being destroyed. */
static int __devexit pcf8574_kp_remove(struct i2c_client *client)
{
	struct kp_data *kp = i2c_get_clientdata(client);

	free_irq(client->irq, kp);
	input_unregister_device(kp->idev);
	kfree(kp);

	return 0;
}
#ifdef CONFIG_PM
/* Re-arm the keypad interrupt that suspend masked. */
static int pcf8574_kp_resume(struct device *dev)
{
	struct i2c_client *i2c = to_i2c_client(dev);

	enable_irq(i2c->irq);

	return 0;
}
/* Mask the keypad interrupt for the duration of system sleep. */
static int pcf8574_kp_suspend(struct device *dev)
{
	struct i2c_client *i2c = to_i2c_client(dev);

	disable_irq(i2c->irq);

	return 0;
}
/* PM hooks: only needed when CONFIG_PM is on; otherwise stub to NULL. */
static const struct dev_pm_ops pcf8574_kp_pm_ops = {
	.suspend	= pcf8574_kp_suspend,
	.resume		= pcf8574_kp_resume,
};
#else
# define pcf8574_kp_resume  NULL
# define pcf8574_kp_suspend NULL
#endif
/* Device IDs this driver binds to (matched by name by the I2C core). */
static const struct i2c_device_id pcf8574_kp_id[] = {
	{ DRV_NAME, 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, pcf8574_kp_id);

/* Driver glue: probe/remove plus optional PM ops. */
static struct i2c_driver pcf8574_kp_driver = {
	.driver = {
		.name  = DRV_NAME,
		.owner = THIS_MODULE,
#ifdef CONFIG_PM
		.pm = &pcf8574_kp_pm_ops,
#endif
	},
	.probe    = pcf8574_kp_probe,
	.remove   = __devexit_p(pcf8574_kp_remove),
	.id_table = pcf8574_kp_id,
};
/* Module entry point: register with the I2C core, which will call
 * probe() for every matching device. */
static int __init pcf8574_kp_init(void)
{
	int err;

	err = i2c_add_driver(&pcf8574_kp_driver);
	return err;
}
module_init(pcf8574_kp_init);
/* Module exit point: unregister the driver; remove() runs for every
 * bound device. */
static void __exit pcf8574_kp_exit(void)
{
	i2c_del_driver(&pcf8574_kp_driver);
}
module_exit(pcf8574_kp_exit);
MODULE_AUTHOR("Michael Hennerich");
MODULE_DESCRIPTION("Keypad input driver for 16 keys connected to PCF8574");
MODULE_LICENSE("GPL");
| gpl-2.0 |
zarboz/Monarudo_GPU_M7 | arch/arm/plat-orion/pcie.c | 4834 | 7329 | /*
* arch/arm/plat-orion/pcie.c
*
* Marvell Orion SoC PCIe handling.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/mbus.h>
#include <asm/mach/pci.h>
#include <plat/pcie.h>
#include <plat/addr-map.h>
#include <linux/delay.h>
/*
* PCIe unit register offsets.
*/
#define PCIE_DEV_ID_OFF 0x0000
#define PCIE_CMD_OFF 0x0004
#define PCIE_DEV_REV_OFF 0x0008
#define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3))
#define PCIE_HEADER_LOG_4_OFF 0x0128
#define PCIE_BAR_CTRL_OFF(n) (0x1804 + ((n - 1) * 4))
#define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4))
#define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4))
#define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4))
#define PCIE_WIN5_CTRL_OFF 0x1880
#define PCIE_WIN5_BASE_OFF 0x1884
#define PCIE_WIN5_REMAP_OFF 0x188c
#define PCIE_CONF_ADDR_OFF 0x18f8
#define PCIE_CONF_ADDR_EN 0x80000000
#define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc))
#define PCIE_CONF_BUS(b) (((b) & 0xff) << 16)
#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11)
#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8)
#define PCIE_CONF_DATA_OFF 0x18fc
#define PCIE_MASK_OFF 0x1910
#define PCIE_CTRL_OFF 0x1a00
#define PCIE_CTRL_X1_MODE 0x0001
#define PCIE_STAT_OFF 0x1a04
#define PCIE_STAT_DEV_OFFS 20
#define PCIE_STAT_DEV_MASK 0x1f
#define PCIE_STAT_BUS_OFFS 8
#define PCIE_STAT_BUS_MASK 0xff
#define PCIE_STAT_LINK_DOWN 1
#define PCIE_DEBUG_CTRL 0x1a60
#define PCIE_DEBUG_SOFT_RESET (1<<20)
/* Device ID lives in the upper half of the DEV_ID register. */
u32 __init orion_pcie_dev_id(void __iomem *base)
{
	u32 reg = readl(base + PCIE_DEV_ID_OFF);

	return reg >> 16;
}
/* Silicon revision is the low byte of the DEV_REV register. */
u32 __init orion_pcie_rev(void __iomem *base)
{
	u32 reg = readl(base + PCIE_DEV_REV_OFF);

	return reg & 0xff;
}
/* Returns nonzero when the PCIe link is trained (LINK_DOWN bit clear). */
int orion_pcie_link_up(void __iomem *base)
{
	u32 stat = readl(base + PCIE_STAT_OFF);

	return (stat & PCIE_STAT_LINK_DOWN) ? 0 : 1;
}
/* Returns nonzero when the port is in x4 (not x1) lane mode. */
int __init orion_pcie_x4_mode(void __iomem *base)
{
	u32 ctrl = readl(base + PCIE_CTRL_OFF);

	return (ctrl & PCIE_CTRL_X1_MODE) == 0;
}
/* Extract the local bus number field from the PCIe status register. */
int orion_pcie_get_local_bus_nr(void __iomem *base)
{
	u32 status = readl(base + PCIE_STAT_OFF);

	return (status >> PCIE_STAT_BUS_OFFS) & PCIE_STAT_BUS_MASK;
}
/* Read-modify-write the bus-number field of the PCIe status register. */
void __init orion_pcie_set_local_bus_nr(void __iomem *base, int nr)
{
	u32 stat = readl(base + PCIE_STAT_OFF);

	stat = (stat & ~(PCIE_STAT_BUS_MASK << PCIE_STAT_BUS_OFFS)) |
	       (nr << PCIE_STAT_BUS_OFFS);
	writel(stat, base + PCIE_STAT_OFF);
}
void __init orion_pcie_reset(void __iomem *base)
{
	/*
	 * Soft-reset the PCIe unit, then poll (up to 200 ms) for the link
	 * to come back before deasserting the reset bit.
	 */
	u32 reg;
	int i;

	/*
	 * MV-S104860-U0, Rev. C:
	 * PCI Express Unit Soft Reset
	 * When set, generates an internal reset in the PCI Express unit.
	 * This bit should be cleared after the link is re-established.
	 */
	reg = readl(base + PCIE_DEBUG_CTRL);
	reg |= PCIE_DEBUG_SOFT_RESET;
	writel(reg, base + PCIE_DEBUG_CTRL);

	/* 20 x 10 ms polls; falls through if the link never trains. */
	for (i = 0; i < 20; i++) {
		mdelay(10);

		if (orion_pcie_link_up(base))
			break;
	}

	reg &= ~(PCIE_DEBUG_SOFT_RESET);
	writel(reg, base + PCIE_DEBUG_CTRL);
}
/*
* Setup PCIE BARs and Address Decode Wins:
* BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
* WIN[0-3] -> DRAM bank[0-3]
*/
static void __init orion_pcie_setup_wins(void __iomem *base,
					 struct mbus_dram_target_info *dram)
{
	u32 size;
	int i;

	/*
	 * First, disable and clear BARs and windows.
	 */
	for (i = 1; i <= 2; i++) {
		writel(0, base + PCIE_BAR_CTRL_OFF(i));
		writel(0, base + PCIE_BAR_LO_OFF(i));
		writel(0, base + PCIE_BAR_HI_OFF(i));
	}

	for (i = 0; i < 5; i++) {
		writel(0, base + PCIE_WIN04_CTRL_OFF(i));
		writel(0, base + PCIE_WIN04_BASE_OFF(i));
		writel(0, base + PCIE_WIN04_REMAP_OFF(i));
	}

	writel(0, base + PCIE_WIN5_CTRL_OFF);
	writel(0, base + PCIE_WIN5_BASE_OFF);
	writel(0, base + PCIE_WIN5_REMAP_OFF);

	/*
	 * Setup windows for DDR banks. Count total DDR size on the fly.
	 */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		/* Base/size fields live in bits 31:16; low bit enables. */
		writel(cs->base & 0xffff0000, base + PCIE_WIN04_BASE_OFF(i));
		writel(0, base + PCIE_WIN04_REMAP_OFF(i));
		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       base + PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/*
	 * Round up 'size' to the nearest power of two.
	 */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/*
	 * Setup BAR[1] to all DRAM banks.
	 */
	writel(dram->cs[0].base, base + PCIE_BAR_LO_OFF(1));
	writel(0, base + PCIE_BAR_HI_OFF(1));
	writel(((size - 1) & 0xffff0000) | 1, base + PCIE_BAR_CTRL_OFF(1));
}
void __init orion_pcie_setup(void __iomem *base)
{
	/*
	 * Bring the PCIe unit to an operational state: program the DRAM
	 * address-decode windows, enable bus mastering plus memory and
	 * I/O decoding, and unmask the INTA-INTD interrupt lines.
	 */
	u16 cmd;
	u32 mask;

	/*
	 * Point PCIe unit MBUS decode windows to DRAM space.
	 */
	orion_pcie_setup_wins(base, &orion_mbus_dram_info);

	/*
	 * Master + slave enable.
	 */
	cmd = readw(base + PCIE_CMD_OFF);
	cmd |= PCI_COMMAND_IO;
	cmd |= PCI_COMMAND_MEMORY;
	cmd |= PCI_COMMAND_MASTER;
	writew(cmd, base + PCIE_CMD_OFF);

	/*
	 * Enable interrupt lines A-D.
	 */
	mask = readl(base + PCIE_MASK_OFF);
	mask |= 0x0f000000;
	writel(mask, base + PCIE_MASK_OFF);
}
int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
		       u32 devfn, int where, int size, u32 *val)
{
	/*
	 * Standard config read: latch the target BDF/register into the
	 * CONF_ADDR register, then a read of CONF_DATA performs the
	 * configuration cycle.  Sub-word reads extract the right byte lane.
	 */
	writel(PCIE_CONF_BUS(bus->number) |
	       PCIE_CONF_DEV(PCI_SLOT(devfn)) |
	       PCIE_CONF_FUNC(PCI_FUNC(devfn)) |
	       PCIE_CONF_REG(where) | PCIE_CONF_ADDR_EN,
	       base + PCIE_CONF_ADDR_OFF);

	*val = readl(base + PCIE_CONF_DATA_OFF);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}
int orion_pcie_rd_conf_tlp(void __iomem *base, struct pci_bus *bus,
			   u32 devfn, int where, int size, u32 *val)
{
	/*
	 * Config read variant for controllers where the completion data for
	 * non-local-bus / non-function-0 targets must be fetched from the
	 * header-log register.  The initial CONF_DATA read is still needed
	 * to trigger the configuration TLP; its result is then overridden.
	 */
	writel(PCIE_CONF_BUS(bus->number) |
	       PCIE_CONF_DEV(PCI_SLOT(devfn)) |
	       PCIE_CONF_FUNC(PCI_FUNC(devfn)) |
	       PCIE_CONF_REG(where) | PCIE_CONF_ADDR_EN,
	       base + PCIE_CONF_ADDR_OFF);

	*val = readl(base + PCIE_CONF_DATA_OFF);

	if (bus->number != orion_pcie_get_local_bus_nr(base) ||
	    PCI_FUNC(devfn) != 0)
		*val = readl(base + PCIE_HEADER_LOG_4_OFF);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * Workaround config read: the BDF/register tuple is encoded directly
 * into the address within a dedicated memory-mapped window, so a single
 * readl() performs the whole configuration cycle.
 */
int orion_pcie_rd_conf_wa(void __iomem *wa_base, struct pci_bus *bus,
			  u32 devfn, int where, int size, u32 *val)
{
	u32 addr = PCIE_CONF_BUS(bus->number) |
		   PCIE_CONF_DEV(PCI_SLOT(devfn)) |
		   PCIE_CONF_FUNC(PCI_FUNC(devfn)) |
		   PCIE_CONF_REG(where);
	u32 data = readl(wa_base + addr);

	/* Narrow the 32-bit word down to the requested byte lane(s). */
	switch (size) {
	case 1:
		data = (data >> (8 * (where & 3))) & 0xff;
		break;
	case 2:
		data = (data >> (8 * (where & 3))) & 0xffff;
		break;
	}

	*val = data;
	return PCIBIOS_SUCCESSFUL;
}
/*
 * Config write: latch the target into CONF_ADDR, then store the value
 * through CONF_DATA at the appropriate width and byte offset.
 */
int orion_pcie_wr_conf(void __iomem *base, struct pci_bus *bus,
		       u32 devfn, int where, int size, u32 val)
{
	writel(PCIE_CONF_BUS(bus->number) |
	       PCIE_CONF_DEV(PCI_SLOT(devfn)) |
	       PCIE_CONF_FUNC(PCI_FUNC(devfn)) |
	       PCIE_CONF_REG(where) | PCIE_CONF_ADDR_EN,
	       base + PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 4:
		writel(val, base + PCIE_CONF_DATA_OFF);
		return PCIBIOS_SUCCESSFUL;
	case 2:
		writew(val, base + PCIE_CONF_DATA_OFF + (where & 3));
		return PCIBIOS_SUCCESSFUL;
	case 1:
		writeb(val, base + PCIE_CONF_DATA_OFF + (where & 3));
		return PCIBIOS_SUCCESSFUL;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}
}
| gpl-2.0 |
sub-b/android_kernel_samsung_s3ve3g-old | fs/xattr_acl.c | 4834 | 2351 | /*
* linux/fs/xattr_acl.c
*
* Almost all from linux/fs/ext2/acl.c:
* Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
*/
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/posix_acl_xattr.h>
#include <linux/gfp.h>
/*
* Convert from extended attribute to in-memory representation.
*/
struct posix_acl *
posix_acl_from_xattr(const void *value, size_t size)
{
	/*
	 * Parse a raw (little-endian, on-disk/on-wire) POSIX ACL xattr blob
	 * into an in-memory struct posix_acl.  @value may come from
	 * userspace, so every length and tag is validated before use.
	 * Returns the new ACL, NULL for an empty/absent ACL, or ERR_PTR.
	 */
	posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
	posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
	int count;
	struct posix_acl *acl;
	struct posix_acl_entry *acl_e;

	if (!value)
		return NULL;
	if (size < sizeof(posix_acl_xattr_header))
		 return ERR_PTR(-EINVAL);
	if (header->a_version != cpu_to_le32(POSIX_ACL_XATTR_VERSION))
		return ERR_PTR(-EOPNOTSUPP);

	/* count < 0 means size isn't a whole number of entries. */
	count = posix_acl_xattr_count(size);
	if (count < 0)
		return ERR_PTR(-EINVAL);
	if (count == 0)
		return NULL;
	
	acl = posix_acl_alloc(count, GFP_NOFS);
	if (!acl)
		return ERR_PTR(-ENOMEM);
	acl_e = acl->a_entries;
	
	for (end = entry + count; entry != end; acl_e++, entry++) {
		acl_e->e_tag  = le16_to_cpu(entry->e_tag);
		acl_e->e_perm = le16_to_cpu(entry->e_perm);

		switch(acl_e->e_tag) {
			case ACL_USER_OBJ:
			case ACL_GROUP_OBJ:
			case ACL_MASK:
			case ACL_OTHER:
				/* These tags never carry a qualifier id. */
				acl_e->e_id = ACL_UNDEFINED_ID;
				break;

			case ACL_USER:
			case ACL_GROUP:
				acl_e->e_id = le32_to_cpu(entry->e_id);
				break;

			default:
				/* Unknown tag: the whole blob is invalid. */
				goto fail;
		}
	}
	return acl;

fail:
	posix_acl_release(acl);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL (posix_acl_from_xattr);
/*
* Convert from in-memory to extended attribute representation.
*/
/*
 * Serialize an in-memory ACL into the little-endian xattr wire format.
 * With a NULL buffer, only the required size is reported; otherwise the
 * encoded size is returned, or -ERANGE if @size is too small.
 */
int
posix_acl_to_xattr(const struct posix_acl *acl, void *buffer, size_t size)
{
	posix_acl_xattr_header *ext_acl = (posix_acl_xattr_header *)buffer;
	const struct posix_acl_entry *src;
	int real_size, i;

	real_size = posix_acl_xattr_size(acl->a_count);
	if (!buffer)
		return real_size;
	if (real_size > size)
		return -ERANGE;

	ext_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);

	for (i = 0; i < acl->a_count; i++) {
		src = &acl->a_entries[i];
		ext_acl->a_entries[i].e_tag  = cpu_to_le16(src->e_tag);
		ext_acl->a_entries[i].e_perm = cpu_to_le16(src->e_perm);
		ext_acl->a_entries[i].e_id   = cpu_to_le32(src->e_id);
	}
	return real_size;
}
EXPORT_SYMBOL (posix_acl_to_xattr);
| gpl-2.0 |
atondwal/linux-2 | fs/minix/bitmap.c | 7906 | 6771 | /*
* linux/fs/minix/bitmap.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* Modified for 680x0 by Hamish Macdonald
* Fixed for 680x0 by Andreas Schwab
*/
/* bitmap.c contains the code that handles the inode and block bitmaps */
#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/bitops.h>
#include <linux/sched.h>
static DEFINE_SPINLOCK(bitmap_lock);
/*
* bitmap consists of blocks filled with 16bit words
* bit set == busy, bit clear == free
* endianness is a mess, but for counting zero bits it really doesn't matter...
*/
/*
 * Count the clear (free) bits across the bitmap blocks in @map.
 * The bitmap is read as 16-bit words; each word contributes
 * 16 - popcount set bits of free space.
 */
static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits)
{
	unsigned nblocks = DIV_ROUND_UP(numbits, blocksize * 8);
	__u32 free_bits = 0;
	unsigned i;

	for (i = 0; i < nblocks; i++) {
		const __u16 *word = (__u16 *)map[i]->b_data;
		const __u16 *end = word + blocksize / 2;

		for (; word < end; word++)
			free_bits += 16 - hweight16(*word);
	}

	return free_bits;
}
void minix_free_block(struct inode *inode, unsigned long block)
{
	/*
	 * Return @block to the zone bitmap.  Out-of-range block numbers are
	 * logged and ignored rather than corrupting the bitmap.
	 */
	struct super_block *sb = inode->i_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct buffer_head *bh;
	int k = sb->s_blocksize_bits + 3;	/* log2(bits per bitmap block) */
	unsigned long bit, zone;

	if (block < sbi->s_firstdatazone || block >= sbi->s_nzones) {
		printk("Trying to free block not in datazone\n");
		return;
	}
	/* Zone numbering is 1-based within the bitmap (bit 0 is reserved). */
	zone = block - sbi->s_firstdatazone + 1;
	bit = zone & ((1<<k) - 1);
	zone >>= k;
	if (zone >= sbi->s_zmap_blocks) {
		printk("minix_free_block: nonexistent bitmap buffer\n");
		return;
	}
	bh = sbi->s_zmap[zone];
	/* bitmap_lock serializes all zone/inode bitmap updates. */
	spin_lock(&bitmap_lock);
	if (!minix_test_and_clear_bit(bit, bh->b_data))
		printk("minix_free_block (%s:%lu): bit already cleared\n",
		       sb->s_id, block);
	spin_unlock(&bitmap_lock);
	mark_buffer_dirty(bh);
	return;
}
int minix_new_block(struct inode * inode)
{
	/*
	 * Allocate a free data block: scan each zone-bitmap block for a
	 * clear bit, claim it under bitmap_lock, and translate the bit
	 * position back to an absolute block number.  Returns the block
	 * number, or 0 when the filesystem is full.
	 */
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	int bits_per_zone = 8 * inode->i_sb->s_blocksize;
	int i;

	for (i = 0; i < sbi->s_zmap_blocks; i++) {
		struct buffer_head *bh = sbi->s_zmap[i];
		int j;

		spin_lock(&bitmap_lock);
		j = minix_find_first_zero_bit(bh->b_data, bits_per_zone);
		if (j < bits_per_zone) {
			minix_set_bit(j, bh->b_data);
			spin_unlock(&bitmap_lock);
			mark_buffer_dirty(bh);
			j += i * bits_per_zone + sbi->s_firstdatazone-1;
			/*
			 * NOTE(review): if j falls outside the data zone the
			 * bit stays set even though 0 is returned -- looks
			 * like a leaked bit; confirm against upstream.
			 */
			if (j < sbi->s_firstdatazone || j >= sbi->s_nzones)
				break;
			return j;
		}
		spin_unlock(&bitmap_lock);
	}
	return 0;
}
/* Total free data blocks, scaled from zones by the zone-size shift. */
unsigned long minix_count_free_blocks(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);
	u32 nbits = sbi->s_nzones - (sbi->s_firstdatazone + 1);
	unsigned long free_zones;

	free_zones = count_free(sbi->s_zmap, sb->s_blocksize, nbits);
	return free_zones << sbi->s_log_zone_size;
}
/*
 * Map a V1 inode number to its on-disk struct.  On success *bh holds the
 * buffer containing the inode (caller must brelse it); on failure *bh is
 * NULL and NULL is returned.
 */
struct minix_inode *
minix_V1_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
{
	int block;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct minix_inode *p;

	/*
	 * Fix: clear *bh up front, matching minix_V2_raw_inode, so callers
	 * never see a stale/uninitialized buffer pointer on the early
	 * error return below.
	 */
	*bh = NULL;
	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %ld is out of range\n",
		       sb->s_id, (long)ino);
		return NULL;
	}
	/* Inodes are 1-based on disk; the table follows the two bitmaps. */
	ino--;
	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
		 ino / MINIX_INODES_PER_BLOCK;
	*bh = sb_bread(sb, block);
	if (!*bh) {
		printk("Unable to read inode block\n");
		return NULL;
	}
	p = (void *)(*bh)->b_data;
	return p + ino % MINIX_INODES_PER_BLOCK;
}
struct minix2_inode *
minix_V2_raw_inode(struct super_block *sb, ino_t ino, struct buffer_head **bh)
{
	/*
	 * Map a V2 inode number to its on-disk struct.  On success *bh holds
	 * the buffer containing the inode (caller must brelse it); on
	 * failure *bh is NULL and NULL is returned.
	 */
	int block;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct minix2_inode *p;
	int minix2_inodes_per_block = sb->s_blocksize / sizeof(struct minix2_inode);

	*bh = NULL;
	if (!ino || ino > sbi->s_ninodes) {
		printk("Bad inode number on dev %s: %ld is out of range\n",
		       sb->s_id, (long)ino);
		return NULL;
	}
	/* Inodes are 1-based on disk; the table follows the two bitmaps. */
	ino--;
	block = 2 + sbi->s_imap_blocks + sbi->s_zmap_blocks +
		 ino / minix2_inodes_per_block;
	*bh = sb_bread(sb, block);
	if (!*bh) {
		printk("Unable to read inode block\n");
		return NULL;
	}
	p = (void *)(*bh)->b_data;
	return p + ino % minix2_inodes_per_block;
}
/* Clear the link count and mode of a deleted inode on disk. */
/* Clear the link count and mode of a deleted inode on disk. */
static void minix_clear_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;

	/* The two on-disk formats have different layouts; pick by version. */
	if (INODE_VERSION(inode) == MINIX_V1) {
		struct minix_inode *raw_inode;
		raw_inode = minix_V1_raw_inode(inode->i_sb, inode->i_ino, &bh);
		if (raw_inode) {
			raw_inode->i_nlinks = 0;
			raw_inode->i_mode = 0;
		}
	} else {
		struct minix2_inode *raw_inode;
		raw_inode = minix_V2_raw_inode(inode->i_sb, inode->i_ino, &bh);
		if (raw_inode) {
			raw_inode->i_nlinks = 0;
			raw_inode->i_mode = 0;
		}
	}
	/* A read failure leaves bh NULL; nothing to write back then. */
	if (bh) {
		mark_buffer_dirty(bh);
		brelse (bh);
	}
}
void minix_free_inode(struct inode * inode)
{
	/*
	 * Release @inode: wipe its on-disk copy, then clear its bit in the
	 * inode bitmap.  Bad inode numbers are logged and ignored.
	 */
	struct super_block *sb = inode->i_sb;
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	struct buffer_head *bh;
	int k = sb->s_blocksize_bits + 3;	/* log2(bits per bitmap block) */
	unsigned long ino, bit;

	ino = inode->i_ino;
	if (ino < 1 || ino > sbi->s_ninodes) {
		printk("minix_free_inode: inode 0 or nonexistent inode\n");
		return;
	}
	bit = ino & ((1<<k) - 1);
	ino >>= k;
	if (ino >= sbi->s_imap_blocks) {
		printk("minix_free_inode: nonexistent imap in superblock\n");
		return;
	}

	minix_clear_inode(inode);	/* clear on-disk copy */

	bh = sbi->s_imap[ino];
	spin_lock(&bitmap_lock);
	if (!minix_test_and_clear_bit(bit, bh->b_data))
		printk("minix_free_inode: bit %lu already cleared\n", bit);
	spin_unlock(&bitmap_lock);
	mark_buffer_dirty(bh);
}
struct inode *minix_new_inode(const struct inode *dir, umode_t mode, int *error)
{
	/*
	 * Allocate a fresh inode in @dir's filesystem: claim a free bit in
	 * the inode bitmap under bitmap_lock, then initialize the in-core
	 * inode.  On failure returns NULL with *error set (-ENOMEM/-ENOSPC).
	 */
	struct super_block *sb = dir->i_sb;
	struct minix_sb_info *sbi = minix_sb(sb);
	struct inode *inode = new_inode(sb);
	struct buffer_head * bh;
	int bits_per_zone = 8 * sb->s_blocksize;
	unsigned long j;
	int i;

	if (!inode) {
		*error = -ENOMEM;
		return NULL;
	}
	j = bits_per_zone;
	bh = NULL;
	*error = -ENOSPC;
	spin_lock(&bitmap_lock);
	for (i = 0; i < sbi->s_imap_blocks; i++) {
		bh = sbi->s_imap[i];
		j = minix_find_first_zero_bit(bh->b_data, bits_per_zone);
		if (j < bits_per_zone)
			break;
	}
	if (!bh || j >= bits_per_zone) {
		spin_unlock(&bitmap_lock);
		iput(inode);
		return NULL;
	}
	if (minix_test_and_set_bit(j, bh->b_data)) {	/* shouldn't happen */
		spin_unlock(&bitmap_lock);
		printk("minix_new_inode: bit already set\n");
		iput(inode);
		return NULL;
	}
	spin_unlock(&bitmap_lock);
	mark_buffer_dirty(bh);
	/* Translate the bit position to an absolute (1-based) inode number. */
	j += i * bits_per_zone;
	if (!j || j > sbi->s_ninodes) {
		iput(inode);
		return NULL;
	}
	inode_init_owner(inode, dir, mode);
	inode->i_ino = j;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	inode->i_blocks = 0;
	memset(&minix_i(inode)->u, 0, sizeof(minix_i(inode)->u));
	insert_inode_hash(inode);
	mark_inode_dirty(inode);

	*error = 0;
	return inode;
}
/* Free inode count; +1 because bit 0 of the imap is reserved. */
unsigned long minix_count_free_inodes(struct super_block *sb)
{
	struct minix_sb_info *sbi = minix_sb(sb);
	u32 nbits = sbi->s_ninodes + 1;

	return count_free(sbi->s_imap, sb->s_blocksize, nbits);
}
| gpl-2.0 |
syhost/android_kernel_xiaomi_armani | drivers/infiniband/hw/amso1100/c2_qp.c | 8162 | 25087 | /*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/delay.h>
#include <linux/gfp.h>
#include "c2.h"
#include "c2_vq.h"
#include "c2_status.h"
#define C2_MAX_ORD_PER_QP 128
#define C2_MAX_IRD_PER_QP 128
#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
#define NO_SUPPORT -1
/*
 * Map IB work-request opcodes to adapter WR types; entries marked
 * NO_SUPPORT (-1) are opcodes this hardware cannot execute.
 */
static const u8 c2_opcode[] = {
	[IB_WR_SEND] = C2_WR_TYPE_SEND,
	[IB_WR_SEND_WITH_IMM] = NO_SUPPORT,
	[IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT,
	[IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT,
};
/* Translate an IB QP state to the adapter's state encoding; -1 if there
 * is no corresponding hardware state. */
static int to_c2_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
		return C2_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return C2_QP_STATE_RTS;
	case IB_QPS_SQD:
	case IB_QPS_SQE:
		/* Both drain states map onto the single CLOSING state. */
		return C2_QP_STATE_CLOSING;
	case IB_QPS_ERR:
		return C2_QP_STATE_ERROR;
	default:
		return -1;
	}
}
/* Translate an adapter QP state back to the IB state space; -1 for an
 * unrecognized hardware state. */
static int to_ib_state(enum c2_qp_state c2_state)
{
	switch (c2_state) {
	case C2_QP_STATE_IDLE:
		return IB_QPS_RESET;
	case C2_QP_STATE_CONNECTING:
		return IB_QPS_RTR;
	case C2_QP_STATE_RTS:
		return IB_QPS_RTS;
	case C2_QP_STATE_CLOSING:
		return IB_QPS_SQD;
	case C2_QP_STATE_TERMINATE:
		return IB_QPS_SQE;
	case C2_QP_STATE_ERROR:
		return IB_QPS_ERR;
	default:
		return -1;
	}
}
/* Human-readable name for an IB QP state (debug output only). */
static const char *to_ib_state_str(int ib_state)
{
	static const char *state_str[] = {
		"IB_QPS_RESET",
		"IB_QPS_INIT",
		"IB_QPS_RTR",
		"IB_QPS_RTS",
		"IB_QPS_SQD",
		"IB_QPS_SQE",
		"IB_QPS_ERR"
	};

	if (ib_state < IB_QPS_RESET ||
	    ib_state > IB_QPS_ERR)
		return "<invalid IB QP state>";

	/* Table is indexed relative to the first state value. */
	return state_str[ib_state - IB_QPS_RESET];
}
/* Record a hardware-initiated state change on the in-core QP, logging
 * the transition for debugging. */
void c2_set_qp_state(struct c2_qp *qp, int c2_state)
{
	int ib_state = to_ib_state(c2_state);

	pr_debug("%s: qp[%p] state modify %s --> %s\n",
	       __func__,
		qp,
		to_ib_state_str(qp->state),
		to_ib_state_str(ib_state));
	qp->state = ib_state;
}
#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
		 struct ib_qp_attr *attr, int attr_mask)
{
	/*
	 * Modify a QP's state by sending a QP_MODIFY verb to the adapter
	 * and waiting for its reply.  Only IB_QP_STATE / IB_QP_CUR_STATE
	 * attribute changes are handled here; everything else in the WR is
	 * marked "no change".  Returns 0 or a negative errno / c2 error.
	 */
	struct c2wr_qp_modify_req wr;
	struct c2wr_qp_modify_rep *reply;
	struct c2_vq_req *vq_req;
	unsigned long flags;
	u8 next_state;
	int err;

	pr_debug("%s:%d qp=%p, %s --> %s\n",
		__func__, __LINE__,
		qp,
		to_ib_state_str(qp->state),
		to_ib_state_str(attr->qp_state));

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	/* Build the WR; ORD/IRD/queue depths are explicitly left unchanged. */
	c2_wr_set_id(&wr, CCWR_QP_MODIFY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.qp_handle = qp->adapter_handle;
	wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);

	if (attr_mask & IB_QP_STATE) {
		/* Ensure the state is valid */
		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
			err = -EINVAL;
			goto bail0;
		}

		wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));

		if (attr->qp_state == IB_QPS_ERR) {
			spin_lock_irqsave(&qp->lock, flags);
			if (qp->cm_id && qp->state == IB_QPS_RTS) {
				pr_debug("Generating CLOSE event for QP-->ERR, "
					"qp=%p, cm_id=%p\n",qp,qp->cm_id);
				/* Generate an CLOSE event */
				vq_req->cm_id = qp->cm_id;
				vq_req->event = IW_CM_EVENT_CLOSE;
			}
			spin_unlock_irqrestore(&qp->lock, flags);
		}
		next_state =  attr->qp_state;

	} else if (attr_mask & IB_QP_CUR_STATE) {

		if (attr->cur_qp_state != IB_QPS_RTR &&
		    attr->cur_qp_state != IB_QPS_RTS &&
		    attr->cur_qp_state != IB_QPS_SQD &&
		    attr->cur_qp_state != IB_QPS_SQE) {
			err = -EINVAL;
			goto bail0;
		} else
			wr.next_qp_state =
				cpu_to_be32(to_c2_state(attr->cur_qp_state));

		next_state = attr->cur_qp_state;

	} else {
		/* Nothing requested that we handle -- succeed trivially. */
		err = 0;
		goto bail0;
	}

	/* reference the request struct */
	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) & wr);
	if (err) {
		/* Send failed: drop the reference taken above. */
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);
	if (!err)
		qp->state = next_state;
#ifdef DEBUG
	else
		pr_debug("%s: c2_errno=%d\n", __func__, err);
#endif
	/*
	 * If we're going to error and generating the event here, then
	 * we need to remove the reference because there will be no
	 * close event generated by the adapter
	 */
	spin_lock_irqsave(&qp->lock, flags);
	if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) {
		qp->cm_id->rem_ref(qp->cm_id);
		qp->cm_id = NULL;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);

	pr_debug("%s:%d qp=%p, cur_state=%s\n",
		__func__, __LINE__,
		qp,
		to_ib_state_str(qp->state));
	return err;
}
int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
			  int ord, int ird)
{
	/*
	 * Program the QP's outbound (ORD) and inbound (IRD) RDMA-read
	 * limits via a QP_MODIFY verb, leaving the state and queue depths
	 * untouched.  Returns 0 or a negative errno / c2 error.
	 */
	struct c2wr_qp_modify_req wr;
	struct c2wr_qp_modify_rep *reply;
	struct c2_vq_req *vq_req;
	int err;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	c2_wr_set_id(&wr, CCWR_QP_MODIFY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.qp_handle = qp->adapter_handle;
	wr.ord = cpu_to_be32(ord);
	wr.ird = cpu_to_be32(ird);
	/* Everything else is explicitly "no change". */
	wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);

	/* reference the request struct */
	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) & wr);
	if (err) {
		/* Send failed: drop the reference taken above. */
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	reply = (struct c2wr_qp_modify_rep *) (unsigned long)
		vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);
	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
/*
 * Tear down a QP on the adapter via a CCWR_QP_DESTROY verb.  If the QP
 * still has an active connection (cm_id set and state RTS), a CLOSE
 * event is queued on the verb request so the CM is told, since the
 * adapter will not generate one for a destroyed QP.
 * Returns 0 on success or a negative errno.
 */
static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
{
	struct c2_vq_req *vq_req;
	struct c2wr_qp_destroy_req wr;
	struct c2wr_qp_destroy_rep *reply;
	unsigned long flags;
	int err;

	/*
	 * Allocate a verb request message
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		return -ENOMEM;
	}

	/*
	 * Initialize the WR
	 */
	c2_wr_set_id(&wr, CCWR_QP_DESTROY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.qp_handle = qp->adapter_handle;

	/*
	 * reference the request struct. dereferenced in the int handler.
	 */
	vq_req_get(c2dev, vq_req);

	/*
	 * Decide under qp->lock whether a CLOSE event is needed, so the
	 * cm_id cannot change underneath us.
	 */
	spin_lock_irqsave(&qp->lock, flags);
	if (qp->cm_id && qp->state == IB_QPS_RTS) {
		pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
			"qp=%p, cm_id=%p\n",qp,qp->cm_id);
		/* Generate an CLOSE event */
		vq_req->qp = qp;
		vq_req->cm_id = qp->cm_id;
		vq_req->event = IW_CM_EVENT_CLOSE;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	/*
	 * Send WR to adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) & wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	/*
	 * Wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail0;
	}

	/*
	 * Process reply
	 */
	reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	/* Drop the CM reference now that the QP is gone on the adapter. */
	spin_lock_irqsave(&qp->lock, flags);
	if (qp->cm_id) {
		qp->cm_id->rem_ref(qp->cm_id);
		qp->cm_id = NULL;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}
/*
 * Allocate a QP number for @qp and insert it into the device's idr.
 * qp_table.last is used as a rising start hint so numbers are not
 * immediately reused.  On -EAGAIN, idr_pre_get() refills the idr's
 * internal free list (GFP_KERNEL, hence called outside the lock) and
 * the insertion is retried.  Returns 0 or a negative idr error.
 */
static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
{
	int ret;

	do {
		spin_lock_irq(&c2dev->qp_table.lock);
		ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
					c2dev->qp_table.last++, &qp->qpn);
		spin_unlock_irq(&c2dev->qp_table.lock);
	} while ((ret == -EAGAIN) &&
		 idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
	return ret;
}
/* Release a QP number back to the device's idr, under the table lock. */
static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
{
	spin_lock_irq(&c2dev->qp_table.lock);
	idr_remove(&c2dev->qp_table.idr, qpn);
	spin_unlock_irq(&c2dev->qp_table.lock);
}
/*
 * Look up a QP by its QP number.  The idr is consulted under the
 * qp_table lock, so the lookup is safe against concurrent add/remove.
 * Returns the QP pointer, or NULL if @qpn is not in the table.
 */
struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
{
	struct c2_qp *found;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->qp_table.lock, flags);
	found = idr_find(&c2dev->qp_table.idr, qpn);
	spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);

	return found;
}
/*
 * Create a QP on the adapter and set up the host-side bookkeeping.
 *
 * Sequence: allocate a QP number, allocate SQ/RQ shared-pointer slots,
 * send a CCWR_QP_CREATE verb, then ioremap and initialize the adapter's
 * SQ and RQ message queues.  Every failure point unwinds all earlier
 * steps via the bail labels (in reverse order of setup).
 * Returns 0 on success or a negative errno.
 */
int c2_alloc_qp(struct c2_dev *c2dev,
		struct c2_pd *pd,
		struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
{
	struct c2wr_qp_create_req wr;
	struct c2wr_qp_create_rep *reply;
	struct c2_vq_req *vq_req;
	struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
	struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
	unsigned long peer_pa;
	u32 q_size, msg_size, mmap_size;
	void __iomem *mmap;
	int err;

	err = c2_alloc_qpn(c2dev, qp);
	if (err)
		return err;
	qp->ibqp.qp_num = qp->qpn;
	qp->ibqp.qp_type = IB_QPT_RC;

	/* Allocate the SQ and RQ shared pointers */
	qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					 &qp->sq_mq.shared_dma, GFP_KERNEL);
	if (!qp->sq_mq.shared) {
		err = -ENOMEM;
		goto bail0;
	}

	qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					 &qp->rq_mq.shared_dma, GFP_KERNEL);
	if (!qp->rq_mq.shared) {
		err = -ENOMEM;
		goto bail1;
	}

	/* Allocate the verbs request */
	vq_req = vq_req_alloc(c2dev);
	if (vq_req == NULL) {
		err = -ENOMEM;
		goto bail2;
	}

	/* Initialize the work request */
	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_QP_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.sq_cq_handle = send_cq->adapter_handle;
	wr.rq_cq_handle = recv_cq->adapter_handle;
	/* NOTE(review): depths are requested with one extra slot beyond
	 * the caller's max_*_wr — presumably a queue-full sentinel slot;
	 * confirm against the c2wr firmware interface. */
	wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
	wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
	wr.srq_handle = 0;
	wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
			       QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
	wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
	wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
	wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
	wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
	wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
	wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
	wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
	wr.pd_id = pd->pd_id;
	wr.user_context = (unsigned long) qp;

	vq_req_get(c2dev, vq_req);

	/* Send the WR to the adapter */
	err = vq_send_wr(c2dev, (union c2wr *) & wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail3;
	}

	/* Wait for the verb reply */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail3;
	}

	/* Process the reply */
	reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail3;
	}

	if ((err = c2_wr_get_result(reply)) != 0) {
		goto bail4;
	}

	/* Fill in the kernel QP struct */
	atomic_set(&qp->refcount, 1);
	qp->adapter_handle = reply->qp_handle;
	qp->state = IB_QPS_RESET;
	qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
	qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
	qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
	init_waitqueue_head(&qp->wait);

	/* Initialize the SQ MQ: map the adapter-resident queue at
	 * physical address c2dev->pa + sq_mq_start. */
	q_size = be32_to_cpu(reply->sq_depth);
	msg_size = be32_to_cpu(reply->sq_msg_size);
	peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
	mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
	mmap = ioremap_nocache(peer_pa, mmap_size);
	if (!mmap) {
		err = -ENOMEM;
		goto bail5;
	}

	c2_mq_req_init(&qp->sq_mq,
		       be32_to_cpu(reply->sq_mq_index),
		       q_size,
		       msg_size,
		       mmap + sizeof(struct c2_mq_shared),	/* pool start */
		       mmap,				/* peer */
		       C2_MQ_ADAPTER_TARGET);

	/* Initialize the RQ mq, same layout as the SQ mapping above. */
	q_size = be32_to_cpu(reply->rq_depth);
	msg_size = be32_to_cpu(reply->rq_msg_size);
	peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
	mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
	mmap = ioremap_nocache(peer_pa, mmap_size);
	if (!mmap) {
		err = -ENOMEM;
		goto bail6;
	}

	c2_mq_req_init(&qp->rq_mq,
		       be32_to_cpu(reply->rq_mq_index),
		       q_size,
		       msg_size,
		       mmap + sizeof(struct c2_mq_shared),	/* pool start */
		       mmap,				/* peer */
		       C2_MQ_ADAPTER_TARGET);

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	return 0;

bail6:
	iounmap(qp->sq_mq.peer);
bail5:
	destroy_qp(c2dev, qp);
bail4:
	vq_repbuf_free(c2dev, reply);
bail3:
	vq_req_free(c2dev, vq_req);
bail2:
	c2_free_mqsp(qp->rq_mq.shared);
bail1:
	c2_free_mqsp(qp->sq_mq.shared);
bail0:
	c2_free_qpn(c2dev, qp->qpn);
	return err;
}
/*
 * Acquire both CQ locks in a globally consistent order (by address),
 * so that two paths locking the same CQ pair can never deadlock.
 * Interrupts are disabled by the first (outer) lock only.
 */
static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		return;
	}

	if (send_cq > recv_cq) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
/*
 * Release the CQ locks taken by c2_lock_cqs(), inner lock first;
 * interrupts are re-enabled when the outer lock is dropped.
 */
static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
{
	if (send_cq == recv_cq) {
		spin_unlock_irq(&send_cq->lock);
		return;
	}

	if (send_cq > recv_cq) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
/*
 * Free a QP: remove it from the QP-number table, destroy it on the
 * adapter, clean stale CQEs, unmap the MQs, release the shared
 * pointers, and finally wait for all remaining references to drop.
 */
void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
{
	struct c2_cq *send_cq;
	struct c2_cq *recv_cq;

	send_cq = to_c2cq(qp->ibqp.send_cq);
	recv_cq = to_c2cq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	c2_lock_cqs(send_cq, recv_cq);
	c2_free_qpn(c2dev, qp->qpn);
	c2_unlock_cqs(send_cq, recv_cq);

	/*
	 * Destroy qp in the rnic...
	 */
	destroy_qp(c2dev, qp);

	/*
	 * Mark any unreaped CQEs as null and void.
	 */
	c2_cq_clean(c2dev, qp, send_cq->cqn);
	if (send_cq != recv_cq)
		c2_cq_clean(c2dev, qp, recv_cq->cqn);

	/*
	 * Unmap the MQs and return the shared pointers
	 * to the message pool.
	 */
	iounmap(qp->sq_mq.peer);
	iounmap(qp->rq_mq.peer);
	c2_free_mqsp(qp->sq_mq.shared);
	c2_free_mqsp(qp->rq_mq.shared);

	/* Drop our own reference, then block until everyone else's is
	 * gone before the caller frees the memory. */
	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));
}
/*
 * Function: move_sgl
 *
 * Description:
 *	Copy a consumer scatter/gather list into a CCIL work-request
 *	message, converting each element to wire (big-endian) order.
 *	Zero-length elements are skipped entirely (firmware bugs
 *	1456/1498/1643).  If every element was skipped, one explicit
 *	zeroed element is emitted so the RNIC never sees an empty SGL
 *	(bug 1476).
 *
 * IN:
 *	dst          - ptr to CCIL Work Request message SGL memory.
 *	src          - ptr to the consumers SGL memory.
 *	count        - number of elements in src.
 *
 * OUT:
 *	p_len        - total byte length of the emitted SGL.
 *	actual_count - number of elements actually emitted.
 *
 * Return:
 *	0, or -EINVAL if the summed lengths overflow 32 bits.
 */
static int
move_sgl(struct c2_data_addr * dst, struct ib_sge *src, int count, u32 * p_len,
	 u8 * actual_count)
{
	u32 total = 0;		/* running byte total */
	u8 emitted = 0;		/* elements with non-zero length */
	int i;

	for (i = 0; i < count; i++, src++) {
		/*
		 * Unsigned wrap check: if adding this element makes the
		 * 32-bit total go backwards, the SGL is too long.
		 */
		if (total + src->length < total)
			return -EINVAL;

		/* Skip over any sge's supplied with len=0. */
		if (!src->length)
			continue;

		total += src->length;
		dst->stag = cpu_to_be32(src->lkey);
		dst->to = cpu_to_be64(src->addr);
		dst->length = cpu_to_be32(src->length);
		dst++;
		emitted++;
	}

	if (emitted == 0) {
		/*
		 * Hand the RNIC a single zero-length element so the
		 * firmware need not special-case an empty SGL.
		 */
		dst->stag = 0;
		dst->to = 0;
		dst->length = 0;
	}

	*p_len = total;
	*actual_count = emitted;
	return 0;
}
/*
 * Function: c2_activity (private function)
 *
 * Description:
 *	Post an mq index to the host->adapter activity fifo.
 *
 * IN:
 *	c2dev - ptr to c2dev structure
 *	mq_index - mq index to post
 *	shared - value most recently written to shared
 *
 * OUT:
 *
 * Return:
 *	none
 */
static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
{
	/*
	 * First read the register to see if the FIFO is full, and if so,
	 * spin until it's not.  This isn't perfect -- there is no
	 * synchronization among the clients of the register, but in
	 * practice it prevents multiple CPU from hammering the bus
	 * with PCI RETRY. Note that when this does happen, the card
	 * cannot get on the bus and the card and system hang in a
	 * deadlock -- thus the need for this code. [TOT]
	 */
	/* Bit 31 of the hint register is the full flag; busy-wait on it. */
	while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
		udelay(10);

	__raw_writel(C2_HINT_MAKE(mq_index, shared),
		     c2dev->regs + PCI_BAR0_ADAPTER_HINT);
}
/*
 * Function: qp_wr_post
 *
 * Description:
 *	Allocate a slot in the MQ, copy the host-built WR into it, and
 *	produce the message to the adapter.
 *
 * IN:
 *	q    - ptr to user MQ.
 *	wr   - ptr to host-copy of the WR.
 *	qp   - ptr to user qp
 *	size - Number of bytes to post.  Assumed to be divisible by 4.
 *
 * OUT: none
 *
 * Return:
 *	0 on success, -EINVAL if no MQ slot is available.
 */
static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size)
{
	union c2wr *slot = c2_mq_alloc(q);

	if (!slot)
		return -EINVAL;

#ifdef CCMSGMAGIC
	((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC);
#endif

	/*
	 * Since all header fields in the WR are the same as the
	 * CQE, pre-mark the result as pending so the adapter need not.
	 */
	c2_wr_set_result(wr, CCERR_PENDING);

	/* Copy the wr down to the adapter and publish it. */
	memcpy(slot, wr, size);
	c2_mq_produce(q);
	return 0;
}
/*
 * Post a chain of send work requests to the QP's send queue.
 *
 * For each ib_send_wr a host copy of the CCIL WR is built (send, RDMA
 * write, or RDMA read), its SGL is converted with move_sgl(), and the
 * WR is copied into the adapter's SQ under qp->lock.  The loop stops
 * at the first failure; *bad_wr then points at the first unposted WR.
 * Returns 0 on success or a negative errno.
 */
int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
		 struct ib_send_wr **bad_wr)
{
	struct c2_dev *c2dev = to_c2dev(ibqp->device);
	struct c2_qp *qp = to_c2qp(ibqp);
	union c2wr wr;
	unsigned long lock_flags;
	int err = 0;

	u32 flags;
	u32 tot_len;
	u8 actual_sge_count;
	u32 msg_size;

	/* Posting is only legal in states up to and including RTS. */
	if (qp->state > IB_QPS_RTS) {
		err = -EINVAL;
		goto out;
	}

	while (ib_wr) {

		flags = 0;
		wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
		if (ib_wr->send_flags & IB_SEND_SIGNALED) {
			flags |= SQ_SIGNALED;
		}

		switch (ib_wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			/* Pick the WR type from solicited/invalidate flags. */
			if (ib_wr->opcode == IB_WR_SEND) {
				if (ib_wr->send_flags & IB_SEND_SOLICITED)
					c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
				else
					c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
				wr.sqwr.send.remote_stag = 0;
			} else {
				if (ib_wr->send_flags & IB_SEND_SOLICITED)
					c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
				else
					c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
				wr.sqwr.send.remote_stag =
					cpu_to_be32(ib_wr->ex.invalidate_rkey);
			}

			/* Message size = fixed header + one data_addr per SGE. */
			msg_size = sizeof(struct c2wr_send_req) +
			    sizeof(struct c2_data_addr) * ib_wr->num_sge;
			if (ib_wr->num_sge > qp->send_sgl_depth) {
				err = -EINVAL;
				break;
			}
			if (ib_wr->send_flags & IB_SEND_FENCE) {
				flags |= SQ_READ_FENCE;
			}
			err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data),
				       ib_wr->sg_list,
				       ib_wr->num_sge,
				       &tot_len, &actual_sge_count);
			wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
			c2_wr_set_sge_count(&wr, actual_sge_count);
			break;
		case IB_WR_RDMA_WRITE:
			c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
			msg_size = sizeof(struct c2wr_rdma_write_req) +
			    (sizeof(struct c2_data_addr) * ib_wr->num_sge);
			if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
				err = -EINVAL;
				break;
			}
			if (ib_wr->send_flags & IB_SEND_FENCE) {
				flags |= SQ_READ_FENCE;
			}
			wr.sqwr.rdma_write.remote_stag =
			    cpu_to_be32(ib_wr->wr.rdma.rkey);
			wr.sqwr.rdma_write.remote_to =
			    cpu_to_be64(ib_wr->wr.rdma.remote_addr);
			err = move_sgl((struct c2_data_addr *)
				       & (wr.sqwr.rdma_write.data),
				       ib_wr->sg_list,
				       ib_wr->num_sge,
				       &tot_len, &actual_sge_count);
			wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
			c2_wr_set_sge_count(&wr, actual_sge_count);
			break;
		case IB_WR_RDMA_READ:
			c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
			msg_size = sizeof(struct c2wr_rdma_read_req);

			/* IWarp only suppots 1 sge for RDMA reads */
			if (ib_wr->num_sge > 1) {
				err = -EINVAL;
				break;
			}

			/*
			 * Move the local and remote stag/to/len into the WR.
			 */
			wr.sqwr.rdma_read.local_stag =
			    cpu_to_be32(ib_wr->sg_list->lkey);
			wr.sqwr.rdma_read.local_to =
			    cpu_to_be64(ib_wr->sg_list->addr);
			wr.sqwr.rdma_read.remote_stag =
			    cpu_to_be32(ib_wr->wr.rdma.rkey);
			wr.sqwr.rdma_read.remote_to =
			    cpu_to_be64(ib_wr->wr.rdma.remote_addr);
			wr.sqwr.rdma_read.length =
			    cpu_to_be32(ib_wr->sg_list->length);
			break;
		default:
			/* error */
			msg_size = 0;
			err = -EINVAL;
			break;
		}

		/*
		 * If we had an error on the last wr build, then
		 * break out.  Possible errors include bogus WR
		 * type, and a bogus SGL length...
		 */
		if (err) {
			break;
		}

		/*
		 * Store flags
		 */
		c2_wr_set_flags(&wr, flags);

		/*
		 * Post the puppy!
		 */
		spin_lock_irqsave(&qp->lock, lock_flags);
		err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
		if (err) {
			spin_unlock_irqrestore(&qp->lock, lock_flags);
			break;
		}

		/*
		 * Enqueue mq index to activity FIFO.
		 */
		c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
		spin_unlock_irqrestore(&qp->lock, lock_flags);

		ib_wr = ib_wr->next;
	}

out:
	if (err)
		*bad_wr = ib_wr;
	return err;
}
/*
 * Post a chain of receive work requests to the QP's receive queue.
 * Each WR's SGL is converted with move_sgl() and the fixed-size RQ
 * message is copied into the adapter's RQ under qp->lock.  On failure
 * *bad_wr points at the first unposted WR.  Returns 0 or -errno.
 */
int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
		    struct ib_recv_wr **bad_wr)
{
	struct c2_dev *c2dev = to_c2dev(ibqp->device);
	struct c2_qp *qp = to_c2qp(ibqp);
	union c2wr wr;
	unsigned long lock_flags;
	int err = 0;

	/* Posting is only legal in states up to and including RTS. */
	if (qp->state > IB_QPS_RTS) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Try and post each work request
	 */
	while (ib_wr) {
		u32 tot_len;
		u8 actual_sge_count;

		if (ib_wr->num_sge > qp->recv_sgl_depth) {
			err = -EINVAL;
			break;
		}

		/*
		 * Create local host-copy of the WR
		 */
		wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
		c2_wr_set_id(&wr, CCWR_RECV);
		c2_wr_set_flags(&wr, 0);

		/* sge_count is limited to eight bits. */
		BUG_ON(ib_wr->num_sge >= 256);
		err = move_sgl((struct c2_data_addr *) & (wr.rqwr.data),
			       ib_wr->sg_list,
			       ib_wr->num_sge, &tot_len, &actual_sge_count);
		c2_wr_set_sge_count(&wr, actual_sge_count);

		/*
		 * If we had an error on the last wr build, then
		 * break out.  Possible errors include bogus WR
		 * type, and a bogus SGL length...
		 */
		if (err) {
			break;
		}

		spin_lock_irqsave(&qp->lock, lock_flags);
		err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
		if (err) {
			spin_unlock_irqrestore(&qp->lock, lock_flags);
			break;
		}

		/*
		 * Enqueue mq index to activity FIFO
		 */
		c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
		spin_unlock_irqrestore(&qp->lock, lock_flags);

		ib_wr = ib_wr->next;
	}

out:
	if (err)
		*bad_wr = ib_wr;
	return err;
}
/* Set up the per-device QP-number table (idr + protecting lock). */
void __devinit c2_init_qp_table(struct c2_dev *c2dev)
{
	idr_init(&c2dev->qp_table.idr);
	spin_lock_init(&c2dev->qp_table.lock);
}
/* Tear down the QP-number idr at device removal time. */
void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev)
{
	idr_destroy(&c2dev->qp_table.idr);
}
| gpl-2.0 |
jmarcet/linux-amlogic | arch/sh/kernel/cpu/sh3/clock-sh7705.c | 9186 | 2170 | /*
* arch/sh/kernel/cpu/sh3/clock-sh7705.c
*
* SH7705 support for the clock framework
*
* Copyright (C) 2005 Paul Mundt
*
* FRQCR parsing hacked out of arch/sh/kernel/time.c
*
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 2002, 2003, 2004 Paul Mundt
* Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/freq.h>
#include <asm/io.h>
/*
 * SH7705 uses the same divisors as the generic SH-3 case, it's just the
 * FRQCR layout that is a bit different..
 */
/* Indexed by the STC field of FRQCR (bits 9:8 in the recalc below). */
static int stc_multipliers[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
/* Indexed by the IFC field of FRQCR (bits 5:4). */
static int ifc_divisors[] = { 1, 2, 3, 4, 1, 1, 1, 1 };
/* Indexed by the PFC field of FRQCR (bits 1:0). */
static int pfc_divisors[] = { 1, 2, 3, 4, 6, 1, 1, 1 };
/* Scale the master clock rate by the pfc divisor picked by FRQCR[1:0]. */
static void master_clk_init(struct clk *clk)
{
	int idx = __raw_readw(FRQCR) & 0x0003;

	clk->rate *= pfc_divisors[idx];
}

static struct sh_clk_ops sh7705_master_clk_ops = {
	.init		= master_clk_init,
};
/* Module clock: parent rate divided by the pfc divisor (FRQCR[1:0]). */
static unsigned long module_clk_recalc(struct clk *clk)
{
	unsigned int idx = __raw_readw(FRQCR) & 0x0003;

	return clk->parent->rate / pfc_divisors[idx];
}

static struct sh_clk_ops sh7705_module_clk_ops = {
	.recalc		= module_clk_recalc,
};
/* Bus clock: parent rate divided per the STC field (FRQCR[9:8]). */
static unsigned long bus_clk_recalc(struct clk *clk)
{
	unsigned int idx = (__raw_readw(FRQCR) >> 8) & 0x0003;

	return clk->parent->rate / stc_multipliers[idx];
}

static struct sh_clk_ops sh7705_bus_clk_ops = {
	.recalc		= bus_clk_recalc,
};
/* CPU clock: parent rate divided by the ifc divisor (FRQCR[5:4]). */
static unsigned long cpu_clk_recalc(struct clk *clk)
{
	unsigned int idx = (__raw_readw(FRQCR) >> 4) & 0x0003;

	return clk->parent->rate / ifc_divisors[idx];
}

static struct sh_clk_ops sh7705_cpu_clk_ops = {
	.recalc		= cpu_clk_recalc,
};
/* Ops table indexed by clock id: master, module, bus, cpu (see
 * arch_init_clk_ops below). */
static struct sh_clk_ops *sh7705_clk_ops[] = {
	&sh7705_master_clk_ops,
	&sh7705_module_clk_ops,
	&sh7705_bus_clk_ops,
	&sh7705_cpu_clk_ops,
};
/* Hand the clock framework the ops for clock @idx; out-of-range
 * indices leave *ops untouched. */
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
	if (idx >= ARRAY_SIZE(sh7705_clk_ops))
		return;

	*ops = sh7705_clk_ops[idx];
}
| gpl-2.0 |
acroreiser/kernel_samsung_msm | drivers/message/i2o/exec-osm.c | 11746 | 16794 | /*
* Executive OSM
*
* Copyright (C) 1999-2002 Red Hat Software
*
* Written by Alan Cox, Building Number Three Ltd
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* A lot of the I2O message side code from this is taken from the Red
* Creek RCPCI45 adapter driver by Red Creek Communications
*
* Fixes/additions:
* Philipp Rumpf
* Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
* Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
* Deepak Saxena <deepak@plexity.net>
* Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
* Alan Cox <alan@lxorguk.ukuu.org.uk>:
* Ported to Linux 2.5.
* Markus Lidel <Markus.Lidel@shadowconnect.com>:
* Minor fixes for 2.6.
* Markus Lidel <Markus.Lidel@shadowconnect.com>:
* Support for sysfs included.
*/
#include <linux/module.h>
#include <linux/i2o.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h> /* wait_event_interruptible_timeout() needs this */
#include <asm/param.h> /* HZ */
#include "core.h"
#define OSM_NAME "exec-osm"
/* Executive-OSM driver instance; fields filled in at registration. */
struct i2o_driver i2o_exec_driver;

/* global wait list for POST WAIT */
static LIST_HEAD(i2o_exec_wait_list);

/* Wait struct needed for POST WAIT */
struct i2o_exec_wait {
	wait_queue_head_t *wq;	/* Pointer to Wait queue */
	struct i2o_dma dma;	/* DMA buffers to free on failure */
	u32 tcntxt;		/* transaction context from reply */
	int complete;		/* 1 if reply received otherwise 0 */
	u32 m;			/* message id */
	struct i2o_message *msg;	/* pointer to the reply message */
	struct list_head list;	/* node in global wait list */
	spinlock_t lock;	/* lock before modifying */
};

/* Work struct needed to handle LCT NOTIFY replies */
struct i2o_exec_lct_notify_work {
	struct work_struct work;	/* work struct */
	struct i2o_controller *c;	/* controller on which the LCT NOTIFY
					   was received */
};

/* Exec OSM class handling definition: claim only Executive devices. */
static struct i2o_class_id i2o_exec_class_id[] = {
	{I2O_CLASS_EXECUTIVE},
	{I2O_CLASS_END}
};
/**
 * i2o_exec_wait_alloc - Allocate and initialize an i2o_exec_wait struct
 *
 * Allocates a zeroed i2o_exec_wait and initializes its list node and
 * spinlock.
 *
 * Returns the new i2o_exec_wait pointer on success or NULL on
 * allocation failure.
 */
static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
{
	struct i2o_exec_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);

	if (!wait)
		return NULL;

	INIT_LIST_HEAD(&wait->list);
	spin_lock_init(&wait->lock);

	return wait;
}
/**
 * i2o_exec_wait_free - Free an i2o_exec_wait struct
 * @wait: I2O wait data which should be cleaned up
 */
static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
{
	kfree(wait);
};
/**
 * i2o_msg_post_wait_mem - Post and wait a message with DMA buffers
 * @c: controller
 * @msg: message to post
 * @timeout: time in seconds to wait
 * @dma: i2o_dma struct of the DMA buffer to free on failure
 *
 * This API allows an OSM to post a message and then be told whether or
 * not the system received a successful reply. If the message times out
 * then the value '-ETIMEDOUT' is returned. This is a special case. In
 * this situation the message may (should) complete at an indefinite time
 * in the future. When it completes it will use the memory buffer
 * attached to the request. If -ETIMEDOUT is returned then the memory
 * buffer must not be freed. Instead the event completion will free them
 * for you. In all other cases the buffer are your problem.
 *
 * Returns 0 on success, negative error code on timeout or positive error
 * code from reply.
 */
int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
			  unsigned long timeout, struct i2o_dma *dma)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct i2o_exec_wait *wait;
	static u32 tcntxt = 0x80000000;
	unsigned long flags;
	int rc = 0;

	wait = i2o_exec_wait_alloc();
	if (!wait) {
		i2o_msg_nop(c, msg);
		return -ENOMEM;
	}

	/* Wrap the context counter, staying in the POST WAIT range. */
	if (tcntxt == 0xffffffff)
		tcntxt = 0x80000000;

	if (dma)
		wait->dma = *dma;

	/*
	 * Fill in the message initiator context and transaction context.
	 * We will only use transaction contexts >= 0x80000000 for POST WAIT,
	 * so we could find a POST WAIT reply easier in the reply handler.
	 */
	msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
	wait->tcntxt = tcntxt++;
	msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);

	wait->wq = &wq;

	/*
	 * we add elements to the head, because if a entry in the list will
	 * never be removed, we have to iterate over it every time
	 */
	list_add(&wait->list, &i2o_exec_wait_list);

	/*
	 * Post the message to the controller. At some point later it will
	 * return. If we time out before it returns then complete will be zero.
	 */
	i2o_msg_post(c, msg);

	wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ);

	/* Under wait->lock so the reply handler cannot race the
	 * completion check or wake us after wq goes out of scope. */
	spin_lock_irqsave(&wait->lock, flags);

	wait->wq = NULL;

	if (wait->complete)
		rc = le32_to_cpu(wait->msg->body[0]) >> 24;
	else {
		/*
		 * We cannot remove it now. This is important. When it does
		 * terminate (which it must do if the controller has not
		 * died...) then it will otherwise scribble on stuff.
		 *
		 * FIXME: try abort message
		 */
		if (dma)
			dma->virt = NULL;

		rc = -ETIMEDOUT;
	}

	spin_unlock_irqrestore(&wait->lock, flags);

	/* On timeout, wait/msg are left for the late-reply handler. */
	if (rc != -ETIMEDOUT) {
		i2o_flush_reply(c, wait->m);
		i2o_exec_wait_free(wait);
	}

	return rc;
};
/**
 * i2o_msg_post_wait_complete - Reply to a i2o_msg_post request from IOP
 * @c: I2O controller which answers
 * @m: message id
 * @msg: pointer to the I2O reply message
 * @context: transaction context of request
 *
 * This function is called in interrupt context only. If the reply reached
 * before the timeout, the i2o_exec_wait struct is filled with the message
 * and the task will be waked up. The task is now responsible for returning
 * the message m back to the controller! If the message reaches us after
 * the timeout clean up the i2o_exec_wait struct (including allocated
 * DMA buffer).
 *
 * Return 0 on success and if the message m should not be given back to the
 * I2O controller, or >0 on success and if the message should be given back
 * afterwords. Returns negative error code on failure. In this case the
 * message must also be given back to the controller.
 */
static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
				      struct i2o_message *msg, u32 context)
{
	struct i2o_exec_wait *wait, *tmp;
	unsigned long flags;
	int rc = 1;

	/*
	 * We need to search through the i2o_exec_wait_list to see if the given
	 * message is still outstanding. If not, it means that the IOP took
	 * longer to respond to the message than we had allowed and timer has
	 * already expired. Not much we can do about that except log it for
	 * debug purposes, increase timeout, and recompile.
	 */
	list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
		if (wait->tcntxt == context) {
			/* Under wait->lock: wait->wq tells us whether the
			 * poster is still waiting (wq set) or timed out. */
			spin_lock_irqsave(&wait->lock, flags);

			list_del(&wait->list);

			wait->m = m;
			wait->msg = msg;
			wait->complete = 1;

			if (wait->wq)
				rc = 0;
			else
				rc = -1;

			spin_unlock_irqrestore(&wait->lock, flags);

			if (rc) {
				/* Poster gave up: clean up on its behalf. */
				struct device *dev;

				dev = &c->pdev->dev;

				pr_debug("%s: timedout reply received!\n",
					 c->name);
				i2o_dma_free(dev, &wait->dma);
				i2o_exec_wait_free(wait);
			} else
				wake_up_interruptible(wait->wq);

			return rc;
		}
	}

	osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
		 context);

	return -1;
};
/**
 * i2o_exec_show_vendor_id - Displays Vendor ID of controller
 * @d: device of which the Vendor ID should be displayed
 * @attr: device_attribute to display
 * @buf: buffer into which the Vendor ID should be printed
 *
 * Reads field 0 of parameter group 0x0000 from the device.  Returns the
 * number of bytes printed into @buf (including the terminating NUL), or
 * 0 if the parameter could not be read.
 */
static ssize_t i2o_exec_show_vendor_id(struct device *d,
				       struct device_attribute *attr, char *buf)
{
	struct i2o_device *dev = to_i2o_device(d);
	u16 id;

	if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2))
		return 0;

	sprintf(buf, "0x%04x", le16_to_cpu(id));
	return strlen(buf) + 1;
}
/**
 * i2o_exec_show_product_id - Displays Product ID of controller
 * @d: device of which the Product ID should be displayed
 * @attr: device_attribute to display
 * @buf: buffer into which the Product ID should be printed
 *
 * Reads field 1 of parameter group 0x0000 from the device.  Returns the
 * number of bytes printed into @buf (including the terminating NUL), or
 * 0 if the parameter could not be read.
 */
static ssize_t i2o_exec_show_product_id(struct device *d,
					struct device_attribute *attr,
					char *buf)
{
	struct i2o_device *dev = to_i2o_device(d);
	u16 id;

	if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2))
		return 0;

	sprintf(buf, "0x%04x", le16_to_cpu(id));
	return strlen(buf) + 1;
}
/* Exec-OSM device attributes (read-only sysfs files, see the show
 * functions above) */
static DEVICE_ATTR(vendor_id, S_IRUGO, i2o_exec_show_vendor_id, NULL);
static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
/**
 * i2o_exec_probe - Called if a new I2O device (executive class) appears
 * @dev: I2O device which should be probed
 *
 * Registers for every event from the Executive device and creates the
 * vendor_id/product_id sysfs attributes.  On failure, everything set up
 * so far is rolled back in reverse order.
 *
 * Returns 0 on success or a negative error code otherwise.
 */
static int i2o_exec_probe(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	int rc;

	rc = i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
	if (rc)
		goto err_out;

	rc = device_create_file(dev, &dev_attr_vendor_id);
	if (rc)
		goto err_evtreg;

	rc = device_create_file(dev, &dev_attr_product_id);
	if (rc)
		goto err_vid;

	i2o_dev->iop->exec = i2o_dev;

	return 0;

err_vid:
	device_remove_file(dev, &dev_attr_vendor_id);
err_evtreg:
	i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0);
err_out:
	return rc;
}
/**
 * i2o_exec_remove - Called on I2O device removal
 * @dev: I2O device which was removed
 *
 * Removes the sysfs attributes in reverse creation order and cancels
 * the event registration on the Executive device.
 *
 * Returns 0 on success.
 */
static int i2o_exec_remove(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);

	device_remove_file(dev, &dev_attr_product_id);
	device_remove_file(dev, &dev_attr_vendor_id);

	i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0);

	return 0;
}
#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
/**
 * i2o_exec_lct_notify - Send a asynchronus LCT NOTIFY request
 * @c: I2O controller to which the request should be send
 * @change_ind: change indicator
 *
 * This function sends a LCT NOTIFY request to the I2O controller with
 * the change indicator change_ind. If the change_ind == 0 the controller
 * replies immediately after the request. If change_ind > 0 the reply is
 * send after change indicator of the LCT is > change_ind.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
{
	i2o_status_block *sb = c->status_block.virt;
	struct device *dev;
	struct i2o_message *msg;

	/* lct_lock serializes LCT buffer reallocation and the request. */
	mutex_lock(&c->lct_lock);

	dev = &c->pdev->dev;

	/* Grow the DMA buffer to the size the IOP says it needs. */
	if (i2o_dma_realloc(dev, &c->dlct,
					le32_to_cpu(sb->expected_lct_size))) {
		mutex_unlock(&c->lct_lock);
		return -ENOMEM;
	}

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg)) {
		mutex_unlock(&c->lct_lock);
		return PTR_ERR(msg);
	}

	msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
	msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
				     ADAPTER_TID);
	msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
	/* tcntxt 0: reply is handled asynchronously, not via POST WAIT. */
	msg->u.s.tcntxt = cpu_to_le32(0x00000000);
	msg->body[0] = cpu_to_le32(0xffffffff);
	msg->body[1] = cpu_to_le32(change_ind);
	msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
	msg->body[3] = cpu_to_le32(c->dlct.phys);

	i2o_msg_post(c, msg);

	mutex_unlock(&c->lct_lock);

	return 0;
}
#endif
/**
 * i2o_exec_lct_modified - Called on LCT NOTIFY reply
 * @_work: work struct for a specific controller
 *
 * This function handles asynchronus LCT NOTIFY replies. It parses the
 * new LCT and if the buffer for the LCT was to small sends a LCT NOTIFY
 * again, otherwise send LCT NOTIFY to get informed on next LCT change.
 */
static void i2o_exec_lct_modified(struct work_struct *_work)
{
	struct i2o_exec_lct_notify_work *work =
		container_of(_work, struct i2o_exec_lct_notify_work, work);
	u32 change_ind = 0;
	struct i2o_controller *c = work->c;

	kfree(work);

	/* -EAGAIN means the buffer was too small: change_ind stays 0 so
	 * the next notify replies immediately and we re-parse. */
	if (i2o_device_parse_lct(c) != -EAGAIN)
		change_ind = c->lct->change_ind + 1;

#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
	i2o_exec_lct_notify(c, change_ind);
#endif
};
/**
 * i2o_exec_reply - I2O Executive reply handler
 * @c: I2O controller from which the reply comes
 * @m: message id
 * @msg: pointer to the I2O reply message
 *
 * This function is always called from interrupt context. If a POST WAIT
 * reply was received, pass it to the complete function. If a LCT NOTIFY
 * reply was received, a new event is created to handle the update.
 *
 * Returns 0 on success and if the reply should not be flushed or > 0
 * on success and if the reply should be flushed. Returns negative error
 * code on failure and if the reply should be flushed.
 */
static int i2o_exec_reply(struct i2o_controller *c, u32 m,
			  struct i2o_message *msg)
{
	u32 context;

	if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
		struct i2o_message __iomem *pmsg;
		u32 pm;

		/*
		 * If Fail bit is set we must take the transaction context of
		 * the preserved message to find the right request again.
		 */
		pm = le32_to_cpu(msg->body[3]);
		pmsg = i2o_msg_in_to_virt(c, pm);
		context = readl(&pmsg->u.s.tcntxt);

		i2o_report_status(KERN_INFO, "i2o_core", msg);

		/* Release the preserved msg */
		i2o_msg_nop_mfa(c, pm);
	} else
		context = le32_to_cpu(msg->u.s.tcntxt);

	/* Contexts >= 0x80000000 are reserved for POST WAIT requests. */
	if (context & 0x80000000)
		return i2o_msg_post_wait_complete(c, m, msg, context);

	if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) {
		struct i2o_exec_lct_notify_work *work;

		pr_debug("%s: LCT notify received\n", c->name);

		/* Defer the LCT parse to process context via a work item. */
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return -ENOMEM;

		work->c = c;

		INIT_WORK(&work->work, i2o_exec_lct_modified);
		queue_work(i2o_exec_driver.event_queue, &work->work);
		return 1;
	}

	/*
	 * If this happens, we want to dump the message to the syslog so
	 * it can be sent back to the card manufacturer by the end user
	 * to aid in debugging.
	 *
	 */
	printk(KERN_WARNING "%s: Unsolicited message reply sent to core!"
	       "Message dumped to syslog\n", c->name);
	i2o_dump_message(msg);

	return -EFAULT;
}
/**
 * i2o_exec_event - Event handling function
 * @work: Work item in occurring event
 *
 * Handles events sent by the Executive device. At the moment does not do
 * anything useful.
 */
static void i2o_exec_event(struct work_struct *work)
{
	struct i2o_event *evt = container_of(work, struct i2o_event, work);

	if (likely(evt->i2o_dev))
		osm_debug("Event received from device: %d\n",
			  evt->i2o_dev->lct_data.tid);

	/* The event was allocated by the dispatcher; we own and free it. */
	kfree(evt);
}
/**
 * i2o_exec_lct_get - Get the IOP's Logical Configuration Table
 * @c: I2O controller from which the LCT should be fetched
 *
 * Send a LCT NOTIFY request to the controller, and wait
 * I2O_TIMEOUT_LCT_GET seconds until arrival of response. If the LCT is
 * too large, retry it.
 *
 * Returns 0 on success or negative error code on failure.
 */
int i2o_exec_lct_get(struct i2o_controller *c)
{
	struct i2o_message *msg;
	int i;
	int rc = -EAGAIN;

	for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
		msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
		if (IS_ERR(msg))
			return PTR_ERR(msg);

		msg->u.head[0] =
		    cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
		msg->u.head[1] =
		    cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
				ADAPTER_TID);
		/* 0xffffffff: interested in all device classes */
		msg->body[0] = cpu_to_le32(0xffffffff);
		msg->body[1] = cpu_to_le32(0x00000000);
		/* Simple SGL describing the controller's DMA LCT buffer */
		msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
		msg->body[3] = cpu_to_le32(c->dlct.phys);

		rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET);
		if (rc < 0)
			break;

		/* -EAGAIN: the LCT did not fit the buffer; try again */
		rc = i2o_device_parse_lct(c);
		if (rc != -EAGAIN)
			break;
	}

	return rc;
}
/* Exec OSM driver struct */
struct i2o_driver i2o_exec_driver = {
	.name = OSM_NAME,
	.reply = i2o_exec_reply,	/* interrupt-context reply handler */
	.event = i2o_exec_event,	/* process-context event handler */
	.classes = i2o_exec_class_id,
	.driver = {
		   .probe = i2o_exec_probe,
		   .remove = i2o_exec_remove,
		   },
};
/**
 * i2o_exec_init - Registers the Exec OSM
 *
 * Registers the Exec OSM in the I2O core.
 *
 * Returns 0 on success or negative error code on failure.
 */
int __init i2o_exec_init(void)
{
	return i2o_driver_register(&i2o_exec_driver);
}
/**
 * i2o_exec_exit - Removes the Exec OSM
 *
 * Unregisters the Exec OSM from the I2O core.
 */
void i2o_exec_exit(void)
{
	i2o_driver_unregister(&i2o_exec_driver);
}

EXPORT_SYMBOL(i2o_msg_post_wait_mem);
EXPORT_SYMBOL(i2o_exec_lct_get);
| gpl-2.0 |
denkem/enru-3.1.10-g7f360be | drivers/usb/misc/sisusbvga/sisusb_init.c | 11746 | 25368 | /*
* sisusb - usb kernel driver for SiS315(E) based USB2VGA dongles
*
* Display mode initializing code
*
* Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria
*
* If distributed as part of the Linux kernel, this code is licensed under the
* terms of the GPL v2.
*
* Otherwise, the following license terms apply:
*
* * Redistribution and use in source and binary forms, with or without
* * modification, are permitted provided that the following conditions
* * are met:
* * 1) Redistributions of source code must retain the above copyright
* * notice, this list of conditions and the following disclaimer.
* * 2) Redistributions in binary form must reproduce the above copyright
* * notice, this list of conditions and the following disclaimer in the
* * documentation and/or other materials provided with the distribution.
* * 3) The name of the author may not be used to endorse or promote products
* * derived from this software without specific prior written permission.
* *
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Author: Thomas Winischhofer <thomas@winischhofer.net>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include "sisusb.h"
#ifdef INCL_SISUSB_CON
#include "sisusb_init.h"
/*********************************************/
/*         POINTER INITIALIZATION            */
/*********************************************/

/* Point SiS_Pr at the static mode/timing tables used by the USB dongle. */
static void SiSUSB_InitPtr(struct SiS_Private *SiS_Pr)
{
	SiS_Pr->SiS_ModeResInfo = SiSUSB_ModeResInfo;
	SiS_Pr->SiS_StandTable = SiSUSB_StandTable;

	SiS_Pr->SiS_SModeIDTable = SiSUSB_SModeIDTable;
	SiS_Pr->SiS_EModeIDTable = SiSUSB_EModeIDTable;
	SiS_Pr->SiS_RefIndex = SiSUSB_RefIndex;
	SiS_Pr->SiS_CRT1Table = SiSUSB_CRT1Table;

	SiS_Pr->SiS_VCLKData = SiSUSB_VCLKData;
}
/*********************************************/
/*          HELPER: SetReg, GetReg           */
/*********************************************/

/* Write @data to indexed register @index of relocated I/O @port. */
static void
SiS_SetReg(struct SiS_Private *SiS_Pr, unsigned long port,
	   unsigned short index, unsigned short data)
{
	sisusb_setidxreg(SiS_Pr->sisusb, port, index, data);
}
/* Write @data directly to @port (non-indexed register). */
static void
SiS_SetRegByte(struct SiS_Private *SiS_Pr, unsigned long port,
	       unsigned short data)
{
	sisusb_setreg(SiS_Pr->sisusb, port, data);
}
/* Read and return indexed register @index of relocated I/O @port. */
static unsigned char
SiS_GetReg(struct SiS_Private *SiS_Pr, unsigned long port, unsigned short index)
{
	u8 data;

	sisusb_getidxreg(SiS_Pr->sisusb, port, index, &data);

	return data;
}
/* Read @port directly (non-indexed register). */
static unsigned char
SiS_GetRegByte(struct SiS_Private *SiS_Pr, unsigned long port)
{
	u8 data;

	sisusb_getreg(SiS_Pr->sisusb, port, &data);

	return data;
}
/* Read-modify-write: reg[index] = (reg[index] & DataAND) | DataOR. */
static void
SiS_SetRegANDOR(struct SiS_Private *SiS_Pr, unsigned long port,
		unsigned short index, unsigned short DataAND,
		unsigned short DataOR)
{
	sisusb_setidxregandor(SiS_Pr->sisusb, port, index, DataAND, DataOR);
}
/* Clear bits: reg[index] &= DataAND. */
static void
SiS_SetRegAND(struct SiS_Private *SiS_Pr, unsigned long port,
	      unsigned short index, unsigned short DataAND)
{
	sisusb_setidxregand(SiS_Pr->sisusb, port, index, DataAND);
}
/* Set bits: reg[index] |= DataOR. */
static void
SiS_SetRegOR(struct SiS_Private *SiS_Pr, unsigned long port,
	     unsigned short index, unsigned short DataOR)
{
	sisusb_setidxregor(SiS_Pr->sisusb, port, index, DataOR);
}
/*********************************************/
/*      HELPER: DisplayOn, DisplayOff        */
/*********************************************/

/* Un-blank the screen: clear the screen-off bit (bit 5) in SR01. */
static void SiS_DisplayOn(struct SiS_Private *SiS_Pr)
{
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, 0xDF);
}
/*********************************************/
/*        HELPER: Init Port Addresses        */
/*********************************************/

/*
 * Fill in the relocated VGA I/O port addresses.  Each legacy VGA port
 * 0x3xx is mapped at BaseAddr + (port - 0x3b0), which gives the fixed
 * offsets below (e.g. 0x3c4 -> +0x14, 0x3d4 -> +0x24).
 */
static void SiSUSBRegInit(struct SiS_Private *SiS_Pr, unsigned long BaseAddr)
{
	SiS_Pr->SiS_P3c4 = BaseAddr + 0x14;	/* sequencer index */
	SiS_Pr->SiS_P3d4 = BaseAddr + 0x24;	/* CRTC index */
	SiS_Pr->SiS_P3c0 = BaseAddr + 0x10;	/* attribute controller */
	SiS_Pr->SiS_P3ce = BaseAddr + 0x1e;	/* graphics controller index */
	SiS_Pr->SiS_P3c2 = BaseAddr + 0x12;	/* misc output (write) */
	SiS_Pr->SiS_P3ca = BaseAddr + 0x1a;
	SiS_Pr->SiS_P3c6 = BaseAddr + 0x16;	/* DAC pixel mask */
	SiS_Pr->SiS_P3c7 = BaseAddr + 0x17;	/* DAC read index */
	SiS_Pr->SiS_P3c8 = BaseAddr + 0x18;	/* DAC write index */
	SiS_Pr->SiS_P3c9 = BaseAddr + 0x19;	/* DAC data */
	SiS_Pr->SiS_P3cb = BaseAddr + 0x1b;
	SiS_Pr->SiS_P3cc = BaseAddr + 0x1c;
	SiS_Pr->SiS_P3cd = BaseAddr + 0x1d;
	SiS_Pr->SiS_P3da = BaseAddr + 0x2a;	/* input status 1 */
	SiS_Pr->SiS_Part1Port = BaseAddr + SIS_CRT2_PORT_04;
}
/*********************************************/
/*             HELPER: GetSysFlags           */
/*********************************************/

/* Init chip-specific state; CR63 is the CRT1 gating register (see
 * SiS_HandleCRT1). */
static void SiS_GetSysFlags(struct SiS_Private *SiS_Pr)
{
	SiS_Pr->SiS_MyCR63 = 0x63;
}
/*********************************************/
/*         HELPER: Init PCI & Engines        */
/*********************************************/

/* Enable the chip's address space and acceleration engines. */
static void SiSInitPCIetc(struct SiS_Private *SiS_Pr)
{
	/* SR20 = 0xa1: vendor-prescribed enable value */
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x20, 0xa1);

	/*  - Enable 2D (0x40)
	 *  - Enable 3D (0x02)
	 *  - Enable 3D vertex command fetch (0x10)
	 *  - Enable 3D command parser (0x08)
	 *  - Enable 3D G/L transformation engine (0x80)
	 */
	SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1E, 0xDA);
}
/*********************************************/
/* HELPER: SET SEGMENT REGISTERS */
/*********************************************/
/* Distribute the low byte of @value over the low nibbles of the two
 * segment select registers (3CBh gets the upper, 3CDh the lower nibble). */
static void SiS_SetSegRegLower(struct SiS_Private *SiS_Pr, unsigned short value)
{
	unsigned short upper_nibble, lower_nibble;

	value &= 0x00ff;
	upper_nibble = value >> 4;
	lower_nibble = value & 0x0f;

	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb,
		       (SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb) & 0xf0) |
		       upper_nibble);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd,
		       (SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd) & 0xf0) |
		       lower_nibble);
}
/* Distribute the low byte of @value over the high nibbles of the two
 * segment select registers (3CBh and 3CDh). */
static void SiS_SetSegRegUpper(struct SiS_Private *SiS_Pr, unsigned short value)
{
	unsigned short hi_part, shifted;

	value &= 0x00ff;
	hi_part = value & 0xf0;
	shifted = value << 4;	/* full width kept, exactly as before */

	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb,
		       (SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cb) & 0x0f) |
		       hi_part);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd,
		       (SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3cd) & 0x0f) |
		       shifted);
}
/* Program both nibble halves of the segment select registers. */
static void SiS_SetSegmentReg(struct SiS_Private *SiS_Pr, unsigned short value)
{
	SiS_SetSegRegLower(SiS_Pr, value);
	SiS_SetSegRegUpper(SiS_Pr, value);
}
/* Reset the segment select registers to 0. */
static void SiS_ResetSegmentReg(struct SiS_Private *SiS_Pr)
{
	SiS_SetSegmentReg(SiS_Pr, 0);
}
/* Program the segment override (SR1D gets bits 10-8 of @value mirrored
 * into both nibbles), then the regular segment registers. */
static void
SiS_SetSegmentRegOver(struct SiS_Private *SiS_Pr, unsigned short value)
{
	unsigned short bank = (value >> 8) & 0x07;

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1d, bank | (bank << 4));
	SiS_SetSegmentReg(SiS_Pr, value);
}
/* Reset the segment override register (SR1D) and segment registers. */
static void SiS_ResetSegmentRegOver(struct SiS_Private *SiS_Pr)
{
	SiS_SetSegmentRegOver(SiS_Pr, 0);
}
/* Bring all segment mapping registers back to their default state. */
static void SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr)
{
	SiS_ResetSegmentReg(SiS_Pr);
	SiS_ResetSegmentRegOver(SiS_Pr);
}
/*********************************************/
/* HELPER: SearchModeID */
/*********************************************/
/*
 * Look up *ModeNo and store its table index in *ModeIdIndex.
 * Standard modes (<= 0x13): only text mode 0x03 is supported, index 0.
 * Extended modes: linear scan of EModeIDTable (0xFF terminates).
 * Returns 1 if found, 0 otherwise.
 */
static int
SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo,
		 unsigned short *ModeIdIndex)
{
	unsigned short i;

	if (*ModeNo <= 0x13) {
		if (*ModeNo != 0x03)
			return 0;
		*ModeIdIndex = 0;
		return 1;
	}

	for (i = 0;; i++) {
		*ModeIdIndex = i;
		if (SiS_Pr->SiS_EModeIDTable[i].Ext_ModeID == *ModeNo)
			return 1;
		if (SiS_Pr->SiS_EModeIDTable[i].Ext_ModeID == 0xFF)
			return 0;
	}
}
/*********************************************/
/* HELPER: ENABLE CRT1 */
/*********************************************/
static void SiS_HandleCRT1(struct SiS_Private *SiS_Pr)
{
	/* Enable CRT1 gating: clear bit 6 of CR63 */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, SiS_Pr->SiS_MyCR63, 0xbf);
}
/*********************************************/
/* HELPER: GetColorDepth */
/*********************************************/
/*
 * Return the mode's color depth in bytes per 8-pixel group, derived
 * from the mode-type field of the (standard or extended) mode flags.
 */
static unsigned short
SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		  unsigned short ModeIdIndex)
{
	static const unsigned short ColorDepth[6] = { 1, 2, 4, 4, 6, 8 };
	unsigned short flags;
	short slot;

	flags = (ModeNo <= 0x13) ?
	    SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag :
	    SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	/* Clamp pre-EGA mode types to the first table entry. */
	slot = (flags & ModeTypeMask) - ModeEGA;
	if (slot < 0)
		slot = 0;

	return ColorDepth[slot];
}
/*********************************************/
/* HELPER: GetOffset */
/*********************************************/
/*
 * Compute the display pitch for the mode: horizontal resolution in
 * 16-pixel units times color depth, doubled for interlace, with a
 * half-depth correction when the width is not a multiple of 16.
 */
static unsigned short
SiS_GetOffset(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
	      unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned short pitch, depth, infoflag, xres;

	infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
	xres = SiS_Pr->SiS_RefIndex[rrti].XRes;
	depth = SiS_GetColorDepth(SiS_Pr, ModeNo, ModeIdIndex);

	pitch = xres >> 4;
	if (infoflag & InterlaceMode)
		pitch *= 2;

	pitch *= depth;

	if (xres & 0x0f)
		pitch += depth >> 1;

	return pitch;
}
/*********************************************/
/* SEQ */
/*********************************************/
/* Load sequencer registers SR01-SR04 from the standard table. */
static void
SiS_SetSeqRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char SRdata;
	int i;

	/* SR00 = 0x03 releases the sequencer sync/async reset (std VGA) */
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x00, 0x03);

	/* SR01 with screen-off bit (0x20) set while we program the mode */
	SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[0] | 0x20;
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, SRdata);

	for (i = 2; i <= 4; i++) {
		SRdata = SiS_Pr->SiS_StandTable[StandTableIndex].SR[i - 1];
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, SRdata);
	}
}
/*********************************************/
/* MISC */
/*********************************************/
/* Write the standard table's miscellaneous output register (3C2h). */
static void
SiS_SetMiscRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char Miscdata = SiS_Pr->SiS_StandTable[StandTableIndex].MISC;

	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c2, Miscdata);
}
/*********************************************/
/* CRTC */
/*********************************************/
/* Load CRTC registers 0x00-0x18 from the standard table. */
static void
SiS_SetCRTCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char CRTCdata;
	unsigned short i;

	/* Clear CR11 bit 7 to unprotect CR00-CR07 before writing */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f);

	for (i = 0; i <= 0x18; i++) {
		CRTCdata = SiS_Pr->SiS_StandTable[StandTableIndex].CRTC[i];
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, i, CRTCdata);
	}
}
/*********************************************/
/* ATT */
/*********************************************/
/* Load attribute controller registers 0x00-0x13 from the standard table. */
static void
SiS_SetATTRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char ARdata;
	unsigned short i;

	for (i = 0; i <= 0x13; i++) {
		ARdata = SiS_Pr->SiS_StandTable[StandTableIndex].ATTR[i];

		/* Reading 3DAh resets the AR index/data flip-flop; then
		 * write index, then data (standard VGA protocol). */
		SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
		SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, i);
		SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, ARdata);
	}

	/* AR14 (color select) = 0 */
	SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x14);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x00);

	/* Index 0x20: set the palette-address-source bit to re-enable
	 * screen output */
	SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c0, 0x20);
	SiS_GetRegByte(SiS_Pr, SiS_Pr->SiS_P3da);
}
/*********************************************/
/* GRC */
/*********************************************/
/* Load graphics controller registers 0x00-0x08 from the standard table. */
static void
SiS_SetGRCRegs(struct SiS_Private *SiS_Pr, unsigned short StandTableIndex)
{
	unsigned char GRdata;
	unsigned short i;

	for (i = 0; i <= 0x08; i++) {
		GRdata = SiS_Pr->SiS_StandTable[StandTableIndex].GRC[i];
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3ce, i, GRdata);
	}

	if (SiS_Pr->SiS_ModeType > ModeVGA) {
		/* 256 color disable */
		SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3ce, 0x05, 0xBF);
	}
}
/*********************************************/
/* CLEAR EXTENDED REGISTERS */
/*********************************************/
/* Clear extended sequencer registers SR0A-SR0E.  @ModeNo is unused. */
static void SiS_ClearExt1Regs(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
{
	int i;

	for (i = 0x0A; i <= 0x0E; i++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, i, 0x00);
	}

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x37, 0xFE);
}
/*********************************************/
/* Get rate index */
/*********************************************/
/*
 * Return the index into SiS_RefIndex for the refresh rate selected in
 * scratch register CR33 (low nibble), or 0xFFFF for standard modes,
 * which have no refresh-rate table entries.
 */
static unsigned short
SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
	       unsigned short ModeIdIndex)
{
	unsigned short rrti, i, index, temp;

	if (ModeNo <= 0x13)
		return 0xFFFF;

	/* Desired rate number, stored by the driver in CR33 */
	index = SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x33) & 0x0F;
	if (index > 0)
		index--;

	rrti = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].REFindex;
	ModeNo = SiS_Pr->SiS_RefIndex[rrti].ModeID;

	/* Walk this mode's entries; "index" underflows to 0xFFFF after
	 * the requested number of acceptable entries has been passed,
	 * or we stop early when the mode/type no longer matches. */
	i = 0;
	do {
		if (SiS_Pr->SiS_RefIndex[rrti + i].ModeID != ModeNo)
			break;

		temp =
		    SiS_Pr->SiS_RefIndex[rrti + i].Ext_InfoFlag & ModeTypeMask;
		if (temp < SiS_Pr->SiS_ModeType)
			break;

		i++;
		index--;
	} while (index != 0xFFFF);

	/* i is one past the last acceptable entry; step back onto it */
	i--;

	return (rrti + i);
}
/*********************************************/
/* SYNC */
/*********************************************/
/* Program the sync polarity (misc output register 3C2h) for this rate. */
static void SiS_SetCRT1Sync(struct SiS_Private *SiS_Pr, unsigned short rrti)
{
	unsigned short polarity;

	/* Polarity bits live in the top two bits of the high byte of the
	 * info flag; 0x2f restores the remaining misc-output bits. */
	polarity = (SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag >> 8) & 0xC0;
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c2, polarity | 0x2f);
}
/*********************************************/
/* CRTC/2 */
/*********************************************/
/*
 * Program the extended CRT1 timing registers from the CRT1Table entry
 * selected by the refresh-rate index.
 */
static void
SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned char index;
	unsigned short temp, i, j, modeflag;

	/* Clear CR11 bit 7 to unprotect CR00-CR07 */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3d4, 0x11, 0x7f);

	modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRT1CRTC;

	/* CR[0..7] -> CRTC 0x00-0x07 (note: i carries over between loops) */
	for (i = 0, j = 0; i <= 7; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}
	/* CR[8..10] -> CRTC 0x10-0x12 */
	for (j = 0x10; i <= 10; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}
	/* CR[11..12] -> CRTC 0x15-0x16 */
	for (j = 0x15; i <= 12; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}
	/* CR[13..15] -> sequencer 0x0A-0x0C */
	for (j = 0x0A; i <= 15; i++, j++) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, j,
			   SiS_Pr->SiS_CRT1Table[index].CR[i]);
	}

	/* CR[16] bits 7-5: overflow bits for SR0E */
	temp = SiS_Pr->SiS_CRT1Table[index].CR[16] & 0xE0;
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0E, temp);

	/* CR[16] bit 0 -> CR09 bit 5; bit 7 of CR09 enables double scan */
	temp = ((SiS_Pr->SiS_CRT1Table[index].CR[16]) & 0x01) << 5;
	if (modeflag & DoubleScanMode)
		temp |= 0x80;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3d4, 0x09, 0x5F, temp);

	if (SiS_Pr->SiS_ModeType > ModeVGA)
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x14, 0x4F);
}
/*********************************************/
/* OFFSET & PITCH */
/*********************************************/
/* (partly overruled by SetPitch() in XF86) */
/*********************************************/
static void
SiS_SetCRT1Offset(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		  unsigned short ModeIdIndex, unsigned short rrti)
{
	/* du: display pitch in 16-pixel units (see SiS_GetOffset) */
	unsigned short du = SiS_GetOffset(SiS_Pr, ModeNo, ModeIdIndex, rrti);
	unsigned short infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
	unsigned short temp;

	/* High nibble of the pitch -> SR0E[3:0], low byte -> CR13 */
	temp = (du >> 8) & 0x0f;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0E, 0xF0, temp);

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x13, (du & 0xFF));

	/* Derived value (pitch * 32, rounded up, plus one) -> SR10 */
	if (infoflag & InterlaceMode)
		du >>= 1;

	du <<= 5;
	temp = (du >> 8) & 0xff;
	if (du & 0xff)
		temp++;
	temp++;
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x10, temp);
}
/*********************************************/
/* VCLK */
/*********************************************/
/* Program the pixel clock from the VCLKData table entry for this rate. */
static void
SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		unsigned short rrti)
{
	unsigned short index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK;
	unsigned short clka = SiS_Pr->SiS_VCLKData[index].SR2B;
	unsigned short clkb = SiS_Pr->SiS_VCLKData[index].SR2C;

	/* Clear SR31[5:4], write the table's SR2B/SR2C clock values,
	 * then SR2D = 0x01 to complete the clock programming. */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x31, 0xCF);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2B, clka);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2C, clkb);
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x2D, 0x01);
}
/*********************************************/
/* FIFO */
/*********************************************/
/* Program the CRT1 FIFO thresholds (315-series engine). */
static void
SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		    unsigned short mi)
{
	unsigned short modeflag = SiS_Pr->SiS_EModeIDTable[mi].Ext_ModeFlag;

	/* disable auto-threshold */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x3D, 0xFE);

	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x08, 0xAE);
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x09, 0xF0);

	if (ModeNo <= 0x13)
		return;

	/* Re-enable auto-threshold unless the mode is both double-scan
	 * and half-DCLK */
	if ((!(modeflag & DoubleScanMode)) || (!(modeflag & HalfDCLK))) {
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x08, 0x34);
		SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x3D, 0x01);
	}
}
/*********************************************/
/* MODE REGISTERS */
/*********************************************/
/* Adjust clock-dependent registers (SR32/SR1F) and DAC speed (SR07)
 * according to the mode's pixel clock in MHz. */
static void
SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		 unsigned short rrti)
{
	unsigned short data = 0, VCLK = 0, index = 0;

	/* Standard modes are treated as VCLK 0 (lowest settings) */
	if (ModeNo > 0x13) {
		index = SiS_Pr->SiS_RefIndex[rrti].Ext_CRTVCLK;
		VCLK = SiS_Pr->SiS_VCLKData[index].CLOCK;
	}

	/* High-clock handling for pixel clocks >= 166 MHz */
	if (VCLK >= 166)
		data |= 0x0c;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x32, 0xf3, data);

	if (VCLK >= 166)
		SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1f, 0xe7);

	/* DAC speed */
	data = 0x03;
	if (VCLK >= 260)
		data = 0x00;
	else if (VCLK >= 160)
		data = 0x01;
	else if (VCLK >= 135)
		data = 0x02;

	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x07, 0xF8, data);
}
/*
 * Program the remaining CRT1 mode registers: color depth and interlace
 * bits, interlace retrace offset, half-DCLK, line compare and the
 * display-mode-type bits.
 */
static void
SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		    unsigned short ModeIdIndex, unsigned short rrti)
{
	unsigned short data, infoflag = 0, modeflag;

	if (ModeNo <= 0x13)
		modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag;
	else {
		modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
		infoflag = SiS_Pr->SiS_RefIndex[rrti].Ext_InfoFlag;
	}

	/* Disable DPMS */
	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x1F, 0x3F);

	/* SR06: graphics flag (0x02), color depth, interlace (0x20) */
	data = 0;
	if (ModeNo > 0x13) {
		if (SiS_Pr->SiS_ModeType > ModeEGA) {
			data |= 0x02;
			data |= ((SiS_Pr->SiS_ModeType - ModeVGA) << 2);
		}
		if (infoflag & InterlaceMode)
			data |= 0x20;
	}
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x06, 0xC0, data);

	/* CR19/CR1A[1:0]: interlace retrace offset */
	data = 0;
	if (infoflag & InterlaceMode) {
		/* data = (Hsync / 8) - ((Htotal / 8) / 2) + 3 */
		unsigned short hrs =
		    (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x04) |
		     ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0b) & 0xc0) << 2))
		    - 3;
		unsigned short hto =
		    (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x00) |
		     ((SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0b) & 0x03) << 8))
		    + 5;
		data = hrs - (hto >> 1) + 3;
	}
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x19, (data & 0xFF));
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3d4, 0x1a, 0xFC, (data >> 8));

	if (modeflag & HalfDCLK)
		SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x01, 0x08);

	data = 0;
	if (modeflag & LineCompareOff)
		data = 0x08;
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0F, 0xB7, data);

	if ((SiS_Pr->SiS_ModeType == ModeEGA) && (ModeNo > 0x13))
		SiS_SetRegOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x0F, 0x40);

	SiS_SetRegAND(SiS_Pr, SiS_Pr->SiS_P3c4, 0x31, 0xfb);

	/* SR21[7:5]: 0x60 text, 0x00 EGA, 0xA0 other graphics modes */
	data = 0x60;
	if (SiS_Pr->SiS_ModeType != ModeText) {
		data ^= 0x60;
		if (SiS_Pr->SiS_ModeType != ModeEGA)
			data ^= 0xA0;
	}
	SiS_SetRegANDOR(SiS_Pr, SiS_Pr->SiS_P3c4, 0x21, 0x1F, data);

	SiS_SetVCLKState(SiS_Pr, ModeNo, rrti);

	if (SiS_GetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x31) & 0x40)
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x52, 0x2c);
	else
		SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x52, 0x6c);
}
/*********************************************/
/* LOAD DAC */
/*********************************************/
/*
 * Write one RGB triplet to the DAC data port.  @dl selects which of the
 * three rotations of (dh, ah, al) is emitted, so the caller can cycle
 * a value through the R, G and B channels.
 */
static void
SiS_WriteDAC(struct SiS_Private *SiS_Pr, unsigned long DACData,
	     unsigned short shiftflag, unsigned short dl, unsigned short ah,
	     unsigned short al, unsigned short dh)
{
	unsigned short first, second, third;

	if (dl == 0) {
		first = dh;
		second = ah;
		third = al;
	} else if (dl == 1) {
		first = ah;
		second = al;
		third = dh;
	} else {
		first = al;
		second = dh;
		third = ah;
	}

	SiS_SetRegByte(SiS_Pr, DACData, first << shiftflag);
	SiS_SetRegByte(SiS_Pr, DACData, second << shiftflag);
	SiS_SetRegByte(SiS_Pr, DACData, third << shiftflag);
}
/*
 * Load the palette DAC with the default colors for the mode's DAC type
 * (MDA/CGA/EGA/VGA).  For VGA, the 256-entry palette is built from 16
 * base entries, 16 grey-scale entries, and 9 blocks of permuted RGB
 * values emitted through SiS_WriteDAC().
 */
static void
SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
	    unsigned short mi)
{
	unsigned short data, data2, time, i, j, k, m, n, o;
	unsigned short si, di, bx, sf;
	unsigned long DACAddr, DACData;
	const unsigned char *table = NULL;

	/* NOTE(review): sibling helpers test "ModeNo <= 0x13"; the strict
	 * "<" here mirrors the vendor code — confirm before changing. */
	if (ModeNo < 0x13)
		data = SiS_Pr->SiS_SModeIDTable[mi].St_ModeFlag;
	else
		data = SiS_Pr->SiS_EModeIDTable[mi].Ext_ModeFlag;

	data &= DACInfoFlag;

	j = time = 64;
	if (data == 0x00)
		table = SiS_MDA_DAC;
	else if (data == 0x08)
		table = SiS_CGA_DAC;
	else if (data == 0x10)
		table = SiS_EGA_DAC;
	else {
		j = 16;
		time = 256;
		table = SiS_VGA_DAC;
	}

	DACAddr = SiS_Pr->SiS_P3c8;
	DACData = SiS_Pr->SiS_P3c9;
	sf = 0;		/* no shift applied on this hardware */

	/* Unmask all palette entries; start writing at index 0 */
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c6, 0xFF);
	SiS_SetRegByte(SiS_Pr, DACAddr, 0x00);

	/* First j entries: 2 bits per channel, expanded to 0/0x15/0x2A/0x3F */
	for (i = 0; i < j; i++) {
		data = table[i];
		for (k = 0; k < 3; k++) {
			data2 = 0;
			if (data & 0x01)
				data2 += 0x2A;
			if (data & 0x02)
				data2 += 0x15;
			SiS_SetRegByte(SiS_Pr, DACData, (data2 << sf));
			data >>= 2;
		}
	}

	if (time == 256) {
		/* Entries 16..31: grey scale (R = G = B) */
		for (i = 16; i < 32; i++) {
			data = table[i] << sf;
			for (k = 0; k < 3; k++)
				SiS_SetRegByte(SiS_Pr, DACData, data);
		}
		/* Remaining entries: 9 blocks of RGB permutations */
		si = 32;
		for (m = 0; m < 9; m++) {
			di = si;
			bx = si + 4;
			for (n = 0; n < 3; n++) {
				for (o = 0; o < 5; o++) {
					SiS_WriteDAC(SiS_Pr, DACData, sf, n,
						     table[di], table[bx],
						     table[si]);
					si++;
				}
				si -= 2;
				for (o = 0; o < 3; o++) {
					SiS_WriteDAC(SiS_Pr, DACData, sf, n,
						     table[di], table[si],
						     table[bx]);
					si--;
				}
			}
			si += 5;
		}
	}
}
/*********************************************/
/* SET CRT1 REGISTER GROUP */
/*********************************************/
/*
 * Program the complete CRT1 register set for the requested mode: legacy
 * VGA registers from the standard table, then extended timing, clock
 * and DAC setup when a refresh-rate table entry exists.
 */
static void
SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
		 unsigned short ModeIdIndex)
{
	unsigned short StandTableIndex, rrti;

	SiS_Pr->SiS_CRT1Mode = ModeNo;

	/* Standard table entry 0: text mode 0x03; entry 1: extended modes */
	if (ModeNo <= 0x13)
		StandTableIndex = 0;
	else
		StandTableIndex = 1;

	SiS_ResetSegmentRegisters(SiS_Pr);
	SiS_SetSeqRegs(SiS_Pr, StandTableIndex);
	SiS_SetMiscRegs(SiS_Pr, StandTableIndex);
	SiS_SetCRTCRegs(SiS_Pr, StandTableIndex);
	SiS_SetATTRegs(SiS_Pr, StandTableIndex);
	SiS_SetGRCRegs(SiS_Pr, StandTableIndex);
	SiS_ClearExt1Regs(SiS_Pr, ModeNo);

	/* 0xFFFF = standard mode with no refresh-rate table entry */
	rrti = SiS_GetRatePtr(SiS_Pr, ModeNo, ModeIdIndex);

	if (rrti != 0xFFFF) {
		SiS_SetCRT1Sync(SiS_Pr, rrti);
		SiS_SetCRT1CRTC(SiS_Pr, ModeNo, ModeIdIndex, rrti);
		SiS_SetCRT1Offset(SiS_Pr, ModeNo, ModeIdIndex, rrti);
		SiS_SetCRT1VCLK(SiS_Pr, ModeNo, rrti);
	}

	SiS_SetCRT1FIFO_310(SiS_Pr, ModeNo, ModeIdIndex);

	SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, rrti);

	SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex);

	SiS_DisplayOn(SiS_Pr);
}
/*********************************************/
/* SiSSetMode() */
/*********************************************/
/*
 * Set display mode @ModeNo on CRT1.
 * Returns 1 on success, 0 if the mode number is unknown.
 */
int SiSUSBSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
{
	unsigned short ModeIdIndex;
	unsigned long BaseAddr = SiS_Pr->IOAddress;

	SiSUSB_InitPtr(SiS_Pr);
	SiSUSBRegInit(SiS_Pr, BaseAddr);
	SiS_GetSysFlags(SiS_Pr);

	if (!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex)))
		return 0;

	/* Unlock the extended registers (SR05 = 0x86) */
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3c4, 0x05, 0x86);

	SiSInitPCIetc(SiS_Pr);

	/* Strip the "don't clear display memory" flag bit */
	ModeNo &= 0x7f;

	/* NOTE(review): the mode type is always read from the extended
	 * table, even for mode 0x03 where ModeIdIndex refers to the
	 * standard table — mirrors the vendor code; confirm if touched. */
	SiS_Pr->SiS_ModeType =
	    SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag & ModeTypeMask;

	SiS_Pr->SiS_SetFlag = LowModeTests;

	/* Set mode on CRT1 */
	SiS_SetCRT1Group(SiS_Pr, ModeNo, ModeIdIndex);

	SiS_HandleCRT1(SiS_Pr);

	SiS_DisplayOn(SiS_Pr);
	SiS_SetRegByte(SiS_Pr, SiS_Pr->SiS_P3c6, 0xFF);

	/* Store mode number in scratch register CR34 */
	SiS_SetReg(SiS_Pr, SiS_Pr->SiS_P3d4, 0x34, ModeNo);

	return 1;
}
/*
 * Translate a VESA mode number into the internal mode number and set
 * that mode.  Returns 1 on success, 0 if the VESA mode is unknown.
 */
int SiSUSBSetVESAMode(struct SiS_Private *SiS_Pr, unsigned short VModeNo)
{
	unsigned short ModeNo = 0;
	int i;

	SiSUSB_InitPtr(SiS_Pr);

	if (VModeNo == 0x03) {
		ModeNo = 0x03;	/* standard text mode, no lookup needed */
	} else {
		for (i = 0;; i++) {
			if (SiS_Pr->SiS_EModeIDTable[i].Ext_VESAID ==
			    VModeNo) {
				ModeNo = SiS_Pr->SiS_EModeIDTable[i].
				    Ext_ModeID;
				break;
			}
			/* 0xff terminates the extended mode table */
			if (SiS_Pr->SiS_EModeIDTable[i].Ext_ModeID == 0xff)
				break;
		}
	}

	if (!ModeNo)
		return 0;

	return SiSUSBSetMode(SiS_Pr, ModeNo);
}
#endif /* INCL_SISUSB_CON */
| gpl-2.0 |
pzhaoyang/OpenWRT | target/linux/adm5120/files-3.18/arch/mips/adm5120/generic/eb-214a.c | 483 | 3047 | /*
* EB-214A board support
*
* Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2010 Cezary Jackiewicz <cezary@eko.one.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/string.h>
#include <linux/etherdevice.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <asm/mips_machine.h>

#include <asm/mach-adm5120/adm5120_info.h>
#include <asm/mach-adm5120/adm5120_platform.h>
#include <asm/mach-adm5120/adm5120_defs.h>
/* Offset of the board config data within SRAM0 (inside the "config"
 * partition) */
#define EB214A_CONFIG_OFFSET	0x4000

#define EB214A_KEYS_POLL_INTERVAL	20
#define EB214A_KEYS_DEBOUNCE_INTERVAL	(3 * EB214A_KEYS_POLL_INTERVAL)

/* Flash layout: read-only bootloader, 32K config area, rest firmware. */
static struct mtd_partition eb214a_partitions[] = {
	{
		.name	= "bootloader",
		.offset	= 0,
		.size	= 32*1024,
		.mask_flags = MTD_WRITEABLE,	/* read-only */
	} , {
		.name	= "config",
		.offset	= MTDPART_OFS_APPEND,
		.size	= 32*1024,
	} , {
		.name	= "firmware",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	}
};
/* PCI interrupt routing: all three device/pin combinations share
 * ADM5120_IRQ_PCI0 */
static struct adm5120_pci_irq eb214a_pci_irqs[] __initdata = {
	PCIIRQ(4, 0, 1, ADM5120_IRQ_PCI0),
	PCIIRQ(4, 1, 2, ADM5120_IRQ_PCI0),
	PCIIRQ(4, 2, 3, ADM5120_IRQ_PCI0),
};
/* Active-low LEDs: power plus one indicator per switch port */
static struct gpio_led eb214a_gpio_leds[] __initdata = {
	GPIO_LED_INV(ADM5120_GPIO_PIN7, "power", NULL),
	GPIO_LED_INV(ADM5120_GPIO_P0L0, "lan", NULL),
	GPIO_LED_INV(ADM5120_GPIO_P4L0, "usb1", NULL),
	GPIO_LED_INV(ADM5120_GPIO_P4L1, "usb2", NULL),
	GPIO_LED_INV(ADM5120_GPIO_P4L2, "usb3", NULL),
	GPIO_LED_INV(ADM5120_GPIO_P3L0, "usb4", NULL),
};
/* Single reset button on GPIO pin 1, debounced over three poll cycles */
static struct gpio_keys_button eb214a_gpio_buttons[] __initdata = {
	{
		.desc		= "reset",
		.type		= EV_KEY,
		.code		= KEY_RESTART,
		.debounce_interval = EB214A_KEYS_DEBOUNCE_INTERVAL,
		.gpio		= ADM5120_GPIO_PIN1,
	}
};
/* Per-port VLAN masks for the 5-port switch; bit 6 (0x40) is set in
 * every active entry — presumably the CPU port bit, verify against the
 * adm5120 switch driver. */
static u8 eb214a_vlans[6] __initdata = {
	0x41, 0x42, 0x44, 0x48, 0x50, 0x00
};
/*
 * eb214a_mac_setup - derive the Ethernet base MAC address
 *
 * The board stores the base MAC in the first 6 bytes of the config
 * area in SRAM0.  If that data is not a valid unicast address, fall
 * back to a random one before registering the interfaces.
 */
static void __init eb214a_mac_setup(void)
{
	u8 mac_base[6];
	const u8 *cfg;

	cfg = (u8 *) KSEG1ADDR(ADM5120_SRAM0_BASE + EB214A_CONFIG_OFFSET);
	memcpy(mac_base, cfg, sizeof(mac_base));

	if (!is_valid_ether_addr(mac_base))
		random_ether_addr(mac_base);

	adm5120_setup_eth_macs(mac_base);
}
/* Board init: flash partitions, UART0, 5-port switch with VLAN map,
 * MAC addresses, reset button, LEDs and PCI IRQ routing. */
static void __init eb214a_setup(void)
{
	adm5120_flash0_data.nr_parts = ARRAY_SIZE(eb214a_partitions);
	adm5120_flash0_data.parts = eb214a_partitions;
	adm5120_add_device_flash(0);

	adm5120_add_device_uart(0);
	/* adm5120_add_device_uart(1); */

	adm5120_add_device_switch(5, eb214a_vlans);

	eb214a_mac_setup();

	adm5120_register_gpio_buttons(-1, EB214A_KEYS_POLL_INTERVAL,
				      ARRAY_SIZE(eb214a_gpio_buttons),
				      eb214a_gpio_buttons);

	adm5120_add_device_gpio_leds(ARRAY_SIZE(eb214a_gpio_leds),
				     eb214a_gpio_leds);

	adm5120_pci_set_irq_map(ARRAY_SIZE(eb214a_pci_irqs),
				eb214a_pci_irqs);

	/* adm5120_add_device_usb(); */
}

MIPS_MACHINE(MACH_ADM5120_EB_214A, "EB-214A", "Generic EB-214A", eb214a_setup);
| gpl-2.0 |
gamerman123x/kernel_oneplus_msm8974 | net/ipv4/syncookies.c | 483 | 10751 | /*
* Syncookies implementation for the Linux kernel
*
* Copyright (C) 1997 Andi Kleen
* Based on ideas by D.J.Bernstein and Eric Schenk.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/cryptohash.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <net/tcp.h>
#include <net/route.h>
/* Timestamps: lowest bits store TCP options */
#define TSBITS 6
#define TSMASK (((__u32)1 << TSBITS) - 1)

extern int sysctl_tcp_syncookies;

/* Random secret material mixed into every cookie hash; filled at boot. */
__u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
EXPORT_SYMBOL(syncookie_secret);

static __init int init_syncookies(void)
{
	get_random_bytes(syncookie_secret, sizeof(syncookie_secret));
	return 0;
}
__initcall(init_syncookies);
#define COOKIEBITS 24	/* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)

/* Per-CPU scratch buffer for the SHA transform: 16 input words, 5 digest
 * words, plus workspace. */
static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
		      ipv4_cookie_scratch);

/*
 * Hash the connection 4-tuple and @count with secret @c through one SHA
 * transform; return one 32-bit word of the digest.
 */
static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
		       u32 count, int c)
{
	__u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);

	/* Secret fills words 4..15; the 4-tuple and count fill 0..3. */
	memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
	tmp[0] = (__force u32)saddr;
	tmp[1] = (__force u32)daddr;
	tmp[2] = ((__force u32)sport << 16) + (__force u32)dport;
	tmp[3] = count;
	sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5);

	return tmp[17];
}
/*
 * when syncookies are in effect and tcp timestamps are enabled we encode
 * tcp options in the lower bits of the timestamp value that will be
 * sent in the syn-ack.
 * Since subsequent timestamps use the normal tcp_time_stamp value, we
 * must make sure that the resulting initial timestamp is <= tcp_time_stamp.
 */
__u32 cookie_init_timestamp(struct request_sock *req)
{
	struct inet_request_sock *ireq;
	u32 ts, ts_now = tcp_time_stamp;
	u32 options = 0;

	ireq = inet_rsk(req);

	/* Packed options: bits 0-3 snd_wscale (0xf = wscale off),
	 * bit 4 SACK ok, bit 5 ECN ok. */
	options = ireq->wscale_ok ? ireq->snd_wscale : 0xf;
	options |= ireq->sack_ok << 4;
	options |= ireq->ecn_ok << 5;

	ts = ts_now & ~TSMASK;
	ts |= options;
	/* If inserting the options pushed ts past ts_now, step back one
	 * TSMASK unit so the <= invariant still holds. */
	if (ts > ts_now) {
		ts >>= TSBITS;
		ts--;
		ts <<= TSBITS;
		ts |= options;
	}
	return ts;
}
/* Build the 32-bit syncookie sent as our ISN in the SYN-ACK. */
static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport,
				   __be16 dport, __u32 sseq, __u32 count,
				   __u32 data)
{
	/*
	 * Compute the secure sequence number.
	 * The output should be:
	 *   HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24)
	 *      + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24).
	 * Where sseq is their sequence number and count increases every
	 * minute by 1.
	 * As an extra hack, we add a small "data" value that encodes the
	 * MSS into the second hash value.
	 */
	return (cookie_hash(saddr, daddr, sport, dport, 0, 0) +
		sseq + (count << COOKIEBITS) +
		((cookie_hash(saddr, daddr, sport, dport, count, 1) + data)
		 & COOKIEMASK));
}
/*
* This retrieves the small "data" value from the syncookie.
* If the syncookie is bad, the data returned will be out of
* range. This must be checked by the caller.
*
 * The count value used to generate the cookie must be within
 * "maxdiff" of the current (passed-in) "count". The return value
 * is (__u32)-1 if this test fails.
 */
static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
				  __be16 sport, __be16 dport, __u32 sseq,
				  __u32 count, __u32 maxdiff)
{
	__u32 diff;

	/* Strip away the layers from the cookie */
	cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq;

	/* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
	diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS);
	if (diff >= maxdiff)
		return (__u32)-1;	/* cookie too old (or forged) */

	return (cookie -
		cookie_hash(saddr, daddr, sport, dport, count - diff, 1))
		& COOKIEMASK;	/* Leaving the data behind */
}
/*
* MSS Values are taken from the 2009 paper
* 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
* - values 1440 to 1460 accounted for 80% of observed mss values
* - values outside the 536-1460 range are rare (<0.2%).
*
* Table must be sorted.
*/
/* MSS values encodable in a cookie; the index into this table is the
 * small "data" value carried in the low bits of the hash.  Must stay
 * sorted ascending (lookup walks from the largest entry downwards). */
static __u16 const msstab[] = {
	64,
	512,
	536,
	1024,
	1440,
	1460,
	4312,
	8960,
};
/*
* Generate a syncookie. mssp points to the mss, which is returned
* rounded down to the value encoded in the cookie.
*/
__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
{
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int mssind;
	const __u16 mss = *mssp;

	/* note that we are under syncookie pressure */
	tcp_synq_overflow(sk);

	/* pick the largest table MSS not exceeding the peer's MSS;
	 * index 0 (64) is the fallback when nothing else fits */
	for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
		if (mss >= msstab[mssind])
			break;
	*mssp = msstab[mssind];

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);

	/* count advances once per minute; mssind is the hidden "data" */
	return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
				     th->source, th->dest, ntohl(th->seq),
				     jiffies / (HZ * 60), mssind);
}
/*
* This (misnamed) value is the age of syncookie which is permitted.
* Its ideal value should be dependent on TCP_TIMEOUT_INIT and
* sysctl_tcp_retries1. It's a rather complicated formula (exponential
* backoff) to compute at runtime so it's currently hardcoded here.
*/
#define COUNTER_TRIES 4
/*
* Check if a ack sequence number is a valid syncookie.
* Return the decoded mss if it is, or 0 if not.
*/
static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
{
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	__u32 seq = ntohl(th->seq) - 1;	/* their ISN, as used at encode time */
	__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
					    th->source, th->dest, seq,
					    jiffies / (HZ * 60),
					    COUNTER_TRIES);

	/* a bad cookie decodes to an out-of-range index and yields 0 */
	return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
/* Create the child socket for a validated cookie ACK and queue it on the
 * listener's accept queue; on failure the request is freed and NULL
 * returned.  Either way, ownership of @req is consumed here. */
static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
					   struct request_sock *req,
					   struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *child;

	child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
	if (child)
		inet_csk_reqsk_queue_add(sk, req, child);
	else
		reqsk_free(req);

	return child;
}
/*
* when syncookies are in effect and tcp timestamps are enabled we stored
* additional tcp options in the timestamp.
* This extracts these options from the timestamp echo.
*
* The lowest 4 bits store snd_wscale.
* next 2 bits indicate SACK and ECN support.
*
* return false if we decode an option that should not be.
*/
/*
 * Decode the TCP options we previously hid in the timestamp echo.
 * Layout: bits 0-3 snd_wscale (0xf = none), bit 4 SACK, bit 5 ECN.
 * Returns false if an option is decoded that the relevant sysctl
 * currently forbids.
 */
bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
{
	u32 opts;

	if (!tcp_opt->saw_tstamp) {
		/* no timestamp echoed, so nothing was encoded */
		tcp_clear_options(tcp_opt);
		return true;
	}

	if (!sysctl_tcp_timestamps)
		return false;

	/* our options live in the low TSBITS bits of the echo */
	opts = tcp_opt->rcv_tsecr & TSMASK;

	tcp_opt->sack_ok = (opts & (1 << 4)) ? TCP_SACK_SEEN : 0;
	*ecn_ok = (opts >> 5) & 1;

	if (*ecn_ok && !sysctl_tcp_ecn)
		return false;
	if (tcp_opt->sack_ok && !sysctl_tcp_sack)
		return false;

	if ((opts & 0xf) == 0xf)
		return true;	/* no window scaling */

	tcp_opt->wscale_ok = 1;
	tcp_opt->snd_wscale = opts & 0xf;
	return sysctl_tcp_window_scaling != 0;
}
EXPORT_SYMBOL(cookie_check_timestamp);
/*
 * Validate an ACK that may carry a syncookie and, if valid, reconstruct
 * the request sock we never stored and create the child socket.
 * Returns:  @sk itself when the packet is not a cookie ACK (caller
 * continues normal processing), NULL on any failure after the cookie
 * was recognised, or the new child socket on success.
 */
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
			     struct ip_options *opt)
{
	struct tcp_options_received tcp_opt;
	const u8 *hash_location;
	struct inet_request_sock *ireq;
	struct tcp_request_sock *treq;
	struct tcp_sock *tp = tcp_sk(sk);
	const struct tcphdr *th = tcp_hdr(skb);
	__u32 cookie = ntohl(th->ack_seq) - 1;	/* cookie was our ISN */
	struct sock *ret = sk;
	struct request_sock *req;
	int mss;
	struct rtable *rt;
	__u8 rcv_wscale;
	bool ecn_ok = false;
	struct flowi4 fl4;

	/* only plain ACKs (no RST), and only while syncookies are enabled */
	if (!sysctl_tcp_syncookies || !th->ack || th->rst)
		goto out;

	/* no recent listen-queue overflow means we never sent a cookie */
	if (tcp_synq_no_recent_overflow(sk) ||
	    (mss = cookie_check(skb, cookie)) == 0) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
		goto out;
	}

	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);

	/* check for timestamp cookie support */
	memset(&tcp_opt, 0, sizeof(tcp_opt));
	tcp_parse_options(skb, &tcp_opt, &hash_location, 0);

	if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
		goto out;

	ret = NULL;
	req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
	if (!req)
		goto out;

	/* rebuild the request sock from the cookie and packet headers */
	ireq = inet_rsk(req);
	treq = tcp_rsk(req);
	treq->rcv_isn = ntohl(th->seq) - 1;
	treq->snt_isn = cookie;
	req->mss = mss;
	ireq->loc_port = th->dest;
	ireq->rmt_port = th->source;
	ireq->loc_addr = ip_hdr(skb)->daddr;
	ireq->rmt_addr = ip_hdr(skb)->saddr;
	ireq->ir_mark = inet_request_mark(sk, skb);
	ireq->ecn_ok = ecn_ok;
	ireq->snd_wscale = tcp_opt.snd_wscale;
	ireq->sack_ok = tcp_opt.sack_ok;
	ireq->wscale_ok = tcp_opt.wscale_ok;
	ireq->tstamp_ok = tcp_opt.saw_tstamp;
	req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
	treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;

	/* We throwed the options of the initial SYN away, so we hope
	 * the ACK carries the same options again (see RFC1122 4.2.3.8)
	 */
	if (opt && opt->optlen) {
		int opt_size = sizeof(struct ip_options_rcu) + opt->optlen;

		ireq->opt = kmalloc(opt_size, GFP_ATOMIC);
		if (ireq->opt != NULL && ip_options_echo(&ireq->opt->opt, skb)) {
			kfree(ireq->opt);
			ireq->opt = NULL;
		}
	}

	if (security_inet_conn_request(sk, skb, req)) {
		reqsk_free(req);
		goto out;
	}

	req->expires = 0UL;
	req->retrans = 0;

	/*
	 * We need to lookup the route here to get at the correct
	 * window size. We should better make sure that the window size
	 * hasn't changed since we received the original syn, but I see
	 * no easy way to do this.
	 */
	flowi4_init_output(&fl4, sk->sk_bound_dev_if, ireq->ir_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
			   inet_sk_flowi_flags(sk),
			   (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
			   ireq->loc_addr, th->source, th->dest,
			   sock_i_uid(sk));
	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(sock_net(sk), &fl4);
	if (IS_ERR(rt)) {
		reqsk_free(req);
		goto out;
	}

	/* Try to redo what tcp_v4_send_synack did. */
	req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);

	tcp_select_initial_window(tcp_full_space(sk), req->mss,
				  &req->rcv_wnd, &req->window_clamp,
				  ireq->wscale_ok, &rcv_wscale,
				  dst_metric(&rt->dst, RTAX_INITRWND));

	ireq->rcv_wscale = rcv_wscale;

	ret = get_cookie_sock(sk, skb, req, &rt->dst);
	/* ip_queue_xmit() depends on our flow being setup
	 * Normal sockets get it right from inet_csk_route_child_sock()
	 */
	if (ret)
		inet_sk(ret)->cork.fl.u.ip4 = fl4;
out:	return ret;
}
| gpl-2.0 |
penhoi/linux-3.13.11.lbrpmu | drivers/staging/speakup/speakup_decpc.c | 483 | 15394 | /*
* This is the DECtalk PC speakup driver
*
* Some constants from DEC's DOS driver:
* Copyright (c) by Digital Equipment Corp.
*
* 386BSD DECtalk PC driver:
* Copyright (c) 1996 Brian Buhrow <buhrow@lothlorien.nfbcal.org>
*
* Linux DECtalk PC driver:
* Copyright (c) 1997 Nicolas Pitre <nico@cam.org>
*
* speakup DECtalk PC Internal driver:
* Copyright (c) 2003 David Borowski <david575@golden.net>
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include "spk_priv.h"
#include "speakup.h"
#define MODULE_init 0x0dec /* module in boot code */
#define MODULE_self_test 0x8800 /* module in self-test */
#define MODULE_reset 0xffff /* reinit the whole module */
#define MODE_mask 0xf000 /* mode bits in high nibble */
#define MODE_null 0x0000
#define MODE_test 0x2000 /* in testing mode */
#define MODE_status 0x8000
#define STAT_int 0x0001 /* running in interrupt mode */
#define STAT_tr_char 0x0002 /* character data to transmit */
#define STAT_rr_char 0x0004 /* ready to receive char data */
#define STAT_cmd_ready 0x0008 /* ready to accept commands */
#define STAT_dma_ready 0x0010 /* dma command ready */
#define STAT_digitized 0x0020 /* spc in digitized mode */
#define STAT_new_index 0x0040 /* new last index ready */
#define STAT_new_status 0x0080 /* new status posted */
#define STAT_dma_state 0x0100 /* dma state toggle */
#define STAT_index_valid 0x0200 /* indexs are valid */
#define STAT_flushing 0x0400 /* flush in progress */
#define STAT_self_test 0x0800 /* module in self test */
#define MODE_ready 0xc000 /* module ready for next phase */
#define READY_boot 0x0000
#define READY_kernel 0x0001
#define MODE_error 0xf000
#define CMD_mask 0xf000 /* mask for command nibble */
#define CMD_null 0x0000 /* post status */
#define CMD_control 0x1000 /* hard control command */
#define CTRL_mask 0x0F00 /* mask off control nibble */
#define CTRL_data 0x00FF /* mask to get data byte */
#define CTRL_null 0x0000 /* null control */
#define CTRL_vol_up 0x0100 /* increase volume */
#define CTRL_vol_down 0x0200 /* decrease volume */
#define CTRL_vol_set 0x0300 /* set volume */
#define CTRL_pause 0x0400 /* pause spc */
#define CTRL_resume 0x0500 /* resume spc clock */
#define CTRL_resume_spc 0x0001 /* resume spc soft pause */
#define CTRL_flush 0x0600 /* flush all buffers */
#define CTRL_int_enable 0x0700 /* enable status change ints */
#define CTRL_buff_free 0x0800 /* buffer remain count */
#define CTRL_buff_used 0x0900 /* buffer in use */
#define CTRL_speech 0x0a00 /* immediate speech change */
#define CTRL_SP_voice 0x0001 /* voice change */
#define CTRL_SP_rate 0x0002 /* rate change */
#define CTRL_SP_comma 0x0003 /* comma pause change */
#define CTRL_SP_period 0x0004 /* period pause change */
#define CTRL_SP_rate_delta 0x0005 /* delta rate change */
#define CTRL_SP_get_param 0x0006 /* return the desired parameter */
#define CTRL_last_index 0x0b00 /* get last index spoken */
#define CTRL_io_priority 0x0c00 /* change i/o priority */
#define CTRL_free_mem 0x0d00 /* get free paragraphs on module */
#define CTRL_get_lang 0x0e00 /* return bit mask of loaded
* languages */
#define CMD_test 0x2000 /* self-test request */
#define TEST_mask 0x0F00 /* isolate test field */
#define TEST_null 0x0000 /* no test requested */
#define TEST_isa_int 0x0100 /* assert isa irq */
#define TEST_echo 0x0200 /* make data in == data out */
#define TEST_seg 0x0300 /* set peek/poke segment */
#define TEST_off 0x0400 /* set peek/poke offset */
#define TEST_peek 0x0500 /* data out == *peek */
#define TEST_poke 0x0600 /* *peek == data in */
#define TEST_sub_code 0x00FF /* user defined test sub codes */
#define CMD_id 0x3000 /* return software id */
#define ID_null 0x0000 /* null id */
#define ID_kernel 0x0100 /* kernel code executing */
#define ID_boot 0x0200 /* boot code executing */
#define CMD_dma 0x4000 /* force a dma start */
#define CMD_reset 0x5000 /* reset module status */
#define CMD_sync 0x6000 /* kernel sync command */
#define CMD_char_in 0x7000 /* single character send */
#define CMD_char_out 0x8000 /* single character get */
#define CHAR_count_1 0x0100 /* one char in cmd_low */
#define CHAR_count_2 0x0200 /* the second in data_low */
#define CHAR_count_3 0x0300 /* the third in data_high */
#define CMD_spc_mode 0x9000 /* change spc mode */
#define CMD_spc_to_text 0x0100 /* set to text mode */
#define CMD_spc_to_digit 0x0200 /* set to digital mode */
#define CMD_spc_rate 0x0400 /* change spc data rate */
#define CMD_error 0xf000 /* severe error */
enum { PRIMARY_DIC = 0, USER_DIC, COMMAND_DIC, ABBREV_DIC };
#define DMA_single_in 0x01
#define DMA_single_out 0x02
#define DMA_buff_in 0x03
#define DMA_buff_out 0x04
#define DMA_control 0x05
#define DT_MEM_ALLOC 0x03
#define DT_SET_DIC 0x04
#define DT_START_TASK 0x05
#define DT_LOAD_MEM 0x06
#define DT_READ_MEM 0x07
#define DT_DIGITAL_IN 0x08
#define DMA_sync 0x06
#define DMA_sync_char 0x07
#define DRV_VERSION "2.12"
#define PROCSPEECH 0x0b
#define SYNTH_IO_EXTENT 8
static int synth_probe(struct spk_synth *synth);
static void dtpc_release(void);
static const char *synth_immediate(struct spk_synth *synth, const char *buf);
static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
static int synth_portlist[] = { 0x340, 0x350, 0x240, 0x250, 0 };
static int in_escape, is_flushing;
static int dt_stat, dma_state;
/* User-tunable synth parameters; the format strings are DECtalk PC
 * in-band command sequences filled in with the current value. */
static struct var_t vars[] = {
	{ CAPS_START, .u.s = {"[:dv ap 200]" } },
	{ CAPS_STOP, .u.s = {"[:dv ap 100]" } },
	{ RATE, .u.n = {"[:ra %d]", 9, 0, 18, 150, 25, NULL } },
	{ PITCH, .u.n = {"[:dv ap %d]", 80, 0, 100, 20, 0, NULL } },
	{ VOL, .u.n = {"[:vo se %d]", 5, 0, 9, 5, 10, NULL } },
	{ PUNCT, .u.n = {"[:pu %c]", 0, 0, 2, 0, 0, "nsa" } },
	{ VOICE, .u.n = {"[:n%c]", 0, 0, 9, 0, 0, "phfdburwkv" } },
	{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
	V_LAST_VAR
};
/*
* These attributes will appear in /sys/accessibility/speakup/decpc.
*/
static struct kobj_attribute caps_start_attribute =
__ATTR(caps_start, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
__ATTR(caps_stop, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
__ATTR(pitch, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
__ATTR(rate, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
__ATTR(voice, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
__ATTR(vol, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute delay_time_attribute =
__ATTR(delay_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
__ATTR(direct, USER_RW, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
__ATTR(full_time, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
__ATTR(jiffy_delta, ROOT_W, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
__ATTR(trigger_time, ROOT_W, spk_var_show, spk_var_store);
/*
* Create a group of attributes so that we can create and destroy them all
* at once.
*/
static struct attribute *synth_attrs[] = {
&caps_start_attribute.attr,
&caps_stop_attribute.attr,
&pitch_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr,
&voice_attribute.attr,
&vol_attribute.attr,
&delay_time_attribute.attr,
&direct_attribute.attr,
&full_time_attribute.attr,
&jiffy_delta_attribute.attr,
&trigger_time_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
static struct spk_synth synth_dec_pc = {
.name = "decpc",
.version = DRV_VERSION,
.long_name = "Dectalk PC",
.init = "[:pe -380]",
.procspeech = PROCSPEECH,
.delay = 500,
.trigger = 50,
.jiffies = 50,
.full = 1000,
.flags = SF_DEC,
.startup = SYNTH_START,
.checkval = SYNTH_CHECK,
.vars = vars,
.probe = synth_probe,
.release = dtpc_release,
.synth_immediate = synth_immediate,
.catch_up = do_catch_up,
.flush = synth_flush,
.is_alive = spk_synth_is_alive_nop,
.synth_adjust = NULL,
.read_buff_add = NULL,
.get_index = NULL,
.indexing = {
.command = NULL,
.lowindex = 0,
.highindex = 0,
.currindex = 0,
},
.attributes = {
.attrs = synth_attrs,
.name = "decpc",
},
};
/* Read the 16-bit board status word and cache it in dt_stat. */
static int dt_getstatus(void)
{
	int lo = inb_p(speakup_info.port_tts);
	int hi = inb_p(speakup_info.port_tts + 1);

	dt_stat = lo | (hi << 8);
	return dt_stat;
}
/* Write a 16-bit command word to the board, low byte first. */
static void dt_sendcmd(u_int cmd)
{
	outb_p(cmd & 0xFF, speakup_info.port_tts);
	outb_p((cmd >> 8) & 0xFF, speakup_info.port_tts+1);
}
/*
 * Poll the status word until every bit in @bit is set.
 * Polls up to 99 times at 50us intervals (~5ms worst case).
 * Returns 1 on success, 0 on timeout.
 */
static int dt_waitbit(int bit)
{
	int tries;

	for (tries = 100; --tries > 0; udelay(50)) {
		if ((dt_getstatus() & bit) == bit)
			return 1;
	}
	return 0;
}
/* Wait for the DMA engine to report ready and to toggle into the state
 * we last recorded.  On toggle timeout we resynchronise dma_state from
 * the hardware and still return success; only a missing STAT_dma_ready
 * is treated as failure. */
static int dt_wait_dma(void)
{
	int timeout = 100, state = dma_state;

	if (!dt_waitbit(STAT_dma_ready))
		return 0;
	while (--timeout > 0) {
		if ((dt_getstatus()&STAT_dma_state) == state)
			return 1;
		udelay(50);
	}
	/* toggle never observed: adopt the hardware's current state */
	dma_state = dt_getstatus() & STAT_dma_state;
	return 1;
}
/* Issue a hard-control command (CMD_control | @cmd) and wait briefly for
 * the board to consume it.  Returns 0 on success, -1 if the board never
 * became ready to accept a command. */
static int dt_ctrl(u_int cmd)
{
	int timeout = 10;

	if (!dt_waitbit(STAT_cmd_ready))
		return -1;
	/* clear the data ports before presenting the command */
	outb_p(0, speakup_info.port_tts+2);
	outb_p(0, speakup_info.port_tts+3);
	dt_getstatus();
	dt_sendcmd(CMD_control|cmd);
	outb_p(0, speakup_info.port_tts+6);	/* strobe the command */
	while (dt_getstatus() & STAT_cmd_ready) {
		udelay(20);
		if (--timeout == 0)
			break;
	}
	dt_sendcmd(CMD_null);	/* post status again */
	return 0;
}
/* Abort any speech in progress: issue CTRL_flush, resynchronise the DMA
 * handshake, and wait for the board to finish flushing.  Re-entry while a
 * flush is pending is ignored. */
static void synth_flush(struct spk_synth *synth)
{
	int timeout = 10;

	if (is_flushing)
		return;
	is_flushing = 4;
	in_escape = 0;	/* any partial [ ... ] command is abandoned */
	while (dt_ctrl(CTRL_flush)) {
		if (--timeout == 0)
			break;
		udelay(50);
	}
	/* wait for DMA ready, then send a sync marker through the DMA port */
	for (timeout = 0; timeout < 10; timeout++) {
		if (dt_waitbit(STAT_dma_ready))
			break;
		udelay(50);
	}
	outb_p(DMA_sync, speakup_info.port_tts+4);
	outb_p(0, speakup_info.port_tts+4);
	udelay(100);
	for (timeout = 0; timeout < 10; timeout++) {
		if (!(dt_getstatus() & STAT_flushing))
			break;
		udelay(50);
	}
	/* prime dma_state so the next dt_sendchar sees the expected toggle */
	dma_state = dt_getstatus() & STAT_dma_state;
	dma_state ^= STAT_dma_state;
	is_flushing = 0;
}
/* Push one character to the board via single-byte DMA.
 * Returns 0 on success, -1 if DMA never became ready, -2 if the board
 * is not ready to receive character data (caller should retry later). */
static int dt_sendchar(char ch)
{
	if (!dt_wait_dma())
		return -1;
	if (!(dt_stat & STAT_rr_char))
		return -2;
	outb_p(DMA_single_in, speakup_info.port_tts+4);
	outb_p(ch, speakup_info.port_tts+4);
	dma_state ^= STAT_dma_state;	/* expect the toggle on next wait */
	return 0;
}
/*
 * Probe the board at the currently claimed port: verify it responds,
 * send a kernel sync, and check that kernel code is running.
 * Returns 0 when the board is up; otherwise a negative code
 * (-1 no board, -2 command timeout, -3 firmware not loaded) after
 * releasing the I/O region and clearing port_tts.
 *
 * Fix: the command-timeout path previously fell through and had its
 * status = -2 overwritten by the unconditional status = -3; it now
 * jumps straight to the cleanup path so -2 is actually returned.
 */
static int testkernel(void)
{
	int status = 0;

	if (dt_getstatus() == 0xffff) {
		status = -1;	/* floating bus: no board here */
		goto oops;
	}
	dt_sendcmd(CMD_sync);
	if (!dt_waitbit(STAT_cmd_ready)) {
		status = -2;
		goto oops;
	}
	if (dt_stat & 0x8000)	/* MODE_status posted: kernel is running */
		return 0;
	if (dt_stat == 0x0dec)	/* boot code only (MODULE_init) */
		pr_warn("dec_pc at 0x%x, software not loaded\n",
			speakup_info.port_tts);
	status = -3;
oops:	synth_release_region(speakup_info.port_tts, SYNTH_IO_EXTENT);
	speakup_info.port_tts = 0;
	return status;
}
/*
 * Kthread work loop: drain the speakup buffer to the synthesizer.
 * The speakup_info spinlock protects the buffer and the flushing flag;
 * it is always dropped before touching the hardware or sleeping.
 * PROCSPEECH is injected after clause punctuation and periodically
 * (every jiffy_delta jiffies) so the board starts speaking without
 * waiting for a full buffer.
 */
static void do_catch_up(struct spk_synth *synth)
{
	u_char ch;
	static u_char last;	/* previous character, for punctuation check */
	unsigned long flags;
	unsigned long jiff_max;
	struct var_t *jiffy_delta;
	struct var_t *delay_time;
	int jiffy_delta_val;
	int delay_time_val;

	jiffy_delta = spk_get_var(JIFFY);
	delay_time = spk_get_var(DELAY);
	spin_lock_irqsave(&speakup_info.spinlock, flags);
	jiffy_delta_val = jiffy_delta->u.n.value;
	spin_unlock_irqrestore(&speakup_info.spinlock, flags);
	jiff_max = jiffies + jiffy_delta_val;

	while (!kthread_should_stop()) {
		spin_lock_irqsave(&speakup_info.spinlock, flags);
		if (speakup_info.flushing) {
			speakup_info.flushing = 0;
			spin_unlock_irqrestore(&speakup_info.spinlock, flags);
			synth->flush(synth);
			continue;
		}
		if (synth_buffer_empty()) {
			spin_unlock_irqrestore(&speakup_info.spinlock, flags);
			break;
		}
		/* peek only: the char is consumed after a successful send */
		ch = synth_buffer_peek();
		set_current_state(TASK_INTERRUPTIBLE);
		delay_time_val = delay_time->u.n.value;
		spin_unlock_irqrestore(&speakup_info.spinlock, flags);
		if (ch == '\n')
			ch = 0x0D;	/* board wants CR, not LF */
		if (dt_sendchar(ch)) {
			/* board busy: sleep and retry the same char */
			schedule_timeout(msecs_to_jiffies(delay_time_val));
			continue;
		}
		set_current_state(TASK_RUNNING);
		spin_lock_irqsave(&speakup_info.spinlock, flags);
		synth_buffer_getc();
		spin_unlock_irqrestore(&speakup_info.spinlock, flags);
		if (ch == '[')
			in_escape = 1;	/* inside a [:...] command sequence */
		else if (ch == ']')
			in_escape = 0;
		else if (ch <= SPACE) {
			/* speak completed clause after punctuation */
			if (!in_escape && strchr(",.!?;:", last))
				dt_sendchar(PROCSPEECH);
			if (jiffies >= jiff_max) {
				if (!in_escape)
					dt_sendchar(PROCSPEECH);
				spin_lock_irqsave(&speakup_info.spinlock, flags);
				jiffy_delta_val = jiffy_delta->u.n.value;
				delay_time_val = delay_time->u.n.value;
				spin_unlock_irqrestore(&speakup_info.spinlock, flags);
				schedule_timeout(msecs_to_jiffies
						 (delay_time_val));
				jiff_max = jiffies + jiffy_delta_val;
			}
		}
		last = ch;
		ch = 0;
	}
	/* flush whatever remains unless mid-command */
	if (!in_escape)
		dt_sendchar(PROCSPEECH);
}
/*
 * Send a string to the board synchronously, mapping '\n' to PROCSPEECH.
 * Returns NULL when everything was sent, or a pointer to the first
 * unsent character if the board refused one (caller may resume there).
 */
static const char *synth_immediate(struct spk_synth *synth, const char *buf)
{
	u_char c;

	for (; (c = *buf) != 0; buf++) {
		if (c == '\n')
			c = PROCSPEECH;
		if (dt_sendchar(c))
			return buf;
	}
	return NULL;
}
/* Walk the candidate port list, claim each region and run testkernel()
 * until a responding board is found.  Returns 0 and marks the synth
 * alive on success, -ENODEV if no board answered at any port. */
static int synth_probe(struct spk_synth *synth)
{
	int i = 0, failed = 0;

	pr_info("Probing for %s.\n", synth->long_name);
	for (i = 0; synth_portlist[i]; i++) {
		if (synth_request_region(synth_portlist[i], SYNTH_IO_EXTENT)) {
			pr_warn("request_region: failed with 0x%x, %d\n",
				synth_portlist[i], SYNTH_IO_EXTENT);
			continue;
		}
		speakup_info.port_tts = synth_portlist[i];
		/* testkernel() releases the region itself on failure */
		failed = testkernel();
		if (failed == 0)
			break;
	}
	if (failed) {
		pr_info("%s: not found\n", synth->long_name);
		return -ENODEV;
	}
	pr_info("%s: %03x-%03x, Driver Version %s,\n", synth->long_name,
		speakup_info.port_tts, speakup_info.port_tts + 7,
		synth->version);
	synth->alive = 1;
	return 0;
}
/* Release the claimed I/O region, if any, and forget the port. */
static void dtpc_release(void)
{
	if (speakup_info.port_tts)
		synth_release_region(speakup_info.port_tts, SYNTH_IO_EXTENT);
	speakup_info.port_tts = 0;
}
module_param_named(start, synth_dec_pc.startup, short, S_IRUGO);
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
/* Module entry: register this synth with the speakup core. */
static int __init decpc_init(void)
{
	return synth_add(&synth_dec_pc);
}

/* Module exit: unregister from the speakup core. */
static void __exit decpc_exit(void)
{
	synth_remove(&synth_dec_pc);
}
module_init(decpc_init);
module_exit(decpc_exit);
MODULE_AUTHOR("Kirk Reiser <kirk@braille.uwo.ca>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DECtalk PC synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
| gpl-2.0 |
sombree/android_kernel_samsung_jf | drivers/media/video/msm-bayer/imx072.c | 1763 | 29074 | /* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/i2c.h>
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <media/msm_camera.h>
#include <mach/gpio.h>
#include <mach/camera.h>
#include "imx072.h"
/* SENSOR REGISTER DEFINES */
#define REG_GROUPED_PARAMETER_HOLD 0x0104
#define GROUPED_PARAMETER_HOLD_OFF 0x00
#define GROUPED_PARAMETER_HOLD 0x01
/* Integration Time */
#define REG_COARSE_INTEGRATION_TIME 0x0202
/* Gain */
#define REG_GLOBAL_GAIN 0x0204
/* PLL registers */
#define REG_FRAME_LENGTH_LINES 0x0340
#define REG_LINE_LENGTH_PCK 0x0342
/* 16bit address - 8 bit context register structure */
#define Q8 0x00000100
#define Q10 0x00000400
#define IMX072_MASTER_CLK_RATE 24000000
#define IMX072_OFFSET 3
/* AF Total steps parameters */
#define IMX072_AF_I2C_ADDR 0x18
#define IMX072_TOTAL_STEPS_NEAR_TO_FAR 30
static uint16_t imx072_step_position_table[IMX072_TOTAL_STEPS_NEAR_TO_FAR+1];
static uint16_t imx072_nl_region_boundary1;
static uint16_t imx072_nl_region_code_per_step1;
static uint16_t imx072_l_region_code_per_step = 12;
static uint16_t imx072_sw_damping_time_wait = 8;
static uint16_t imx072_af_initial_code = 350;
static uint16_t imx072_damping_threshold = 10;
struct imx072_work_t {
struct work_struct work;
};
static struct imx072_work_t *imx072_sensorw;
static struct i2c_client *imx072_client;
struct imx072_ctrl_t {
const struct msm_camera_sensor_info *sensordata;
uint32_t sensormode;
uint32_t fps_divider;/* init to 1 * 0x00000400 */
uint32_t pict_fps_divider;/* init to 1 * 0x00000400 */
uint16_t fps;
uint16_t curr_lens_pos;
uint16_t curr_step_pos;
uint16_t my_reg_gain;
uint32_t my_reg_line_count;
uint16_t total_lines_per_frame;
enum imx072_resolution_t prev_res;
enum imx072_resolution_t pict_res;
enum imx072_resolution_t curr_res;
enum imx072_test_mode_t set_test;
enum imx072_cam_mode_t cam_mode;
};
static uint16_t prev_line_length_pck;
static uint16_t prev_frame_length_lines;
static uint16_t snap_line_length_pck;
static uint16_t snap_frame_length_lines;
static bool CSI_CONFIG;
static struct imx072_ctrl_t *imx072_ctrl;
static DECLARE_WAIT_QUEUE_HEAD(imx072_wait_queue);
DEFINE_MUTEX(imx072_mut);
#ifdef CONFIG_DEBUG_FS
static int cam_debug_init(void);
static struct dentry *debugfs_base;
#endif
/*
 * Combined I2C write-then-read: first message writes the register
 * address held in @rxdata, second reads @length bytes back into the
 * same buffer.
 *
 * NOTE(review): both messages use .len = @length, so for a 1-byte read
 * only one byte of the 2-byte register address is transmitted — verify
 * against the sensor's expected address width before relying on
 * single-byte reads.
 */
static int imx072_i2c_rxdata(unsigned short saddr,
	unsigned char *rxdata, int length)
{
	struct i2c_msg msgs[] = {
		{
			.addr  = saddr,
			.flags = 0,
			.len   = length,
			.buf   = rxdata,
		},
		{
			.addr  = saddr,
			.flags = I2C_M_RD,
			.len   = length,
			.buf   = rxdata,
		},
	};
	if (i2c_transfer(imx072_client->adapter, msgs, 2) < 0) {
		pr_err("imx072_i2c_rxdata faild 0x%x\n", saddr);
		return -EIO;
	}
	return 0;
}
static int32_t imx072_i2c_txdata(unsigned short saddr,
unsigned char *txdata, int length)
{
struct i2c_msg msg[] = {
{
.addr = saddr,
.flags = 0,
.len = length,
.buf = txdata,
},
};
if (i2c_transfer(imx072_client->adapter, msg, 1) < 0) {
pr_err("imx072_i2c_txdata faild 0x%x\n", saddr);
return -EIO;
}
return 0;
}
/*
 * Read a sensor register at 16-bit address @raddr.
 * @rlen selects the width: 2 returns a big-endian 16-bit value,
 * anything else returns the first byte only.
 */
static int32_t imx072_i2c_read(unsigned short raddr,
	unsigned short *rdata, int rlen)
{
	int32_t rc = 0;
	unsigned char buf[2];

	if (!rdata)
		return -EIO;
	memset(buf, 0, sizeof(buf));
	/* load the register address, high byte first */
	buf[0] = (raddr & 0xFF00) >> 8;
	buf[1] = (raddr & 0x00FF);
	/* client->addr is the 8-bit form; the core wants 7-bit */
	rc = imx072_i2c_rxdata(imx072_client->addr>>1, buf, rlen);
	if (rc < 0) {
		pr_err("imx072_i2c_read 0x%x failed!\n", raddr);
		return rc;
	}
	*rdata = (rlen == 2 ? buf[0] << 8 | buf[1] : buf[0]);
	CDBG("imx072_i2c_read 0x%x val = 0x%x!\n", raddr, *rdata);
	return rc;
}
/*
 * Write a 16-bit value @wdata to sensor register @waddr (both sent
 * big-endian as a single 4-byte transfer).
 */
static int32_t imx072_i2c_write_w_sensor(unsigned short waddr,
	uint16_t wdata)
{
	int32_t rc;
	unsigned char buf[4] = {
		(waddr >> 8) & 0xFF,
		waddr & 0xFF,
		(wdata >> 8) & 0xFF,
		wdata & 0xFF,
	};

	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, wdata);
	rc = imx072_i2c_txdata(imx072_client->addr>>1, buf, 4);
	if (rc < 0) {
		pr_err("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
			waddr, wdata);
	}
	return rc;
}
/* Write a single byte @bdata to sensor register @waddr (16-bit address,
 * sent big-endian, as one 3-byte transfer). */
static int32_t imx072_i2c_write_b_sensor(unsigned short waddr,
	uint8_t bdata)
{
	int32_t rc = -EFAULT;
	unsigned char buf[3];

	memset(buf, 0, sizeof(buf));
	buf[0] = (waddr & 0xFF00) >> 8;
	buf[1] = (waddr & 0x00FF);
	buf[2] = bdata;
	CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata);
	rc = imx072_i2c_txdata(imx072_client->addr>>1, buf, 3);
	if (rc < 0)
		pr_err("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n",
			waddr, bdata);
	return rc;
}
/*
 * Write a two-byte command (@msb, @lsb) to the autofocus actuator at
 * its fixed I2C address (IMX072_AF_I2C_ADDR, shifted to 7-bit form).
 *
 * Fix: error log said "faield" and lacked a trailing newline; message
 * corrected to "failed ...\n".
 */
static int32_t imx072_i2c_write_b_af(uint8_t msb, uint8_t lsb)
{
	int32_t rc = -EFAULT;
	unsigned char buf[2];

	buf[0] = msb;
	buf[1] = lsb;
	rc = imx072_i2c_txdata(IMX072_AF_I2C_ADDR>>1, buf, 2);
	if (rc < 0)
		pr_err("af_i2c_write failed msb = 0x%x lsb = 0x%x\n",
			msb, lsb);
	return rc;
}
/*
 * Write @num register/value pairs from @reg_conf_tbl to the sensor,
 * stopping at the first failure.
 * NOTE: despite the "_w_table" name this issues byte writes
 * (imx072_i2c_write_b_sensor) for each entry's wdata.
 */
static int32_t imx072_i2c_write_w_table(struct imx072_i2c_reg_conf const
					 *reg_conf_tbl, int num)
{
	int i;
	int32_t rc = -EIO;

	for (i = 0; i < num; i++) {
		rc = imx072_i2c_write_b_sensor(reg_conf_tbl->waddr,
			reg_conf_tbl->wdata);
		if (rc < 0)
			break;
		reg_conf_tbl++;
	}
	return rc;
}
/* Latch subsequent register writes so they take effect atomically at
 * the next frame boundary. */
static void imx072_group_hold_on(void)
{
	imx072_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
						GROUPED_PARAMETER_HOLD);
}

/* Release the group hold, committing the latched writes. */
static void imx072_group_hold_off(void)
{
	imx072_i2c_write_b_sensor(REG_GROUPED_PARAMETER_HOLD,
						GROUPED_PARAMETER_HOLD_OFF);
}

/* Start streaming (standard MODE_SELECT register 0x0100). */
static void imx072_start_stream(void)
{
	imx072_i2c_write_b_sensor(0x0100, 0x01);
}

/* Stop streaming / enter standby. */
static void imx072_stop_stream(void)
{
	imx072_i2c_write_b_sensor(0x0100, 0x00);
}
/*
 * Convert a preview fps (Q8) into the equivalent snapshot fps by
 * scaling with the ratio of preview-to-snapshot frame timings.
 */
static void imx072_get_pict_fps(uint16_t fps, uint16_t *pfps)
{
	/* input fps is preview fps in Q8 format */
	uint32_t d1, d2, divider;

	d1 = (prev_frame_length_lines * Q10) / snap_frame_length_lines;
	d2 = (prev_line_length_pck * Q10) / snap_line_length_pck;
	divider = (d1 * d2) / Q10;
	/*Verify PCLK settings and frame sizes.*/
	*pfps = (uint16_t)((fps * divider) / Q10);
}
/* Accessors for the frame-timing values cached by imx072_mode_init(). */

/* Preview frame length in lines. */
static uint16_t imx072_get_prev_lines_pf(void)
{
	return prev_frame_length_lines;
}

/* Preview line length in pixel clocks. */
static uint16_t imx072_get_prev_pixels_pl(void)
{
	return prev_line_length_pck;
}

/* Snapshot frame length in lines. */
static uint16_t imx072_get_pict_lines_pf(void)
{
	return snap_frame_length_lines;
}

/* Snapshot line length in pixel clocks. */
static uint16_t imx072_get_pict_pixels_pl(void)
{
	return snap_line_length_pck;
}

/* Maximum snapshot exposure, in line counts (24 frames' worth). */
static uint32_t imx072_get_pict_max_exp_lc(void)
{
	return snap_frame_length_lines * 24;
}
/*
 * Apply a new fps divider by reprogramming the frame length.
 * NOTE(review): total_lines_per_frame is computed from the divider
 * value in effect *before* fps->fps_div is stored, so the register is
 * written with the previous divider's frame length — confirm this is
 * intentional against the userspace fps sequencing.
 */
static int32_t imx072_set_fps(struct fps_cfg *fps)
{
	uint16_t total_lines_per_frame;
	int32_t rc = 0;

	total_lines_per_frame = (uint16_t)
		((prev_frame_length_lines *
			imx072_ctrl->fps_divider)/0x400);
	imx072_ctrl->fps_divider = fps->fps_div;
	imx072_ctrl->pict_fps_divider = fps->pict_fps_div;
	/* group hold so the frame length changes on a frame boundary */
	imx072_group_hold_on();
	rc = imx072_i2c_write_w_sensor(REG_FRAME_LENGTH_LINES,
			total_lines_per_frame);
	imx072_group_hold_off();
	return rc;
}
/*
 * Program analog @gain and integration time @line, stretching the frame
 * length when the requested exposure would not fit in the current frame.
 * All three registers are written under a group hold so they latch on
 * the same frame.
 */
static int32_t imx072_write_exp_gain(uint16_t gain, uint32_t line)
{
	uint32_t fl_lines = 0;
	uint8_t offset;
	int32_t rc = 0;

	/* base frame length depends on which resolution is active */
	if (imx072_ctrl->curr_res == imx072_ctrl->prev_res)
		fl_lines = prev_frame_length_lines;
	else if (imx072_ctrl->curr_res == imx072_ctrl->pict_res)
		fl_lines = snap_frame_length_lines;
	line = (line * imx072_ctrl->fps_divider) / Q10;
	offset = IMX072_OFFSET;	/* minimum blanking margin in lines */
	if (line > (fl_lines - offset))
		fl_lines = line + offset;
	imx072_group_hold_on();
	rc = imx072_i2c_write_w_sensor(REG_FRAME_LENGTH_LINES, fl_lines);
	rc = imx072_i2c_write_w_sensor(REG_COARSE_INTEGRATION_TIME, line);
	rc = imx072_i2c_write_w_sensor(REG_GLOBAL_GAIN, gain);
	imx072_group_hold_off();
	return rc;
}
/* Snapshot exposure/gain uses the same register path as preview. */
static int32_t imx072_set_pict_exp_gain(uint16_t gain, uint32_t line)
{
	return imx072_write_exp_gain(gain, line);
}
/*
 * Program the sensor for @update_type:
 *   REG_INIT        - stop streaming and load the common register set;
 *   UPDATE_PERIODIC - configure the CSI receiver (once) and load the
 *                     per-resolution table @rt, then restart streaming.
 */
static int32_t imx072_sensor_setting(int update_type, int rt)
{
	int32_t rc = 0;
	struct msm_camera_csi_params imx072_csi_params;

	imx072_stop_stream();
	msleep(30);
	if (update_type == REG_INIT) {
		msleep(20);
		CSI_CONFIG = 0;	/* force CSI reconfig on next periodic */
		imx072_i2c_write_w_table(imx072_regs.rec_settings,
			imx072_regs.rec_size);
	} else if (update_type == UPDATE_PERIODIC) {
#ifdef CONFIG_DEBUG_FS
		cam_debug_init();
#endif
		msleep(20);
		if (!CSI_CONFIG) {
			/* 2-lane, 10-bit RAW over MIPI CSI */
			imx072_csi_params.lane_cnt = 2;
			imx072_csi_params.data_format = CSI_10BIT;
			imx072_csi_params.lane_assign = 0xe4;
			imx072_csi_params.dpcm_scheme = 0;
			imx072_csi_params.settle_cnt = 0x18;
			msm_camio_vfe_clk_rate_set(192000000);
			rc = msm_camio_csi_config(&imx072_csi_params);
			msleep(100);
			CSI_CONFIG = 1;
		}
		imx072_i2c_write_w_table(
			imx072_regs.conf_array[rt].conf,
			imx072_regs.conf_array[rt].size);
		imx072_start_stream();
		msleep(30);
	}
	return rc;
}
/*
 * Switch the sensor to the preview resolution and record the mode.
 *
 * Fix: the original tested imx072_sensor_setting() < 0 but then
 * returned the still-zero rc, silently reporting success on failure.
 * The error code is now propagated to the caller.
 */
static int32_t imx072_video_config(int mode)
{
	int32_t rc;

	/* change sensor resolution if needed */
	rc = imx072_sensor_setting(UPDATE_PERIODIC, imx072_ctrl->prev_res);
	if (rc < 0)
		return rc;

	imx072_ctrl->curr_res = imx072_ctrl->prev_res;
	imx072_ctrl->sensormode = mode;
	return rc;
}
/*
 * Switch the sensor into the configured snapshot resolution (only when
 * it differs from the current one) and record the new mode.
 *
 * Returns 0 on success or the negative error from the mode switch.
 * (The original returned 0 even when imx072_sensor_setting() failed.)
 */
static int32_t imx072_snapshot_config(int mode)
{
	int32_t rc = 0;

	/* change sensor resolution if needed */
	if (imx072_ctrl->curr_res != imx072_ctrl->pict_res) {
		rc = imx072_sensor_setting(UPDATE_PERIODIC,
			imx072_ctrl->pict_res);
		if (rc < 0)
			return rc;
	}

	imx072_ctrl->curr_res = imx072_ctrl->pict_res;
	imx072_ctrl->sensormode = mode;
	return rc;
}
/*
 * RAW snapshot variant of imx072_snapshot_config(): same resolution
 * switch, only the recorded sensormode differs.
 *
 * Returns 0 on success or the negative error from the mode switch.
 * (The original returned 0 even when imx072_sensor_setting() failed.)
 */
static int32_t imx072_raw_snapshot_config(int mode)
{
	int32_t rc = 0;

	/* change sensor resolution if needed */
	if (imx072_ctrl->curr_res != imx072_ctrl->pict_res) {
		rc = imx072_sensor_setting(UPDATE_PERIODIC,
			imx072_ctrl->pict_res);
		if (rc < 0)
			return rc;
	}

	imx072_ctrl->curr_res = imx072_ctrl->pict_res;
	imx072_ctrl->sensormode = mode;
	return rc;
}
/*
 * First-time mode setup. When the camera mode changes, cache the
 * preview/snapshot frame geometry out of the per-resolution register
 * tables (each 16-bit value is split across HI/LO 8-bit register
 * entries, hence the "<< 8 |" recombination) and run the one-time
 * REG_INIT register load.
 */
static int32_t imx072_mode_init(int mode, struct sensor_init_cfg init_info)
{
	int32_t rc = 0;

	CDBG("%s: %d\n", __func__, __LINE__);
	if (mode != imx072_ctrl->cam_mode) {
		imx072_ctrl->prev_res = init_info.prev_res;
		imx072_ctrl->pict_res = init_info.pict_res;
		imx072_ctrl->cam_mode = mode;

		/* preview frame length (rows per frame) */
		prev_frame_length_lines =
			imx072_regs.conf_array[imx072_ctrl->prev_res].
			conf[IMX072_FRAME_LENGTH_LINES_HI].wdata << 8 |
			imx072_regs.conf_array[imx072_ctrl->prev_res].
			conf[IMX072_FRAME_LENGTH_LINES_LO].wdata;
		/* preview line length (pixel clocks per row) */
		prev_line_length_pck =
			imx072_regs.conf_array[imx072_ctrl->prev_res].
			conf[IMX072_LINE_LENGTH_PCK_HI].wdata << 8 |
			imx072_regs.conf_array[imx072_ctrl->prev_res].
			conf[IMX072_LINE_LENGTH_PCK_LO].wdata;
		/* snapshot frame length */
		snap_frame_length_lines =
			imx072_regs.conf_array[imx072_ctrl->pict_res].
			conf[IMX072_FRAME_LENGTH_LINES_HI].wdata << 8 |
			imx072_regs.conf_array[imx072_ctrl->pict_res].
			conf[IMX072_FRAME_LENGTH_LINES_LO].wdata;
		/* snapshot line length */
		snap_line_length_pck =
			imx072_regs.conf_array[imx072_ctrl->pict_res].
			conf[IMX072_LINE_LENGTH_PCK_HI].wdata << 8 |
			imx072_regs.conf_array[imx072_ctrl->pict_res].
			conf[IMX072_LINE_LENGTH_PCK_LO].wdata;

		rc = imx072_sensor_setting(REG_INIT,
			imx072_ctrl->prev_res);
	}
	return rc;
}
/*
 * Dispatch a mode/resolution change to the matching configuration
 * helper. Unknown modes yield -EINVAL.
 */
static int32_t imx072_set_sensor_mode(int mode,
	int res)
{
	switch (mode) {
	case SENSOR_PREVIEW_MODE:
		imx072_ctrl->prev_res = res;
		return imx072_video_config(mode);
	case SENSOR_SNAPSHOT_MODE:
		imx072_ctrl->pict_res = res;
		return imx072_snapshot_config(mode);
	case SENSOR_RAW_SNAPSHOT_MODE:
		imx072_ctrl->pict_res = res;
		return imx072_raw_snapshot_config(mode);
	default:
		return -EINVAL;
	}
}
/*
 * Integer ceiling division. Every argument use and the whole expansion
 * are parenthesized so the macro is safe with compound expressions:
 * the original expanded x and y bare, so DIV_CEIL(a + b, c) computed
 * a + b/c instead of (a + b)/c. Both arguments are still evaluated
 * twice — do not pass expressions with side effects.
 */
#define DIV_CEIL(x, y) (((x) / (y)) + (((x) % (y)) ? 1 : 0))
/*
 * Move the VCM lens @num_steps table entries toward (MOVE_NEAR) or
 * away from (MOVE_FAR) the subject. The move is broken into small
 * sub-steps with a per-step settle delay ("software damping") to keep
 * the actuator quiet; larger far-direction moves use smaller fractions
 * and a longer wait.
 *
 * Returns 0 on success, a negative errno on bad direction or I2C
 * failure.
 */
static int32_t imx072_move_focus(int direction,
	int32_t num_steps)
{
	int32_t rc = 0;
	int16_t step_direction, dest_lens_position, dest_step_position;
	uint8_t code_val_msb, code_val_lsb;
	int16_t next_lens_position, target_dist, small_step;

	if (direction == MOVE_NEAR)
		step_direction = 1;
	else if (direction == MOVE_FAR)
		step_direction = -1;
	else {
		pr_err("Illegal focus direction\n");
		return -EINVAL;
	}

	/* clamp the target index into [0, TOTAL_STEPS] */
	dest_step_position = imx072_ctrl->curr_step_pos +
		(step_direction * num_steps);
	if (dest_step_position < 0)
		dest_step_position = 0;
	else if (dest_step_position > IMX072_TOTAL_STEPS_NEAR_TO_FAR)
		dest_step_position = IMX072_TOTAL_STEPS_NEAR_TO_FAR;
	if (dest_step_position == imx072_ctrl->curr_step_pos) {
		CDBG("imx072 same position No-Move exit\n");
		return rc;
	}
	CDBG("%s Index = [%d]\n", __func__, dest_step_position);

	/* translate step index to a DAC code via the AF table */
	dest_lens_position = imx072_step_position_table[dest_step_position];
	CDBG("%s lens_position value = %d\n", __func__, dest_lens_position);
	/* distance is made positive by multiplying by the direction */
	target_dist = step_direction * (dest_lens_position -
		imx072_ctrl->curr_lens_pos);

	/* long far-direction moves: finer sub-steps, longer settle */
	if (step_direction < 0 && (target_dist >=
		(imx072_step_position_table[imx072_damping_threshold]
		- imx072_af_initial_code))) {
		small_step = DIV_CEIL(target_dist, 10);
		imx072_sw_damping_time_wait = 30;
	} else {
		small_step = DIV_CEIL(target_dist, 4);
		imx072_sw_damping_time_wait = 20;
	}
	CDBG("%s: small_step:%d, wait_time:%d\n", __func__, small_step,
		imx072_sw_damping_time_wait);

	/* walk toward the destination one sub-step at a time */
	for (next_lens_position = imx072_ctrl->curr_lens_pos +
		(step_direction * small_step);
		(step_direction * next_lens_position) <=
		(step_direction * dest_lens_position);
		next_lens_position += (step_direction * small_step)) {
		/* 10-bit DAC code split: D[9:4] in MSB, D[3:0] in LSB<<4 */
		code_val_msb = ((next_lens_position & 0x03F0) >> 4);
		code_val_lsb = ((next_lens_position & 0x000F) << 4);
		CDBG("position value = %d\n", next_lens_position);
		CDBG("movefocus vcm_msb = %d\n", code_val_msb);
		CDBG("movefocus vcm_lsb = %d\n", code_val_lsb);
		rc = imx072_i2c_write_b_af(code_val_msb, code_val_lsb);
		if (rc < 0) {
			pr_err("imx072_move_focus failed writing i2c\n");
			return rc;
		}
		imx072_ctrl->curr_lens_pos = next_lens_position;
		usleep(imx072_sw_damping_time_wait*100);
	}

	/* final correction when small_step did not divide evenly */
	if (imx072_ctrl->curr_lens_pos != dest_lens_position) {
		code_val_msb = ((dest_lens_position & 0x03F0) >> 4);
		code_val_lsb = ((dest_lens_position & 0x000F) << 4);
		CDBG("position value = %d\n", dest_lens_position);
		CDBG("movefocus vcm_msb = %d\n", code_val_msb);
		CDBG("movefocus vcm_lsb = %d\n", code_val_lsb);
		rc = imx072_i2c_write_b_af(code_val_msb, code_val_lsb);
		if (rc < 0) {
			pr_err("imx072_move_focus failed writing i2c\n");
			return rc;
		}
		usleep(imx072_sw_damping_time_wait * 100);
	}
	imx072_ctrl->curr_lens_pos = dest_lens_position;
	imx072_ctrl->curr_step_pos = dest_step_position;
	return rc;
}
/*
 * Build the AF step -> VCM DAC code lookup table. Entry 0 starts at
 * the initial code; the first imx072_nl_region_boundary1 steps use the
 * non-linear region increment, the rest use the linear increment.
 * Codes are clamped to the 10-bit DAC full scale (1023).
 */
static int32_t imx072_init_focus(void)
{
	uint8_t idx;

	imx072_step_position_table[0] = imx072_af_initial_code;
	for (idx = 1; idx <= IMX072_TOTAL_STEPS_NEAR_TO_FAR; idx++) {
		int increment = (idx <= imx072_nl_region_boundary1) ?
			imx072_nl_region_code_per_step1 :
			imx072_l_region_code_per_step;

		imx072_step_position_table[idx] =
			imx072_step_position_table[idx - 1] + increment;
		if (imx072_step_position_table[idx] > 1023)
			imx072_step_position_table[idx] = 1023;
	}
	imx072_ctrl->curr_lens_pos = 0;
	return 0;
}
/*
 * Return the lens to step 0. If the lens is away from step 0, reuse
 * the damped imx072_move_focus() path; otherwise write the initial DAC
 * code directly.
 */
static int32_t imx072_set_default_focus(void)
{
	int32_t rc = 0;
	uint8_t code_val_msb, code_val_lsb;
	int16_t dest_lens_position = 0;

	CDBG("%s Index = [%d]\n", __func__, 0);
	if (imx072_ctrl->curr_step_pos != 0)
		rc = imx072_move_focus(MOVE_FAR,
			imx072_ctrl->curr_step_pos);
	else {
		dest_lens_position = imx072_af_initial_code;
		/* 10-bit DAC code split: D[9:4] in MSB, D[3:0] in LSB<<4 */
		code_val_msb = ((dest_lens_position & 0x03F0) >> 4);
		code_val_lsb = ((dest_lens_position & 0x000F) << 4);
		CDBG("position value = %d\n", dest_lens_position);
		CDBG("movefocus vcm_msb = %d\n", code_val_msb);
		CDBG("movefocus vcm_lsb = %d\n", code_val_lsb);
		rc = imx072_i2c_write_b_af(code_val_msb, code_val_lsb);
		if (rc < 0) {
			pr_err("imx072_set_default_focus failed writing i2c\n");
			return rc;
		}
		imx072_ctrl->curr_lens_pos = dest_lens_position;
		imx072_ctrl->curr_step_pos = 0;
	}
	/* let the actuator settle */
	usleep(5000);
	return rc;
}
/*
 * Park and power down the AF actuator. The lens is first returned to
 * the default position, then walked down to mechanical infinity in
 * IMX072_TOTAL_STEPS_NEAR_TO_FAR decrements (avoids the audible click
 * of a hard drop), and finally the VCM is put into power-down
 * (0x80 in the MSB).
 */
static int32_t imx072_af_power_down(void)
{
	int32_t rc = 0;
	int32_t i = 0;
	int16_t dest_lens_position = imx072_af_initial_code;

	if (imx072_ctrl->curr_lens_pos != 0) {
		rc = imx072_set_default_focus();
		CDBG("%s after imx072_set_default_focus\n", __func__);
		msleep(40);
		/*to avoid the sound during the power off.
		brings the actuator to mechanical infinity gradually.*/
		for (i = 0; i < IMX072_TOTAL_STEPS_NEAR_TO_FAR; i++) {
			dest_lens_position = dest_lens_position -
				(imx072_af_initial_code /
				IMX072_TOTAL_STEPS_NEAR_TO_FAR);
			CDBG("position value = %d\n", dest_lens_position);
			rc = imx072_i2c_write_b_af(
				((dest_lens_position & 0x03F0) >> 4),
				((dest_lens_position & 0x000F) << 4));
			CDBG("count = %d\n", i);
			msleep(20);
			if (rc < 0) {
				pr_err("imx072_set_default_focus failed writing i2c\n");
				return rc;
			}
		}
		/* final write: DAC code 0 */
		rc = imx072_i2c_write_b_af(0x00, 00);
		msleep(40);
	}
	/* 0x80 = VCM power-down bit */
	rc = imx072_i2c_write_b_af(0x80, 00);
	return rc;
}
/* Sensor power-down entry point; only the AF actuator needs work here. */
static int32_t imx072_power_down(void)
{
	return imx072_af_power_down();
}
/*
 * Shared probe teardown: release the reset GPIO requested in
 * imx072_probe_init_sensor(). (pr_err for an informational message
 * matches the rest of this file's logging style.)
 */
static int imx072_probe_init_done(const struct msm_camera_sensor_info *data)
{
	pr_err("probe done\n");
	gpio_free(data->sensor_reset);
	return 0;
}
/*
 * Request and pulse the sensor reset GPIO, then verify the chip ID
 * over I2C. On ID mismatch the sensor is held in reset, the GPIO is
 * released, and (when a VCM is fitted) its power GPIO is driven low.
 *
 * Fixes vs. original: the "sensor_reset = %d" log printed rc (always
 * 0 inside this branch) instead of the GPIO number, and the mismatch
 * message had a typo.
 */
static int imx072_probe_init_sensor(
	const struct msm_camera_sensor_info *data)
{
	int32_t rc = 0;
	uint16_t chipid = 0;

	CDBG("%s: %d\n", __func__, __LINE__);
	rc = gpio_request(data->sensor_reset, "imx072");
	CDBG(" imx072_probe_init_sensor\n");
	if (!rc) {
		pr_err("sensor_reset = %d\n", data->sensor_reset);
		/* hold in reset, then release */
		gpio_direction_output(data->sensor_reset, 0);
		msleep(50);
		gpio_set_value_cansleep(data->sensor_reset, 1);
		msleep(20);
	} else
		goto gpio_req_fail;

	CDBG(" imx072_probe_init_sensor is called\n");
	rc = imx072_i2c_read(0x0, &chipid, 2);
	CDBG("ID: %d\n", chipid);
	/* Compare sensor ID to IMX072 ID: */
	if (chipid != 0x0045) {
		rc = -ENODEV;
		pr_err("imx072_probe_init_sensor chip id does not match\n");
		goto init_probe_fail;
	}
	return rc;

init_probe_fail:
	pr_err(" imx072_probe_init_sensor fails\n");
	gpio_set_value_cansleep(data->sensor_reset, 0);
	imx072_probe_init_done(data);
	if (data->vcm_enable) {
		int ret = gpio_request(data->vcm_pwd, "imx072_af");
		if (!ret) {
			gpio_direction_output(data->vcm_pwd, 0);
			msleep(20);
			gpio_free(data->vcm_pwd);
		}
	}
gpio_req_fail:
	return rc;
}
int imx072_sensor_open_init(const struct msm_camera_sensor_info *data)
{
int32_t rc = 0;
CDBG("%s: %d\n", __func__, __LINE__);
imx072_ctrl = kzalloc(sizeof(struct imx072_ctrl_t), GFP_KERNEL);
if (!imx072_ctrl) {
pr_err("imx072_init failed!\n");
rc = -ENOMEM;
goto init_done;
}
imx072_ctrl->fps_divider = 1 * 0x00000400;
imx072_ctrl->pict_fps_divider = 1 * 0x00000400;
imx072_ctrl->set_test = TEST_OFF;
imx072_ctrl->cam_mode = MODE_INVALID;
if (data)
imx072_ctrl->sensordata = data;
if (rc < 0) {
pr_err("Calling imx072_sensor_open_init fail1\n");
return rc;
}
CDBG("%s: %d\n", __func__, __LINE__);
/* enable mclk first */
msm_camio_clk_rate_set(IMX072_MASTER_CLK_RATE);
rc = imx072_probe_init_sensor(data);
if (rc < 0)
goto init_fail;
imx072_init_focus();
imx072_ctrl->fps = 30*Q8;
if (rc < 0) {
gpio_set_value_cansleep(data->sensor_reset, 0);
goto init_fail;
} else
goto init_done;
init_fail:
pr_err("init_fail\n");
imx072_probe_init_done(data);
init_done:
pr_err("init_done\n");
return rc;
}
/* Per-client setup: prepare the wait queue used by the I2C helpers. */
static int imx072_init_client(struct i2c_client *client)
{
	/* Initialize the MSM_CAMI2C Chip */
	init_waitqueue_head(&imx072_wait_queue);
	return 0;
}
static const struct i2c_device_id imx072_i2c_id[] = {
{"imx072", 0},
{ }
};
static int imx072_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int rc = 0;
CDBG("imx072_probe called!\n");
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
pr_err("i2c_check_functionality failed\n");
goto probe_failure;
}
imx072_sensorw = kzalloc(sizeof(struct imx072_work_t),
GFP_KERNEL);
if (!imx072_sensorw) {
pr_err("kzalloc failed.\n");
rc = -ENOMEM;
goto probe_failure;
}
i2c_set_clientdata(client, imx072_sensorw);
imx072_init_client(client);
imx072_client = client;
msleep(50);
CDBG("imx072_probe successed! rc = %d\n", rc);
return 0;
probe_failure:
pr_err("imx072_probe failed! rc = %d\n", rc);
return rc;
}
/* AWB info hook (CFG_SEND_WB_INFO) — intentionally a no-op here. */
static int imx072_send_wb_info(struct wb_info_cfg *wb)
{
	return 0;
}
/*
 * I2C remove callback: release the client IRQ and the per-client work
 * structure allocated in imx072_i2c_probe().
 *
 * Fix vs. original: the clientdata pointer was declared as the
 * non-existent "struct imx072_work_t_t" — a typo for imx072_work_t.
 * It compiled only because the pointee is never dereferenced here.
 */
static int __exit imx072_remove(struct i2c_client *client)
{
	struct imx072_work_t *sensorw = i2c_get_clientdata(client);

	free_irq(client->irq, sensorw);
	imx072_client = NULL;
	kfree(sensorw);
	return 0;
}
static struct i2c_driver imx072_i2c_driver = {
.id_table = imx072_i2c_id,
.probe = imx072_i2c_probe,
.remove = __exit_p(imx072_i2c_remove),
.driver = {
.name = "imx072",
},
};
/*
 * Userspace ioctl dispatcher (msm_sensor_ctrl.s_config). Copies a
 * sensor_cfg_data from @argp, performs the requested operation under
 * imx072_mut, and copies the structure back for the "get" cfgtypes.
 * Returns 0 on success, -EFAULT on copy failure or unknown cfgtype.
 */
int imx072_sensor_config(void __user *argp)
{
	struct sensor_cfg_data cdata;
	long rc = 0;

	if (copy_from_user(&cdata,
		(void *)argp,
		sizeof(struct sensor_cfg_data)))
		return -EFAULT;
	/* serialize all sensor operations */
	mutex_lock(&imx072_mut);
	CDBG("imx072_sensor_config: cfgtype = %d\n",
		cdata.cfgtype);
	switch (cdata.cfgtype) {
	case CFG_GET_PICT_FPS:
		imx072_get_pict_fps(
			cdata.cfg.gfps.prevfps,
			&(cdata.cfg.gfps.pictfps));
		if (copy_to_user((void *)argp,
			&cdata,
			sizeof(struct sensor_cfg_data)))
			rc = -EFAULT;
		break;
	case CFG_GET_PREV_L_PF:
		cdata.cfg.prevl_pf =
			imx072_get_prev_lines_pf();
		if (copy_to_user((void *)argp,
			&cdata,
			sizeof(struct sensor_cfg_data)))
			rc = -EFAULT;
		break;
	case CFG_GET_PREV_P_PL:
		cdata.cfg.prevp_pl =
			imx072_get_prev_pixels_pl();
		if (copy_to_user((void *)argp,
			&cdata,
			sizeof(struct sensor_cfg_data)))
			rc = -EFAULT;
		break;
	case CFG_GET_PICT_L_PF:
		cdata.cfg.pictl_pf =
			imx072_get_pict_lines_pf();
		if (copy_to_user((void *)argp,
			&cdata,
			sizeof(struct sensor_cfg_data)))
			rc = -EFAULT;
		break;
	case CFG_GET_PICT_P_PL:
		cdata.cfg.pictp_pl =
			imx072_get_pict_pixels_pl();
		if (copy_to_user((void *)argp,
			&cdata,
			sizeof(struct sensor_cfg_data)))
			rc = -EFAULT;
		break;
	case CFG_GET_PICT_MAX_EXP_LC:
		cdata.cfg.pict_max_exp_lc =
			imx072_get_pict_max_exp_lc();
		if (copy_to_user((void *)argp,
			&cdata,
			sizeof(struct sensor_cfg_data)))
			rc = -EFAULT;
		break;
	case CFG_SET_FPS:
	case CFG_SET_PICT_FPS:
		rc = imx072_set_fps(&(cdata.cfg.fps));
		break;
	case CFG_SET_EXP_GAIN:
		rc = imx072_write_exp_gain(
			cdata.cfg.exp_gain.gain,
			cdata.cfg.exp_gain.line);
		break;
	case CFG_SET_PICT_EXP_GAIN:
		rc = imx072_set_pict_exp_gain(
			cdata.cfg.exp_gain.gain,
			cdata.cfg.exp_gain.line);
		break;
	case CFG_SET_MODE:
		rc = imx072_set_sensor_mode(cdata.mode, cdata.rs);
		break;
	case CFG_PWR_DOWN:
		rc = imx072_power_down();
		break;
	case CFG_MOVE_FOCUS:
		rc = imx072_move_focus(cdata.cfg.focus.dir,
			cdata.cfg.focus.steps);
		break;
	case CFG_SET_DEFAULT_FOCUS:
		/* NOTE(review): return value deliberately ignored here */
		imx072_set_default_focus();
		break;
	case CFG_GET_AF_MAX_STEPS:
		cdata.max_steps = IMX072_TOTAL_STEPS_NEAR_TO_FAR;
		if (copy_to_user((void *)argp,
			&cdata,
			sizeof(struct sensor_cfg_data)))
			rc = -EFAULT;
		break;
	case CFG_SET_EFFECT:
		break;
	case CFG_SEND_WB_INFO:
		rc = imx072_send_wb_info(
			&(cdata.cfg.wb_info));
		break;
	case CFG_SENSOR_INIT:
		rc = imx072_mode_init(cdata.mode,
			cdata.cfg.init_info);
		break;
	case CFG_SET_LENS_SHADING:
		break;
	default:
		rc = -EFAULT;
		break;
	}
	mutex_unlock(&imx072_mut);
	return rc;
}
/*
 * s_release callback: power down the sensor, drop and free the reset
 * (and, if fitted, VCM) GPIOs and release the driver state.
 *
 * NOTE(review): rc is initialized to -EBADF and never updated, so this
 * always returns -EBADF even after a clean teardown — confirm callers
 * ignore the return value before relying on it.
 */
static int imx072_sensor_release(void)
{
	int rc = -EBADF;

	mutex_lock(&imx072_mut);
	imx072_power_down();
	gpio_set_value_cansleep(imx072_ctrl->sensordata->sensor_reset, 0);
	msleep(20);
	gpio_free(imx072_ctrl->sensordata->sensor_reset);
	if (imx072_ctrl->sensordata->vcm_enable) {
		gpio_set_value_cansleep(imx072_ctrl->sensordata->vcm_pwd, 0);
		gpio_free(imx072_ctrl->sensordata->vcm_pwd);
	}
	kfree(imx072_ctrl);
	imx072_ctrl = NULL;
	pr_err("imx072_release completed\n");
	mutex_unlock(&imx072_mut);
	return rc;
}
/*
 * msm_camera probe entry: register the I2C driver, verify the sensor
 * responds, wire the open/release/config callbacks into @s, then put
 * the sensor back in reset until userspace opens it. When a VCM is
 * fitted, its power GPIO is briefly driven low as well.
 */
static int imx072_sensor_probe(const struct msm_camera_sensor_info *info,
	struct msm_sensor_ctrl *s)
{
	int rc = 0;

	rc = i2c_add_driver(&imx072_i2c_driver);
	/* imx072_client is set by a successful imx072_i2c_probe() */
	if (rc < 0 || imx072_client == NULL) {
		rc = -ENOTSUPP;
		pr_err("I2C add driver failed");
		goto probe_fail;
	}
	msm_camio_clk_rate_set(IMX072_MASTER_CLK_RATE);
	rc = imx072_probe_init_sensor(info);
	if (rc < 0)
		goto probe_fail;

	s->s_init = imx072_sensor_open_init;
	s->s_release = imx072_sensor_release;
	s->s_config = imx072_sensor_config;
	s->s_mount_angle = info->sensor_platform_info->mount_angle;
	/* leave the sensor in reset until s_init */
	gpio_set_value_cansleep(info->sensor_reset, 0);
	imx072_probe_init_done(info);
	if (info->vcm_enable) {
		rc = gpio_request(info->vcm_pwd, "imx072_af");
		if (!rc) {
			gpio_direction_output(info->vcm_pwd, 0);
			msleep(20);
			gpio_free(info->vcm_pwd);
		} else
			return rc;
	}
	pr_info("imx072_sensor_probe : SUCCESS\n");
	return rc;

probe_fail:
	pr_err("imx072_sensor_probe: SENSOR PROBE FAILS!\n");
	return rc;
}
static int __imx072_probe(struct platform_device *pdev)
{
return msm_camera_drv_start(pdev, imx072_sensor_probe);
}
static struct platform_driver msm_camera_driver = {
.probe = __imx072_probe,
.driver = {
.name = "msm_camera_imx072",
.owner = THIS_MODULE,
},
};
static int __init imx072_init(void)
{
return platform_driver_register(&msm_camera_driver);
}
module_init(imx072_init);
void imx072_exit(void)
{
i2c_del_driver(&imx072_i2c_driver);
}
/* IMX072 is a Sony part; the original description ("Aptina 8 MP") was
 * copy-pasted from another driver. */
MODULE_DESCRIPTION("Sony IMX072 Bayer sensor driver");
MODULE_LICENSE("GPL v2");
#ifdef CONFIG_DEBUG_FS
static bool streaming = 1;
/* debugfs "stream" write handler: nonzero starts streaming, zero stops. */
static int cam_debug_stream_set(void *data, u64 val)
{
	if (val)
		imx072_start_stream();
	else
		imx072_stop_stream();
	streaming = val ? 1 : 0;
	return 0;
}
/* debugfs "stream" read handler: report the cached streaming flag. */
static int cam_debug_stream_get(void *data, u64 *val)
{
	*val = streaming;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(cam_stream, cam_debug_stream_get,
cam_debug_stream_set, "%llu\n");
/* -------- debugfs AF tuning/test helpers (see cam_debug_init) -------- */

/* Write handler: change the linear-region code-per-step and rebuild
 * the AF table. */
static int imx072_set_af_codestep(void *data, u64 val)
{
	imx072_l_region_code_per_step = val;
	imx072_init_focus();
	return 0;
}
/* Read handler: report the current linear-region code-per-step. */
static int imx072_get_af_codestep(void *data, u64 *val)
{
	*val = imx072_l_region_code_per_step;
	return 0;
}
/* Number of steps the linearity test sweeps in each direction. */
static uint16_t imx072_linear_total_step = IMX072_TOTAL_STEPS_NEAR_TO_FAR;
static int imx072_set_linear_total_step(void *data, u64 val)
{
	imx072_linear_total_step = val;
	return 0;
}
/* Read-triggered test: sweep the lens NEAR then FAR one step at a
 * time with 1 s pauses so the motion can be observed. Slow by design;
 * debugfs only. */
static int imx072_af_linearity_test(void *data, u64 *val)
{
	int i = 0;

	imx072_set_default_focus();
	msleep(3000);
	for (i = 0; i < imx072_linear_total_step; i++) {
		imx072_move_focus(MOVE_NEAR, 1);
		CDBG("moved to index =[%d]\n", i);
		msleep(1000);
	}
	for (i = 0; i < imx072_linear_total_step; i++) {
		imx072_move_focus(MOVE_FAR, 1);
		CDBG("moved to index =[%d]\n", i);
		msleep(1000);
	}
	return 0;
}
/* Step-test parameters: low 16 bits = step count, bit 16 = direction. */
static uint16_t imx072_step_val = IMX072_TOTAL_STEPS_NEAR_TO_FAR;
static uint8_t imx072_step_dir = MOVE_NEAR;
static int imx072_af_step_config(void *data, u64 val)
{
	imx072_step_val = val & 0xFFFF;
	imx072_step_dir = (val >> 16) & 0x1;
	return 0;
}
/* Read-triggered test: move in 4-step jumps per the configured
 * parameters, then return to the default position. */
static int imx072_af_step(void *data, u64 *val)
{
	int i = 0;
	int dir = MOVE_NEAR;

	imx072_set_default_focus();
	msleep(3000);
	if (imx072_step_dir == 1)
		dir = MOVE_FAR;
	for (i = 0; i < imx072_step_val; i += 4) {
		imx072_move_focus(dir, 4);
		msleep(1000);
	}
	imx072_set_default_focus();
	msleep(3000);
	return 0;
}
/* Write handler: any write rebuilds the AF table (value ignored). */
static int imx072_af_set_resolution(void *data, u64 val)
{
	imx072_init_focus();
	return 0;
}
/* Read handler: fixed marker value; resolution is not tracked. */
static int imx072_af_get_resolution(void *data, u64 *val)
{
	*val = 0xFF;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(af_codeperstep, imx072_get_af_codestep,
	imx072_set_af_codestep, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(af_linear, imx072_af_linearity_test,
	imx072_set_linear_total_step, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(af_step, imx072_af_step,
	imx072_af_step_config, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(af_step_res, imx072_af_get_resolution,
	imx072_af_set_resolution, "%llu\n");
/*
 * Create the debugfs tree sensor/imx072/{stream, af_codeperstep,
 * af_linear, af_step, af_step_res}.
 *
 * Fix vs. original: on any failure after the top-level directory was
 * created, the partially built tree was leaked. All failures now
 * funnel through one label that removes the whole tree.
 */
static int cam_debug_init(void)
{
	struct dentry *cam_dir;

	debugfs_base = debugfs_create_dir("sensor", NULL);
	if (!debugfs_base)
		return -ENOMEM;
	cam_dir = debugfs_create_dir("imx072", debugfs_base);
	if (!cam_dir)
		goto err_remove;
	if (!debugfs_create_file("stream", S_IRUGO | S_IWUSR, cam_dir,
		NULL, &cam_stream))
		goto err_remove;
	if (!debugfs_create_file("af_codeperstep", S_IRUGO | S_IWUSR, cam_dir,
		NULL, &af_codeperstep))
		goto err_remove;
	if (!debugfs_create_file("af_linear", S_IRUGO | S_IWUSR, cam_dir,
		NULL, &af_linear))
		goto err_remove;
	if (!debugfs_create_file("af_step", S_IRUGO | S_IWUSR, cam_dir,
		NULL, &af_step))
		goto err_remove;
	if (!debugfs_create_file("af_step_res", S_IRUGO | S_IWUSR, cam_dir,
		NULL, &af_step_res))
		goto err_remove;
	return 0;

err_remove:
	debugfs_remove_recursive(debugfs_base);
	debugfs_base = NULL;
	return -ENOMEM;
}
#endif
| gpl-2.0 |
davepmer/test-kernel | drivers/input/joystick/as5011.c | 3043 | 9361 | /*
* Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
* Sponsored by ARMadeus Systems
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Driver for Austria Microsystems joysticks AS5011
*
* TODO:
* - Power on the chip when open() and power down when close()
* - Manage power mode
*/
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/input/as5011.h>
#include <linux/slab.h>
#define DRIVER_DESC "Driver for Austria Microsystems AS5011 joystick"
#define MODULE_DEVICE_ALIAS "as5011"
MODULE_AUTHOR("Fabien Marteau <fabien.marteau@armadeus.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/* registers */
#define AS5011_CTRL1 0x76
#define AS5011_CTRL2 0x75
#define AS5011_XP 0x43
#define AS5011_XN 0x44
#define AS5011_YP 0x53
#define AS5011_YN 0x54
#define AS5011_X_REG 0x41
#define AS5011_Y_REG 0x42
#define AS5011_X_RES_INT 0x51
#define AS5011_Y_RES_INT 0x52
/* CTRL1 bits */
#define AS5011_CTRL1_LP_PULSED 0x80
#define AS5011_CTRL1_LP_ACTIVE 0x40
#define AS5011_CTRL1_LP_CONTINUE 0x20
#define AS5011_CTRL1_INT_WUP_EN 0x10
#define AS5011_CTRL1_INT_ACT_EN 0x08
#define AS5011_CTRL1_EXT_CLK_EN 0x04
#define AS5011_CTRL1_SOFT_RST 0x02
#define AS5011_CTRL1_DATA_VALID 0x01
/* CTRL2 bits */
#define AS5011_CTRL2_EXT_SAMPLE_EN 0x08
#define AS5011_CTRL2_RC_BIAS_ON 0x04
#define AS5011_CTRL2_INV_SPINNING 0x02
#define AS5011_MAX_AXIS 80
#define AS5011_MIN_AXIS (-80)
#define AS5011_FUZZ 8
#define AS5011_FLAT 40
struct as5011_device {
struct input_dev *input_dev;
struct i2c_client *i2c_client;
unsigned int button_gpio;
unsigned int button_irq;
unsigned int axis_irq;
};
/*
 * Write one register: a single message carrying {reg, value}.
 * I2C_M_IGNORE_NAK matches the chip's ack-less write behaviour.
 */
static int as5011_i2c_write(struct i2c_client *client,
			    uint8_t aregaddr,
			    uint8_t avalue)
{
	uint8_t payload[2] = { aregaddr, avalue };
	struct i2c_msg msg = {
		.addr = client->addr,
		.flags = I2C_M_IGNORE_NAK,
		.len = sizeof(payload),
		.buf = payload,
	};
	int ret;

	ret = i2c_transfer(client->adapter, &msg, 1);
	return ret < 0 ? ret : 0;
}
/*
 * Read one register. The chip uses a non-standard sequence, hence the
 * protocol-mangling flags: the register address is sent with the
 * direction bit reversed (I2C_M_REV_DIR_ADDR), then the value is read
 * back without a repeated start (I2C_M_NOSTART). The raw byte is
 * manually sign-extended into *value (two's complement from bit 7).
 */
static int as5011_i2c_read(struct i2c_client *client,
			   uint8_t aregaddr, signed char *value)
{
	uint8_t data[2] = { aregaddr };
	struct i2c_msg msg_set[2] = {
		{ client->addr, I2C_M_REV_DIR_ADDR, 1, (uint8_t *)data },
		{ client->addr, I2C_M_RD | I2C_M_NOSTART, 1, (uint8_t *)data }
	};
	int error;

	error = i2c_transfer(client->adapter, msg_set, 2);
	if (error < 0)
		return error;

	/* sign-extend the 8-bit register value */
	*value = data[0] & 0x80 ? -1 * (1 + ~data[0]) : data[0];
	return 0;
}
/* Button IRQ (threaded): GPIO is active-low, so report the inverse. */
static irqreturn_t as5011_button_interrupt(int irq, void *dev_id)
{
	struct as5011_device *as5011 = dev_id;
	int pressed = !gpio_get_value_cansleep(as5011->button_gpio);

	input_report_key(as5011->input_dev, BTN_JOYSTICK, pressed);
	input_sync(as5011->input_dev);

	return IRQ_HANDLED;
}
/*
 * Axis IRQ (threaded): read both axis result registers and push them
 * to the input layer. A failed read silently skips this sample.
 */
static irqreturn_t as5011_axis_interrupt(int irq, void *dev_id)
{
	struct as5011_device *as5011 = dev_id;
	signed char x, y;

	if (as5011_i2c_read(as5011->i2c_client, AS5011_X_RES_INT, &x) < 0)
		return IRQ_HANDLED;
	if (as5011_i2c_read(as5011->i2c_client, AS5011_Y_RES_INT, &y) < 0)
		return IRQ_HANDLED;

	input_report_abs(as5011->input_dev, ABS_X, x);
	input_report_abs(as5011->input_dev, ABS_Y, y);
	input_sync(as5011->input_dev);

	return IRQ_HANDLED;
}
/*
 * One-time chip setup: soft reset, low-power/interrupt configuration,
 * inverted spinning, per-axis wake-up thresholds from platform data,
 * and a dummy X read to deassert the chip's interrupt line.
 */
static int __devinit as5011_configure_chip(struct as5011_device *as5011,
	const struct as5011_platform_data *plat_dat)
{
	struct i2c_client *client = as5011->i2c_client;
	int error;
	signed char value;

	/* chip soft reset */
	error = as5011_i2c_write(client, AS5011_CTRL1,
		AS5011_CTRL1_SOFT_RST);
	if (error < 0) {
		dev_err(&client->dev, "Soft reset failed\n");
		return error;
	}
	/* let the reset complete before reprogramming */
	mdelay(10);

	error = as5011_i2c_write(client, AS5011_CTRL1,
		AS5011_CTRL1_LP_PULSED |
		AS5011_CTRL1_LP_ACTIVE |
		AS5011_CTRL1_INT_ACT_EN);
	if (error < 0) {
		dev_err(&client->dev, "Power config failed\n");
		return error;
	}

	error = as5011_i2c_write(client, AS5011_CTRL2,
		AS5011_CTRL2_INV_SPINNING);
	if (error < 0) {
		dev_err(&client->dev, "Can't invert spinning\n");
		return error;
	}

	/* write threshold */
	error = as5011_i2c_write(client, AS5011_XP, plat_dat->xp);
	if (error < 0) {
		dev_err(&client->dev, "Can't write threshold\n");
		return error;
	}

	error = as5011_i2c_write(client, AS5011_XN, plat_dat->xn);
	if (error < 0) {
		dev_err(&client->dev, "Can't write threshold\n");
		return error;
	}

	error = as5011_i2c_write(client, AS5011_YP, plat_dat->yp);
	if (error < 0) {
		dev_err(&client->dev, "Can't write threshold\n");
		return error;
	}

	error = as5011_i2c_write(client, AS5011_YN, plat_dat->yn);
	if (error < 0) {
		dev_err(&client->dev, "Can't write threshold\n");
		return error;
	}

	/* to free irq gpio in chip */
	error = as5011_i2c_read(client, AS5011_X_RES_INT, &value);
	if (error < 0) {
		dev_err(&client->dev, "Can't read i2c X resolution value\n");
		return error;
	}

	return 0;
}
/*
 * Probe: validate platform data and bus capabilities, allocate device
 * and input structures, request the button GPIO/IRQ and axis IRQ,
 * configure the chip and register the input device. Resources are
 * released in reverse order on failure.
 *
 * Fixes vs. original: when gpio_to_irq() failed, `error` was never
 * set, so the function freed everything yet returned 0 (success); it
 * now propagates the irq error (same fix as mainline). The ABS_Y
 * setup also now uses the same `input_dev` local as ABS_X.
 */
static int __devinit as5011_probe(struct i2c_client *client,
	const struct i2c_device_id *id)
{
	const struct as5011_platform_data *plat_data;
	struct as5011_device *as5011;
	struct input_dev *input_dev;
	int irq;
	int error;

	plat_data = client->dev.platform_data;
	if (!plat_data)
		return -EINVAL;

	if (!plat_data->axis_irq) {
		dev_err(&client->dev, "No axis IRQ?\n");
		return -EINVAL;
	}

	/* the read sequence needs I2C_M_REV_DIR_ADDR / I2C_M_NOSTART */
	if (!i2c_check_functionality(client->adapter,
		I2C_FUNC_PROTOCOL_MANGLING)) {
		dev_err(&client->dev,
			"need i2c bus that supports protocol mangling\n");
		return -ENODEV;
	}

	as5011 = kmalloc(sizeof(struct as5011_device), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!as5011 || !input_dev) {
		dev_err(&client->dev,
			"Can't allocate memory for device structure\n");
		error = -ENOMEM;
		goto err_free_mem;
	}

	as5011->i2c_client = client;
	as5011->input_dev = input_dev;
	as5011->button_gpio = plat_data->button_gpio;
	as5011->axis_irq = plat_data->axis_irq;

	input_dev->name = "Austria Microsystem as5011 joystick";
	input_dev->id.bustype = BUS_I2C;
	input_dev->dev.parent = &client->dev;

	__set_bit(EV_KEY, input_dev->evbit);
	__set_bit(EV_ABS, input_dev->evbit);
	__set_bit(BTN_JOYSTICK, input_dev->keybit);

	input_set_abs_params(input_dev, ABS_X,
		AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT);
	input_set_abs_params(input_dev, ABS_Y,
		AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT);

	error = gpio_request(as5011->button_gpio, "AS5011 button");
	if (error < 0) {
		dev_err(&client->dev, "Failed to request button gpio\n");
		goto err_free_mem;
	}

	irq = gpio_to_irq(as5011->button_gpio);
	if (irq < 0) {
		dev_err(&client->dev,
			"Failed to get irq number for button gpio\n");
		error = irq;
		goto err_free_button_gpio;
	}

	as5011->button_irq = irq;

	error = request_threaded_irq(as5011->button_irq,
		NULL, as5011_button_interrupt,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
		"as5011_button", as5011);
	if (error < 0) {
		dev_err(&client->dev,
			"Can't allocate button irq %d\n", as5011->button_irq);
		goto err_free_button_gpio;
	}

	error = as5011_configure_chip(as5011, plat_data);
	if (error)
		goto err_free_button_irq;

	error = request_threaded_irq(as5011->axis_irq, NULL,
		as5011_axis_interrupt,
		plat_data->axis_irqflags,
		"as5011_joystick", as5011);
	if (error) {
		dev_err(&client->dev,
			"Can't allocate axis irq %d\n", plat_data->axis_irq);
		goto err_free_button_irq;
	}

	error = input_register_device(as5011->input_dev);
	if (error) {
		dev_err(&client->dev, "Failed to register input device\n");
		goto err_free_axis_irq;
	}

	i2c_set_clientdata(client, as5011);

	return 0;

err_free_axis_irq:
	free_irq(as5011->axis_irq, as5011);
err_free_button_irq:
	free_irq(as5011->button_irq, as5011);
err_free_button_gpio:
	gpio_free(as5011->button_gpio);
err_free_mem:
	input_free_device(input_dev);
	kfree(as5011);

	return error;
}
/*
 * Remove: quiesce both IRQs first so no handler touches the device
 * while it is being torn down, then release the GPIO, unregister the
 * input device (which frees it) and free the driver state.
 */
static int __devexit as5011_remove(struct i2c_client *client)
{
	struct as5011_device *as5011 = i2c_get_clientdata(client);

	free_irq(as5011->axis_irq, as5011);
	free_irq(as5011->button_irq, as5011);
	gpio_free(as5011->button_gpio);
	input_unregister_device(as5011->input_dev);
	kfree(as5011);
	return 0;
}
static const struct i2c_device_id as5011_id[] = {
{ MODULE_DEVICE_ALIAS, 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, as5011_id);
static struct i2c_driver as5011_driver = {
.driver = {
.name = "as5011",
},
.probe = as5011_probe,
.remove = __devexit_p(as5011_remove),
.id_table = as5011_id,
};
/* Register the AS5011 I2C driver at module load. */
static int __init as5011_init(void)
{
	return i2c_add_driver(&as5011_driver);
}
module_init(as5011_init);

/* Unregister the driver at module unload. */
static void __exit as5011_exit(void)
{
	i2c_del_driver(&as5011_driver);
}
module_exit(as5011_exit);
| gpl-2.0 |
VeryLettuce/LG_F120K_Kernel | fs/readdir.c | 4323 | 7025 | /*
* linux/fs/readdir.c
*
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/dirent.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
#include <asm/uaccess.h>
/*
 * Core directory-iteration helper: check that the file is a readable
 * directory, take i_mutex, and let the filesystem's ->readdir() feed
 * each entry into @filler with @buf as the callback cookie.
 */
int vfs_readdir(struct file *file, filldir_t filler, void *buf)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int res = -ENOTDIR;

	/* only directories provide a readdir operation */
	if (!file->f_op || !file->f_op->readdir)
		goto out;

	res = security_file_permission(file, MAY_READ);
	if (res)
		goto out;

	/* killable: a fatal signal aborts the wait for i_mutex */
	res = mutex_lock_killable(&inode->i_mutex);
	if (res)
		goto out;

	res = -ENOENT;
	/* skip directories removed while we waited for the lock */
	if (!IS_DEADDIR(inode)) {
		res = file->f_op->readdir(file, buf, filler);
		file_accessed(file);
	}
	mutex_unlock(&inode->i_mutex);
out:
	return res;
}
EXPORT_SYMBOL(vfs_readdir);
/*
* Traditional linux readdir() handling..
*
* "count=1" is a special case, meaning that the buffer is one
* dirent-structure in size and that the code can't handle more
* anyway. Thus the special "fillonedir()" function for that
* case (the low-level handlers don't need to care about this).
*/
#ifdef __ARCH_WANT_OLD_READDIR
struct old_linux_dirent {
unsigned long d_ino;
unsigned long d_offset;
unsigned short d_namlen;
char d_name[1];
};
struct readdir_callback {
struct old_linux_dirent __user * dirent;
int result;
};
/*
 * filldir callback for the legacy one-entry old_readdir(): copy a
 * single directory entry into the user's old_linux_dirent and refuse
 * any further entries (buf->result doubles as an entry count and a
 * "stop" flag).
 */
static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset,
		      u64 ino, unsigned int d_type)
{
	struct readdir_callback * buf = (struct readdir_callback *) __buf;
	struct old_linux_dirent __user * dirent;
	unsigned long d_ino;

	/* one entry max: a second call is an error */
	if (buf->result)
		return -EINVAL;
	d_ino = ino;
	/* 32-bit d_ino cannot represent a 64-bit inode number */
	if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
		buf->result = -EOVERFLOW;
		return -EOVERFLOW;
	}
	buf->result++;
	dirent = buf->dirent;
	/* validate the whole user record incl. name + NUL up front */
	if (!access_ok(VERIFY_WRITE, dirent,
			(unsigned long)(dirent->d_name + namlen + 1) -
				(unsigned long)dirent))
		goto efault;
	if (	__put_user(d_ino, &dirent->d_ino) ||
		__put_user(offset, &dirent->d_offset) ||
		__put_user(namlen, &dirent->d_namlen) ||
		__copy_to_user(dirent->d_name, name, namlen) ||
		__put_user(0, dirent->d_name + namlen))
		goto efault;
	return 0;
efault:
	buf->result = -EFAULT;
	return -EFAULT;
}
/*
 * Legacy readdir(2): returns at most one directory entry per call.
 * @count is ignored beyond the API contract (buffer holds exactly one
 * old_linux_dirent). Returns 1 when an entry was emitted, 0 at EOF,
 * negative errno on error.
 */
SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
		struct old_linux_dirent __user *, dirent, unsigned int, count)
{
	int error;
	struct file * file;
	struct readdir_callback buf;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.result = 0;
	buf.dirent = dirent;

	error = vfs_readdir(file, fillonedir, &buf);
	/* buf.result: entry count on success, negative errno on failure */
	if (buf.result)
		error = buf.result;

	fput(file);
out:
	return error;
}
#endif /* __ARCH_WANT_OLD_READDIR */
/*
* New, all-improved, singing, dancing, iBCS2-compliant getdents()
* interface.
*/
struct linux_dirent {
unsigned long d_ino;
unsigned long d_off;
unsigned short d_reclen;
char d_name[1];
};
struct getdents_callback {
struct linux_dirent __user * current_dir;
struct linux_dirent __user * previous;
int count;
int error;
};
/*
 * filldir callback for getdents(2): append one linux_dirent to the
 * user buffer. Records are long-aligned; the "+ 2" in reclen makes
 * room for the NUL terminator and the d_type byte that is stashed in
 * the record's last byte. The previous record's d_off is back-patched
 * with this entry's offset once the next entry arrives.
 */
static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
		   u64 ino, unsigned int d_type)
{
	struct linux_dirent __user * dirent;
	struct getdents_callback * buf = (struct getdents_callback *) __buf;
	unsigned long d_ino;
	int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
		sizeof(long));

	buf->error = -EINVAL;	/* only used if we fail.. */
	/* stop iteration when the user buffer is full */
	if (reclen > buf->count)
		return -EINVAL;
	d_ino = ino;
	/* 32-bit d_ino cannot represent a 64-bit inode number */
	if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) {
		buf->error = -EOVERFLOW;
		return -EOVERFLOW;
	}
	dirent = buf->previous;
	if (dirent) {
		/* back-patch previous entry's d_off to this offset */
		if (__put_user(offset, &dirent->d_off))
			goto efault;
	}
	dirent = buf->current_dir;
	if (__put_user(d_ino, &dirent->d_ino))
		goto efault;
	if (__put_user(reclen, &dirent->d_reclen))
		goto efault;
	if (copy_to_user(dirent->d_name, name, namlen))
		goto efault;
	if (__put_user(0, dirent->d_name + namlen))
		goto efault;
	/* d_type lives in the record's final byte */
	if (__put_user(d_type, (char __user *) dirent + reclen - 1))
		goto efault;
	buf->previous = dirent;
	dirent = (void __user *)dirent + reclen;
	buf->current_dir = dirent;
	buf->count -= reclen;
	return 0;
efault:
	buf->error = -EFAULT;
	return -EFAULT;
}
/*
 * getdents(2): fill @dirent with as many linux_dirent records as fit
 * in @count bytes. Returns the number of bytes written, 0 at EOF, or
 * a negative errno. The final record's d_off is patched to the
 * current file position after iteration finishes.
 */
SYSCALL_DEFINE3(getdents, unsigned int, fd,
		struct linux_dirent __user *, dirent, unsigned int, count)
{
	struct file * file;
	struct linux_dirent __user * lastdirent;
	struct getdents_callback buf;
	int error;

	error = -EFAULT;
	if (!access_ok(VERIFY_WRITE, dirent, count))
		goto out;

	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;

	buf.current_dir = dirent;
	buf.previous = NULL;
	buf.count = count;
	buf.error = 0;

	error = vfs_readdir(file, filldir, &buf);
	if (error >= 0)
		error = buf.error;
	lastdirent = buf.previous;
	/* lastdirent == NULL means no entry was emitted (EOF) */
	if (lastdirent) {
		if (put_user(file->f_pos, &lastdirent->d_off))
			error = -EFAULT;
		else
			error = count - buf.count;	/* bytes consumed */
	}
	fput(file);
out:
	return error;
}
/* cursor state for the 64-bit getdents64() variant (linux_dirent64) */
struct getdents_callback64 {
	struct linux_dirent64 __user * current_dir;	/* where the next record goes */
	struct linux_dirent64 __user * previous;	/* last record, for d_off back-fill */
	int count;	/* bytes of user buffer still available */
	int error;	/* sticky error reported back to the syscall */
};
/*
 * vfs_readdir() callback for sys_getdents64(): serialize one entry as
 * a struct linux_dirent64.  Unlike the legacy filldir(), d_ino is
 * 64-bit (no overflow check needed) and d_type has its own field, so
 * reclen only adds 1 byte for the name terminator.  Records are
 * aligned to sizeof(u64).
 */
static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
		     u64 ino, unsigned int d_type)
{
	struct linux_dirent64 __user *dirent;
	struct getdents_callback64 * buf = (struct getdents_callback64 *) __buf;
	int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
		sizeof(u64));

	buf->error = -EINVAL;	/* only used if we fail.. */
	if (reclen > buf->count)
		return -EINVAL;
	/* d_off of the previous record is the offset of this one */
	dirent = buf->previous;
	if (dirent) {
		if (__put_user(offset, &dirent->d_off))
			goto efault;
	}
	dirent = buf->current_dir;
	if (__put_user(ino, &dirent->d_ino))
		goto efault;
	/* placeholder; patched either by the next call or by the syscall */
	if (__put_user(0, &dirent->d_off))
		goto efault;
	if (__put_user(reclen, &dirent->d_reclen))
		goto efault;
	if (__put_user(d_type, &dirent->d_type))
		goto efault;
	if (copy_to_user(dirent->d_name, name, namlen))
		goto efault;
	if (__put_user(0, dirent->d_name + namlen))
		goto efault;
	/* advance the cursor past this record */
	buf->previous = dirent;
	dirent = (void __user *)dirent + reclen;
	buf->current_dir = dirent;
	buf->count -= reclen;
	return 0;
efault:
	buf->error = -EFAULT;
	return -EFAULT;
}
/*
 * getdents64(2): like getdents(2) but emits struct linux_dirent64
 * records with full 64-bit inode numbers and offsets.  Returns bytes
 * written, 0 at end of directory, or a negative errno.
 */
SYSCALL_DEFINE3(getdents64, unsigned int, fd,
		struct linux_dirent64 __user *, dirent, unsigned int, count)
{
	struct file * file;
	struct linux_dirent64 __user * lastdirent;
	struct getdents_callback64 buf;
	int error;

	error = -EFAULT;
	if (!access_ok(VERIFY_WRITE, dirent, count))
		goto out;
	error = -EBADF;
	file = fget(fd);
	if (!file)
		goto out;
	buf.current_dir = dirent;
	buf.previous = NULL;
	buf.count = count;
	buf.error = 0;
	error = vfs_readdir(file, filldir64, &buf);
	if (error >= 0)
		error = buf.error;
	/* back-fill the final record's d_off with the current file position */
	lastdirent = buf.previous;
	if (lastdirent) {
		/* typeof keeps this correct whatever width d_off has */
		typeof(lastdirent->d_off) d_off = file->f_pos;
		if (__put_user(d_off, &lastdirent->d_off))
			error = -EFAULT;
		else
			error = count - buf.count;	/* bytes consumed */
	}
	fput(file);
out:
	return error;
}
/*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	/* node in fs_info->defrag_inodes, keyed by ino */
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;
	/* root objectid */
	u64 root;
	/* last offset we were able to defrag */
	u64 last_offset;
	/* if we've wrapped around back to zero once already */
	int cycled;
};
/* pop a record for an inode into the defrag tree. The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
				     struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;

	/* standard rbtree descent, ordered by inode number */
	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		if (defrag->ino < entry->ino)
			p = &parent->rb_left;
		else if (defrag->ino > entry->ino)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			goto exists;
		}
	}
	/* flag the inode so btrfs_add_inode_defrag() skips duplicates */
	BTRFS_I(inode)->in_defrag = 1;
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return;

exists:
	/* an entry for this inode already exists; drop the caller's copy */
	kfree(defrag);
	return;
}
/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 *
 * Allocates the record outside the lock; in_defrag is checked both
 * before (cheap unlocked fast path) and after taking the lock, and
 * the record is freed if another thread won the race.  Returns 0 or
 * -ENOMEM.
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;

	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	/* no point queueing work while the filesystem is going down */
	if (btrfs_fs_closing(root->fs_info))
		return 0;

	if (BTRFS_I(inode)->in_defrag)
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	/* re-check under the lock; someone may have inserted meanwhile */
	if (!BTRFS_I(inode)->in_defrag)
		__btrfs_add_inode_defrag(inode, defrag);
	else
		kfree(defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}
/*
 * must be called with the defrag_inodes lock held
 *
 * Look up the inode_defrag record for @ino.  On an exact hit the
 * record is returned.  On a miss, if @next is non-NULL, *next is set
 * to the first tree node whose ino is greater than @ino (or NULL if
 * none), letting btrfs_run_defrag_inodes() resume its scan.
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino,
					     struct rb_node **next)
{
	struct inode_defrag *entry = NULL;
	struct rb_node *p;
	struct rb_node *parent = NULL;

	p = info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		if (ino < entry->ino)
			p = parent->rb_left;
		else if (ino > entry->ino)
			p = parent->rb_right;
		else
			return entry;
	}

	if (next) {
		/*
		 * walk forward from the last visited node to the in-order
		 * successor of @ino; the parent check guards the NULL
		 * rb_next() at the end of the tree
		 */
		while (parent && ino > entry->ino) {
			parent = rb_next(parent);
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		}
		*next = parent;
	}
	return NULL;
}
/*
 * run through the list of inodes in the FS that need
 * defragging
 *
 * Repeatedly pops the lowest-keyed record from fs_info->defrag_inodes
 * and defrags that inode in chunks of defrag_batch pages.  The
 * defrag_inodes_lock is dropped around the actual defrag work and
 * retaken for each tree operation.  Records with remaining work are
 * re-queued; the loop wraps back to ino 0 once before finishing.
 * Always returns 0.
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct rb_node *n;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	u64 first_ino = 0;
	int num_defrag;
	int defrag_batch = 1024;

	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;

	/* unmount waits for defrag_running to drop to zero */
	atomic_inc(&fs_info->defrag_running);
	spin_lock(&fs_info->defrag_inodes_lock);
	while(1) {
		n = NULL;

		/* find an inode to defrag */
		defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n);
		if (!defrag) {
			if (n)
				defrag = rb_entry(n, struct inode_defrag, rb_node);
			else if (first_ino) {
				/* reached the end; wrap to the start once */
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		/* remove it from the rbtree */
		first_ino = defrag->ino + 1;
		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

		if (btrfs_fs_closing(fs_info))
			goto next_free;

		spin_unlock(&fs_info->defrag_inodes_lock);

		/* get the inode */
		key.objectid = defrag->root;
		btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
		key.offset = (u64)-1;
		inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(inode_root))
			goto next;

		key.objectid = defrag->ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;

		inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
		if (IS_ERR(inode))
			goto next;

		/* do a chunk of defrag */
		BTRFS_I(inode)->in_defrag = 0;
		range.start = defrag->last_offset;
		num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
					       defrag_batch);
		/*
		 * if we filled the whole defrag batch, there
		 * must be more work to do.  Queue this defrag
		 * again
		 */
		if (num_defrag == defrag_batch) {
			defrag->last_offset = range.start;
			__btrfs_add_inode_defrag(inode, defrag);
			/*
			 * we don't want to kfree defrag, we added it back to
			 * the rbtree
			 */
			defrag = NULL;
		} else if (defrag->last_offset && !defrag->cycled) {
			/*
			 * we didn't fill our defrag batch, but
			 * we didn't start at zero.  Make sure we loop
			 * around to the start of the file.
			 */
			defrag->last_offset = 0;
			defrag->cycled = 1;
			__btrfs_add_inode_defrag(inode, defrag);
			defrag = NULL;
		}

		iput(inode);
next:
		spin_lock(&fs_info->defrag_inodes_lock);
next_free:
		/* kfree(NULL) is a no-op when the record was re-queued */
		kfree(defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);

	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 *
 * Copies up to write_bytes from the iov_iter into the already-locked
 * prepared_pages, starting at the in-page offset implied by pos.
 * Returns the number of bytes actually copied; a short return means
 * a page of the source faulted and the caller must fault it in and
 * retry.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		/* stay on this page if it was only partially filled */
		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
/*
 * unlocks pages after btrfs_file_write is done with them
 *
 * Drops the state prepare_pages() set up: clears the PageChecked
 * flag, unlocks each page, marks it accessed for the LRU and releases
 * the page-cache reference.
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t idx = 0;

	while (idx < num_pages) {
		struct page *page = pages[idx];

		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(page);
		unlock_page(page);
		mark_page_accessed(page);
		page_cache_release(page);
		idx++;
	}
}
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 *
 * The delalloc range is set *before* the pages are dirtied so the
 * writeback machinery always sees consistent state.  Returns 0 or the
 * error from btrfs_set_extent_delalloc().
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	/* round the range out to sector boundaries */
	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 *
 * An extent map that straddles a boundary of the range is removed and
 * up to two replacement maps (split before start, split2 after end)
 * are inserted for the parts that survive.  Pinned extents are skipped
 * when skip_pinned is set.  Always returns 0.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		/* open-ended drop: no tail to preserve */
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		/* pre-allocate replacement maps before taking the tree lock */
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		BUG_ON(!split || !split2); /* -ENOMEM */

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			/* skip past the pinned extent and keep scanning */
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		/* head of the extent sticks out before the range: keep it */
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		/* tail of the extent sticks out past the range: keep it */
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree*/
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * Returns 0 on success or a negative errno; on failure after a
 * btrfs_del_items() error the transaction is aborted.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;		/* pending contiguous deletions */
	int del_slot = 0;	/* first slot of the pending deletions */
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* beyond disk_i_size we only read; no need to cow the path */
	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		/* the previous item may cover search_start; step back */
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		/* re-search with COW enabled before modifying anything */
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			/* both halves now reference the same disk extent */
			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			/* batch contiguous whole-item deletions */
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			/* leaf exhausted: flush pending deletions first */
			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				goto out;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Decide whether the file extent item in @slot of @leaf refers to the
 * same uncompressed on-disk extent (@bytenr laid out from original
 * file offset @orig_offset) for inode @objectid, so a neighbouring
 * extent can merge with it.  Non-zero *start/*end act as constraints
 * that must match the item's range exactly; on success the item's
 * [key.offset, extent_end) range is stored through them and 1 is
 * returned, otherwise 0.
 */
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *item;
	struct btrfs_key found;
	u64 item_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &found, slot);
	if (found.objectid != objectid)
		return 0;
	if (found.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	item = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, item) != BTRFS_FILE_EXTENT_REG)
		return 0;
	if (btrfs_file_extent_disk_bytenr(leaf, item) != bytenr)
		return 0;
	if (btrfs_file_extent_offset(leaf, item) != found.offset - orig_offset)
		return 0;
	if (btrfs_file_extent_compression(leaf, item) ||
	    btrfs_file_extent_encryption(leaf, item) ||
	    btrfs_file_extent_other_encoding(leaf, item))
		return 0;

	item_end = found.offset + btrfs_file_extent_num_bytes(leaf, item);
	if (*start && *start != found.offset)
		return 0;
	if (*end && *end != item_end)
		return 0;

	*start = found.offset;
	*end = item_end;
	return 1;
}
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 *
 * Returns 0 on success or a negative errno.  Note: earlier versions
 * always returned 0, which silently swallowed errors reaching the
 * "out" label (e.g. a failed btrfs_search_slot()); we now return ret,
 * clearing it after the search so a benign positive result is not
 * reported as an error.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;
	/* a positive (not-found) search result is not an error */
	ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	/* the written head can merge into the previous extent */
	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	/* the written tail can merge into the next extent */
	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	/* no merge possible: split the extent at start and/or end */
	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		/* both halves reference the same disk extent now */
		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		/* absorb the merged neighbours into one REG extent */
		fi = btrfs_item_ptr(leaf, del_slot - 1,
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 *
 * A page must be read in before a write that only partially covers it
 * (pos not page aligned), or when the caller forces it; fully
 * overwritten pages can stay !uptodate.
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	bool partial = (pos & (PAGE_CACHE_SIZE - 1)) != 0;
	int err;

	if (PageUptodate(page))
		return 0;
	if (!partial && !force_uptodate)
		return 0;

	err = btrfs_readpage(NULL, page);
	if (err)
		return err;
	lock_page(page);
	if (!PageUptodate(page)) {
		unlock_page(page);
		return -EIO;
	}
	return 0;
}
/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 *
 * Edge pages that are only partially overwritten are read in first via
 * prepare_uptodate_page().  If an ordered extent overlaps the range,
 * everything is dropped, the ordered extent is waited out, and the
 * whole sequence restarts.  On success all num_pages pages are locked;
 * on failure the pages locked so far are released and a negative errno
 * is returned.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;	/* last page index to unwind on failure */
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		/* only the first and last pages can be partially written */
		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			/* overlapping ordered extent: wait and retry */
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		/* clear stale state so the new write starts clean */
		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				  GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}
/*
 * Buffered write path: copy the iov_iter into page-cache pages in
 * chunks of up to nrptrs pages, reserving delalloc space for each
 * chunk up front and releasing the unused part after a short copy.
 * Returns the number of bytes written, or the first error if nothing
 * was written.
 */
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

	/* size the pages array: enough for the write, bounded by one page
	 * of pointers and the task's dirty-throttling headroom */
	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			/* next pass must read the page in before the copy */
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess delaloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		/* throttle the writer against background writeback */
		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   dirty_pages);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}
/*
 * O_DIRECT write path.  Tries the generic direct write first; if that
 * comes up short, finishes the remainder through the buffered path and
 * then flushes and invalidates the page cache for that tail so later
 * direct I/O sees the on-disk data.
 *
 * Returns bytes written or a negative errno.
 */
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	/*
	 * the generic O_DIRECT will update in-memory i_size after the
	 * DIOs are done.  But our endio handlers that update the on
	 * disk i_size never update past the in memory i_size.  So we
	 * need one more update here to catch any additions to the
	 * file
	 */
	if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
		btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
		mark_inode_dirty(inode);
	}

	/* fully written (or hard error): nothing left to do */
	if (written < 0 || written == count)
		return written;

	/* short direct write: push the remainder through the page cache */
	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	/* write back the buffered tail and drop it from the page cache */
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}
/*
 * ->aio_write handler.  Runs the generic VFS write checks under i_mutex,
 * expands the file when the write starts past EOF, then dispatches to the
 * O_DIRECT or buffered write path.  Returns bytes written or -errno.
 */
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	err = btrfs_update_time(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	BTRFS_I(inode)->sequence++;

	/* writing past EOF: zero-fill/expand up to the write start first */
	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
/*
 * ->release handler, invoked on the last close of a struct file.
 * Queues an ordered flush for files being replaced in place (see the
 * comment below) and ends any transaction the ioctl path left attached
 * to this file.  Always returns 0.
 */
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by settattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
* fsync call for both files and directories. This logs the inode into
* the tree log instead of forcing full commits whenever possible.
*
* It needs to call filemap_fdatawait so that all ordered extent updates are
* in the metadata btree are up to date for copying to the log.
*
* It drops the inode mutex before doing the tree log commit. This is an
* important optimization for directories because holding the mutex prevents
* new operations on the dir while we write to disk.
*/
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	/* push dirty pages out before looking at ordered extents */
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	/* we wait first, since the writeback may change the inode */
	root->log_batch++;
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	smp_mb();
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	/* BTRFS_NO_LOG_SYNC (>0 sentinel) means the log was skipped */
	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			/* couldn't log the inode: full commit instead */
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	/* any leftover positive value is an internal code, not for userspace */
	return ret > 0 ? -EIO : ret;
}
/* mmap ops: generic read faults; write faults go to btrfs_page_mkwrite */
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};
static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct address_space *mapping = filp->f_mapping;
if (!mapping->a_ops->readpage)
return -ENOEXEC;
file_accessed(filp);
vma->vm_ops = &btrfs_file_vm_ops;
vma->vm_flags |= VM_CAN_NONLINEAR;
return 0;
}
/*
 * ->fallocate handler.  Preallocates the sector-aligned range covering
 * [offset, offset+len).  Only FALLOC_FL_KEEP_SIZE is supported.  Data
 * space is reserved up front, ordered IO on the range is waited out,
 * then the range is walked extent-by-extent allocating prealloc extents
 * for the holes.  Returns 0 or a negative errno.
 */
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	/* round the requested range out to sector boundaries */
	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/*
	 * Make sure we have enough space before we do the
	 * allocation.
	 */
	ret = btrfs_check_data_free_space(inode, len);
	if (ret)
		return ret;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	/*
	 * Take the extent lock, retrying until no ordered extent overlaps
	 * the range (new ordered IO may have raced in since the wait above).
	 */
	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	/* walk the range, preallocating wherever there is a hole */
	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR_OR_NULL(em)) {
			if (!em)
				ret = -ENOMEM;
			else
				ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = (last_byte + mask) & ~mask;

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	/* Let go of our reservation. */
	btrfs_free_reserved_data_space(inode, len);
	return ret;
}
/*
 * Locate the next data or hole (per @origin: SEEK_DATA / SEEK_HOLE)
 * at or after *offset by walking the extent maps.  On success *offset
 * is updated (clamped to i_size) and 0 is returned; -ENXIO means no
 * such position exists.  The searched range is locked in the io_tree
 * for the duration.
 */
static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 orig_start = *offset;
	u64 len = i_size_read(inode);
	u64 last_end = 0;
	int ret = 0;

	/* make sure the locked range is at least one sector long */
	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state);

	/*
	 * Delalloc is such a pain.  If we have a hole and we have pending
	 * delalloc for a portion of the hole we will get back a hole that
	 * exists for the entire range since it hasn't been actually written
	 * yet.  So to take care of this case we need to look for an extent just
	 * before the position we want in case there is outstanding delalloc
	 * going on here.
	 */
	if (origin == SEEK_HOLE && start != 0) {
		if (start <= root->sectorsize)
			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
						     root->sectorsize, 0);
		else
			em = btrfs_get_extent_fiemap(inode, NULL, 0,
						     start - root->sectorsize,
						     root->sectorsize, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		last_end = em->start + em->len;
		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);
		free_extent_map(em);
	}

	while (1) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
				if (last_end <= orig_start) {
					free_extent_map(em);
					ret = -ENXIO;
					break;
				}
			}

			if (origin == SEEK_HOLE) {
				*offset = start;
				free_extent_map(em);
				break;
			}
		} else {
			if (origin == SEEK_DATA) {
				/* delalloc past EOF doesn't count as data */
				if (em->block_start == EXTENT_MAP_DELALLOC) {
					if (start >= inode->i_size) {
						free_extent_map(em);
						ret = -ENXIO;
						break;
					}
				}

				*offset = start;
				free_extent_map(em);
				break;
			}
		}

		start = em->start + em->len;
		last_end = em->start + em->len;

		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);

		/* a vacant map means we've run off the end of the extents */
		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
			free_extent_map(em);
			ret = -ENXIO;
			break;
		}
		free_extent_map(em);
		cond_resched();
	}
	if (!ret)
		*offset = min(*offset, inode->i_size);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}
/*
 * ->llseek handler.  SEEK_END/SEEK_CUR are delegated to the generic
 * helper; SEEK_DATA/SEEK_HOLE resolve the target via
 * find_desired_extent() and then fall through (with SEEK_SET) to the
 * common range checks below.  All under i_mutex.
 */
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, origin);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, origin);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
		/* intentional fall-through to the common validation below */
	}

	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
		offset = -EINVAL;
		goto out;
	}
	if (offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}
/* file_operations for btrfs regular files */
const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read       = generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};
| gpl-2.0 |
emceethemouth/kernel_cancro | arch/powerpc/sysdev/msi_bitmap.c | 7139 | 6674 | /*
* Copyright 2006-2008, Michael Ellerman, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2 of the
* License.
*
*/
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <asm/msi_bitmap.h>
#include <asm/setup.h>
/*
 * Allocate a naturally-aligned, power-of-two-sized run of @num hwirqs
 * from the bitmap.  Returns the first hwirq of the run, or a negative
 * value when no suitable region is free.
 */
int msi_bitmap_alloc_hwirqs(struct msi_bitmap *bmp, int num)
{
	int order = get_count_order(num);
	unsigned long flags;
	int offset;

	/*
	 * This is fast, but stricter than we need. We might want to add
	 * a fallback routine which does a linear search with no alignment.
	 */
	spin_lock_irqsave(&bmp->lock, flags);
	offset = bitmap_find_free_region(bmp->bitmap, bmp->irq_count, order);
	spin_unlock_irqrestore(&bmp->lock, flags);

	pr_debug("msi_bitmap: allocated 0x%x (2^%d) at offset 0x%x\n",
		 num, order, offset);

	return offset;
}
/*
 * Return a run of hwirqs previously handed out by
 * msi_bitmap_alloc_hwirqs() to the free pool.  @offset/@num must match
 * the original allocation (the region is released at the same order).
 */
void msi_bitmap_free_hwirqs(struct msi_bitmap *bmp, unsigned int offset,
			    unsigned int num)
{
	unsigned long flags;
	int order = get_count_order(num);

	pr_debug("msi_bitmap: freeing 0x%x (2^%d) at offset 0x%x\n",
		 num, order, offset);

	spin_lock_irqsave(&bmp->lock, flags);
	bitmap_release_region(bmp->bitmap, offset, order);
	spin_unlock_irqrestore(&bmp->lock, flags);
}
/*
 * Permanently mark a single hwirq as in-use (an order-0 region) so the
 * allocator never hands it out.
 */
void msi_bitmap_reserve_hwirq(struct msi_bitmap *bmp, unsigned int hwirq)
{
	unsigned long flags;

	pr_debug("msi_bitmap: reserving hwirq 0x%x\n", hwirq);

	spin_lock_irqsave(&bmp->lock, flags);
	bitmap_allocate_region(bmp->bitmap, hwirq, 0);
	spin_unlock_irqrestore(&bmp->lock, flags);
}
/**
* msi_bitmap_reserve_dt_hwirqs - Reserve irqs specified in the device tree.
* @bmp: pointer to the MSI bitmap.
*
* Looks in the device tree to see if there is a property specifying which
* irqs can be used for MSI. If found those irqs reserved in the device tree
* are reserved in the bitmap.
*
* Returns 0 for success, < 0 if there was an error, and > 0 if no property
* was found in the device tree.
**/
int msi_bitmap_reserve_dt_hwirqs(struct msi_bitmap *bmp)
{
	unsigned long flags;
	int i, j, len;
	const u32 *p;

	if (!bmp->of_node)
		return 1;

	p = of_get_property(bmp->of_node, "msi-available-ranges", &len);
	if (!p) {
		pr_debug("msi_bitmap: no msi-available-ranges property " \
			 "found on %s\n", bmp->of_node->full_name);
		return 1;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		printk(KERN_WARNING "msi_bitmap: Malformed msi-available-ranges"
		       " property on %s\n", bmp->of_node->full_name);
		return -EINVAL;
	}

	/*
	 * Reserve everything, then carve out the ranges the device tree
	 * declares usable.  Both steps run under bmp->lock: previously the
	 * full-range bitmap_allocate_region() was done unlocked (so the
	 * bitmap could be observed half-updated), and the lock was taken
	 * with plain spin_lock() although every other accessor of this
	 * lock uses the irq-safe variant.
	 */
	spin_lock_irqsave(&bmp->lock, flags);
	bitmap_allocate_region(bmp->bitmap, 0, get_count_order(bmp->irq_count));

	/* Format is: (<u32 start> <u32 count>)+ */
	len /= 2 * sizeof(u32);
	for (i = 0; i < len; i++, p += 2) {
		for (j = 0; j < *(p + 1); j++)
			bitmap_release_region(bmp->bitmap, *p + j, 0);
	}
	spin_unlock_irqrestore(&bmp->lock, flags);

	return 0;
}
/*
 * Initialise @bmp to track @irq_count hwirqs.  The backing storage may
 * come from bootmem (early boot) or the slab, so callers must tear it
 * down with msi_bitmap_free().  Returns 0, -EINVAL for a zero count, or
 * -ENOMEM.
 */
int msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count,
		     struct device_node *of_node)
{
	int size;

	if (irq_count == 0)
		return -EINVAL;

	size = BITS_TO_LONGS(irq_count) * sizeof(long);
	pr_debug("msi_bitmap: allocator bitmap size is 0x%x bytes\n", size);

	bmp->bitmap = zalloc_maybe_bootmem(size, GFP_KERNEL);
	if (!bmp->bitmap) {
		pr_debug("msi_bitmap: ENOMEM allocating allocator bitmap!\n");
		return -ENOMEM;
	}

	/* We zalloc'ed the bitmap, so all irqs are free by default */
	spin_lock_init(&bmp->lock);
	bmp->of_node = of_node_get(of_node);
	bmp->irq_count = irq_count;

	return 0;
}
/*
 * Release the resources held by @bmp.  The bitmap storage itself is
 * intentionally NOT freed here: it may have come from bootmem (see
 * msi_bitmap_alloc()), which cannot be kfree()d.  bmp->bitmap is set
 * to NULL so clients can test for "not allocated".
 */
void msi_bitmap_free(struct msi_bitmap *bmp)
{
	/* we can't free the bitmap we don't know if it's bootmem etc. */
	of_node_put(bmp->of_node);
	bmp->bitmap = NULL;
}
#ifdef CONFIG_MSI_BITMAP_SELFTEST
#define check(x) \
if (!(x)) printk("msi_bitmap: test failed at line %d\n", __LINE__);
/*
 * Selftest: exercise alloc/reserve/free on a bitmap with no device-tree
 * node.  Uses check() to report failures by line number.
 */
void __init test_basics(void)
{
	struct msi_bitmap bmp;
	void *bitmap;
	int i, size = 512;

	/* Can't allocate a bitmap of 0 irqs */
	check(msi_bitmap_alloc(&bmp, 0, NULL) != 0);

	/* of_node may be NULL */
	check(0 == msi_bitmap_alloc(&bmp, size, NULL));

	/* Should all be free by default */
	check(0 == bitmap_find_free_region(bmp.bitmap, size,
					   get_count_order(size)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(size));

	/* With no node, there's no msi-available-ranges, so expect > 0 */
	check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0);

	/* Should all still be free */
	check(0 == bitmap_find_free_region(bmp.bitmap, size,
					   get_count_order(size)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(size));

	/* Check we can fill it up and then no more */
	for (i = 0; i < size; i++)
		check(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0);

	check(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0);

	/* Should all be allocated */
	check(bitmap_find_free_region(bmp.bitmap, size, 0) < 0);

	/* And if we free one we can then allocate another */
	msi_bitmap_free_hwirqs(&bmp, size / 2, 1);
	check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2);

	/*
	 * msi_bitmap_free() sets bmp.bitmap to NULL (clients rely on that
	 * as the "not-allocated" marker), so grab the pointer first; the
	 * old kfree(bmp.bitmap) after the free was a guaranteed no-op
	 * that leaked the bitmap storage.
	 */
	bitmap = bmp.bitmap;
	msi_bitmap_free(&bmp);

	/* Clients may check bitmap == NULL for "not-allocated" */
	check(bmp.bitmap == NULL);

	kfree(bitmap);
}
/*
 * Selftest: fake up a device node with an msi-available-ranges property
 * and verify the reserved bitmap matches the expected layout.
 */
void __init test_of_node(void)
{
	u32 prop_data[] = { 10, 10, 25, 3, 40, 1, 100, 100, 200, 20 };
	const char *expected_str = "0-9,20-24,28-39,41-99,220-255";
	char *prop_name = "msi-available-ranges";
	char *node_name = "/fakenode";
	struct device_node of_node;
	struct property prop;
	struct msi_bitmap bmp;
	void *bitmap;
	int size = 256;
	DECLARE_BITMAP(expected, size);

	/* There should really be a struct device_node allocator */
	memset(&of_node, 0, sizeof(of_node));
	kref_init(&of_node.kref);
	of_node.full_name = node_name;

	check(0 == msi_bitmap_alloc(&bmp, size, &of_node));

	/* No msi-available-ranges, so expect > 0 */
	check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0);

	/* Should all still be free */
	check(0 == bitmap_find_free_region(bmp.bitmap, size,
					   get_count_order(size)));
	bitmap_release_region(bmp.bitmap, 0, get_count_order(size));

	/* Now create a fake msi-available-ranges property */

	/* There should really .. oh whatever */
	memset(&prop, 0, sizeof(prop));
	prop.name = prop_name;
	prop.value = &prop_data;
	prop.length = sizeof(prop_data);
	of_node.properties = &prop;

	/* msi-available-ranges, so expect == 0 */
	check(msi_bitmap_reserve_dt_hwirqs(&bmp) == 0);

	/* Check we got the expected result */
	check(0 == bitmap_parselist(expected_str, expected, size));
	check(bitmap_equal(expected, bmp.bitmap, size));

	/*
	 * msi_bitmap_free() NULLs bmp.bitmap, so save the pointer first;
	 * the old kfree(bmp.bitmap) after the free was a guaranteed no-op
	 * that leaked the bitmap storage.
	 */
	bitmap = bmp.bitmap;
	msi_bitmap_free(&bmp);
	kfree(bitmap);
}
/* Entry point for the selftests; registered below as a late_initcall. */
int __init msi_bitmap_selftest(void)
{
	printk(KERN_DEBUG "Running MSI bitmap self-tests ...\n");

	test_basics();
	test_of_node();

	return 0;
}
#endif /* CONFIG_MSI_BITMAP_SELFTEST */
| gpl-2.0 |
garwynn/SMN900P_MI3_Kernel | net/wireless/debugfs.c | 7395 | 3115 | /*
* cfg80211 debugfs
*
* Copyright 2009 Luis R. Rodriguez <lrodriguez@atheros.com>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/slab.h>
#include "core.h"
#include "debugfs.h"
/*
 * DEBUGFS_READONLY_FILE - generate a read-only debugfs attribute
 * @name:   basename of the generated <name>_read() / <name>_ops symbols
 * @buflen: size of the on-stack formatting buffer (must fit fmt + "\n")
 * @fmt:    scnprintf() format used to render the value
 * @value:  expression(s), evaluated against the wiphy stashed in
 *          file->private_data by the debugfs open
 */
#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...)		\
static ssize_t name## _read(struct file *file, char __user *userbuf,	\
			    size_t count, loff_t *ppos)			\
{									\
	struct wiphy *wiphy= file->private_data;		\
	char buf[buflen];						\
	int res;							\
									\
	res = scnprintf(buf, buflen, fmt "\n", ##value);		\
	return simple_read_from_buffer(userbuf, count, ppos, buf, res);	\
}									\
									\
static const struct file_operations name## _ops = {			\
	.read = name## _read,						\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

/* Per-wiphy read-only debugfs attributes */
DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
		      wiphy->rts_threshold)
DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
		      wiphy->frag_threshold);
DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d",
		      wiphy->retry_short)
DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
		      wiphy->retry_long);
static int ht_print_chan(struct ieee80211_channel *chan,
char *buf, int buf_size, int offset)
{
if (WARN_ON(offset > buf_size))
return 0;
if (chan->flags & IEEE80211_CHAN_DISABLED)
return snprintf(buf + offset,
buf_size - offset,
"%d Disabled\n",
chan->center_freq);
return snprintf(buf + offset,
buf_size - offset,
"%d HT40 %c%c\n",
chan->center_freq,
(chan->flags & IEEE80211_CHAN_NO_HT40MINUS) ? ' ' : '-',
(chan->flags & IEEE80211_CHAN_NO_HT40PLUS) ? ' ' : '+');
}
/*
 * Render the per-channel HT40 allowance map (one line per channel, see
 * ht_print_chan()) into a PAGE_SIZE buffer and copy it to userspace.
 */
static ssize_t ht40allow_map_read(struct file *file,
				  char __user *user_buf,
				  size_t count, loff_t *ppos)
{
	struct wiphy *wiphy = file->private_data;
	char *buf;
	unsigned int offset = 0, buf_size = PAGE_SIZE, i;
	enum ieee80211_band band;
	struct ieee80211_supported_band *sband;
	ssize_t r;

	buf = kzalloc(buf_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&cfg80211_mutex);

	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		sband = wiphy->bands[band];
		if (!sband)
			continue;
		for (i = 0; i < sband->n_channels; i++)
			offset += ht_print_chan(&sband->channels[i],
						buf, buf_size, offset);
	}

	mutex_unlock(&cfg80211_mutex);

	/*
	 * r must be ssize_t: simple_read_from_buffer() can return a
	 * negative errno, which the old "unsigned int r" corrupted into
	 * a large positive return value on 64-bit.
	 */
	r = simple_read_from_buffer(user_buf, count, ppos, buf, offset);

	kfree(buf);

	return r;
}
/* debugfs fops for the read-only "ht40allow_map" file */
static const struct file_operations ht40allow_map_ops = {
	.read = ht40allow_map_read,
	.open = simple_open,
	.llseek = default_llseek,
};
/* Create debugfs file "<name>" in the wiphy dir, backed by <name>_ops */
#define DEBUGFS_ADD(name)						\
	debugfs_create_file(#name, S_IRUGO, phyd, &rdev->wiphy, &name## _ops);

/* Populate a newly-registered wiphy's debugfs directory. */
void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev)
{
	struct dentry *phyd = rdev->wiphy.debugfsdir;

	DEBUGFS_ADD(rts_threshold);
	DEBUGFS_ADD(fragmentation_threshold);
	DEBUGFS_ADD(short_retry_limit);
	DEBUGFS_ADD(long_retry_limit);
	DEBUGFS_ADD(ht40allow_map);
}
| gpl-2.0 |
SyNtheticNightmar3/android_kernel_asus_flo | sound/pci/ctxfi/ctvmem.c | 7907 | 5684 | /**
* Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
*
* This source file is released under GPL v2 license (no other versions).
* See the COPYING file included in the main directory of this source
* distribution for the license terms and conditions.
*
* @File ctvmem.c
*
* @Brief
* This file contains the implementation of virtual memory management object
* for card device.
*
* @Author Liu Chun
* @Date Apr 1 2008
*/
#include "ctvmem.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>
#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)
/* *
* Find or create vm block based on requested @size.
* @size must be page aligned.
* */
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size)
{
	struct ct_vm_block *block = NULL, *entry;
	struct list_head *pos;

	size = CT_PAGE_ALIGN(size);
	if (size > vm->size) {
		printk(KERN_ERR "ctxfi: Fail! No sufficient device virtual "
		       "memory space available!\n");
		return NULL;
	}

	mutex_lock(&vm->lock);
	/* first-fit scan of the free list */
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->size >= size)
			break; /* found a block that is big enough */
	}
	if (pos == &vm->unused)
		goto out;	/* no free block large enough: return NULL */

	if (entry->size == size) {
		/* Move the vm node from unused list to used list directly */
		list_move(&entry->list, &vm->used);
		vm->size -= size;
		block = entry;
		goto out;
	}

	/* split the free block: carve @size bytes off its front */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		goto out;

	block->addr = entry->addr;
	block->size = size;
	list_add(&block->list, &vm->used);
	entry->addr += size;
	entry->size -= size;
	vm->size -= size;		/* vm->size tracks total free bytes */

out:
	mutex_unlock(&vm->lock);
	return block;
}
/*
 * Return @block to the free list, keeping the list sorted by address
 * and coalescing with neighbouring free blocks.
 */
static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
	struct ct_vm_block *entry, *pre_ent;
	struct list_head *pos, *pre;

	block->size = CT_PAGE_ALIGN(block->size);

	mutex_lock(&vm->lock);
	list_del(&block->list);
	vm->size += block->size;

	/* find the insertion point that keeps the free list address-sorted */
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->addr >= (block->addr + block->size))
			break; /* found a position */
	}
	if (pos == &vm->unused) {
		list_add_tail(&block->list, &vm->unused);
		entry = block;
	} else {
		if ((block->addr + block->size) == entry->addr) {
			/* merge with the immediately following free block */
			entry->addr = block->addr;
			entry->size += block->size;
			kfree(block);
		} else {
			__list_add(&block->list, pos->prev, pos);
			entry = block;
		}
	}

	/*
	 * Walk backwards merging with lower-address free blocks.
	 * NOTE(review): the loop only stops when the previous block's end
	 * OVERLAPS this one; it also merges when there is a gap between
	 * them (pre end < entry->addr), not just when they are exactly
	 * adjacent — confirm that "==" adjacency is not what was intended.
	 */
	pos = &entry->list;
	pre = pos->prev;
	while (pre != &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		pre_ent = list_entry(pre, struct ct_vm_block, list);
		if ((pre_ent->addr + pre_ent->size) > entry->addr)
			break;

		pre_ent->size += entry->size;
		list_del(pos);
		kfree(entry);
		pos = pre;
		pre = pos->prev;
	}
	mutex_unlock(&vm->lock);
}
/* Map host addr (kmalloced/vmalloced) to device logical addr. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
	struct ct_vm_block *block;
	unsigned int pte_start;
	unsigned i, pages;
	unsigned long *ptp;

	block = get_vm_block(vm, size);
	if (block == NULL) {
		printk(KERN_ERR "ctxfi: No virtual memory block that is big "
		       "enough to allocate!\n");
		return NULL;
	}

	/*
	 * Fill the device page-table entries for the mapped range.
	 * NOTE(review): only ptp[0] is indexed here, which assumes the
	 * whole mapping lands in the first page-table page — confirm
	 * against CT_PTP_NUM / CT_ADDRS_PER_PAGE sizing.
	 */
	ptp = (unsigned long *)vm->ptp[0].area;
	pte_start = (block->addr >> CT_PAGE_SHIFT);
	pages = block->size >> CT_PAGE_SHIFT;
	for (i = 0; i < pages; i++) {
		unsigned long addr;
		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
		ptp[pte_start + i] = addr;
	}

	/* get_vm_block() page-aligned block->size; restore the exact size */
	block->size = size;
	return block;
}
/*
 * Undo ct_vm_map(): hand @block back to the allocator.  The page-table
 * entries are left in place; the address range is simply recycled.
 */
static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* do unmapping */
	put_vm_block(vm, block);
}
/* *
* return the host physical addr of the @index-th device
* page table page on success, or ~0UL on failure.
* The first returned ~0UL indicates the termination.
* */
/*
 * Return the host physical address of the @index-th device page-table
 * page, or ~0UL for an out-of-range index (callers treat the first ~0UL
 * as the terminator).
 */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
	if (index >= CT_PTP_NUM)
		return ~0UL;

	return vm->ptp[index].addr;
}
/*
 * Allocate and initialise a ct_vm for @pci: allocates CT_PTP_NUM device
 * page-table pages and seeds the free list with one block spanning the
 * whole managed range.  On success stores the vm in *rvm and returns 0;
 * returns -ENOMEM on allocation failure.
 */
int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i, err = 0;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	mutex_init(&vm->lock);

	/* Allocate page table pages */
	for (i = 0; i < CT_PTP_NUM; i++) {
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  snd_dma_pci_data(pci),
					  PAGE_SIZE, &vm->ptp[i]);
		if (err < 0)
			break;
	}
	if (err < 0) {
		/* no page table pages are allocated */
		ct_vm_destroy(vm);
		return -ENOMEM;
	}
	/* i pages were allocated; size the managed range accordingly */
	vm->size = CT_ADDRS_PER_PAGE * i;
	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_phys = ct_get_ptp_phys;
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (NULL != block) {
		/* seed the free list with the entire range; if this fails
		 * the vm simply starts with no free blocks */
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}

	*rvm = vm;
	return 0;
}
/* The caller must ensure no mapping pages are being used
* by hardware before calling this function */
void ct_vm_destroy(struct ct_vm *vm)
{
	int i;
	struct list_head *pos;
	struct ct_vm_block *entry;

	/* free used and unused list nodes */
	while (!list_empty(&vm->used)) {
		pos = vm->used.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}
	while (!list_empty(&vm->unused)) {
		pos = vm->unused.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}

	/*
	 * free allocated page table pages
	 * NOTE(review): all CT_PTP_NUM slots are freed even after a
	 * partial ct_vm_create() failure; this relies on the zeroed
	 * (never-allocated) entries being safe to pass to
	 * snd_dma_free_pages() — confirm.
	 */
	for (i = 0; i < CT_PTP_NUM; i++)
		snd_dma_free_pages(&vm->ptp[i]);

	vm->size = 0;

	kfree(vm);
}
| gpl-2.0 |
chettyharish/polling-server-deadline | drivers/media/common/cx2341x.c | 11235 | 56182 | /*
* cx2341x - generic code for cx23415/6/8 based devices
*
* Copyright (C) 2006 Hans Verkuil <hverkuil@xs4all.nl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <media/tuner.h>
#include <media/cx2341x.h>
#include <media/v4l2-common.h>
MODULE_DESCRIPTION("cx23415/6/8 driver");
MODULE_AUTHOR("Hans Verkuil");
MODULE_LICENSE("GPL");
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
/********************** COMMON CODE *********************/
/* definitions for audio properties bits 29-28 */
#define CX2341X_AUDIO_ENCODING_METHOD_MPEG 0
#define CX2341X_AUDIO_ENCODING_METHOD_AC3 1
#define CX2341X_AUDIO_ENCODING_METHOD_LPCM 2
/*
 * Return the human-readable control name for a CX2341X-private V4L2
 * control id, or NULL when the id is not one of ours.
 */
static const char *cx2341x_get_name(u32 id)
{
	static const struct {
		u32 id;
		const char *name;
	} names[] = {
		{ V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE,
		  "Spatial Filter Mode" },
		{ V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER,
		  "Spatial Filter" },
		{ V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE,
		  "Spatial Luma Filter Type" },
		{ V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE,
		  "Spatial Chroma Filter Type" },
		{ V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE,
		  "Temporal Filter Mode" },
		{ V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER,
		  "Temporal Filter" },
		{ V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE,
		  "Median Filter Type" },
		{ V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP,
		  "Median Luma Filter Maximum" },
		{ V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM,
		  "Median Luma Filter Minimum" },
		{ V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP,
		  "Median Chroma Filter Maximum" },
		{ V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM,
		  "Median Chroma Filter Minimum" },
		{ V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS,
		  "Insert Navigation Packets" },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(names); i++)
		if (names[i].id == id)
			return names[i].name;
	return NULL;
}
/* Return the menu item table for a CX2341X-private menu control, or
 * NULL when @id has no private menu.  Each table is a NULL-terminated
 * array of item names indexed by the control value. */
static const char **cx2341x_get_menu(u32 id)
{
	static const char *spatial_filter_mode_items[] = {
		"Manual",
		"Auto",
		NULL
	};
	static const char *luma_spatial_filter_type_items[] = {
		"Off",
		"1D Horizontal",
		"1D Vertical",
		"2D H/V Separable",
		"2D Symmetric non-separable",
		NULL
	};
	static const char *chroma_spatial_filter_type_items[] = {
		"Off",
		"1D Horizontal",
		NULL
	};
	static const char *temporal_filter_mode_items[] = {
		"Manual",
		"Auto",
		NULL
	};
	static const char *median_filter_type_items[] = {
		"Off",
		"Horizontal",
		"Vertical",
		"Horizontal/Vertical",
		"Diagonal",
		NULL
	};

	if (id == V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE)
		return spatial_filter_mode_items;
	if (id == V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE)
		return luma_spatial_filter_type_items;
	if (id == V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE)
		return chroma_spatial_filter_type_items;
	if (id == V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE)
		return temporal_filter_mode_items;
	if (id == V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE)
		return median_filter_type_items;
	return NULL;
}
/* Fill in the name, type and flags of a CX2341X-private control.
 * For the menu controls only *min and *step are set here; *max is
 * expected to be supplied by the caller (it depends on the number of
 * menu items).  For integer controls only the type is set; the caller
 * provides min/max/step/def. */
static void cx2341x_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
		    s32 *min, s32 *max, s32 *step, s32 *def, u32 *flags)
{
	*name = cx2341x_get_name(id);
	*flags = 0;
	switch (id) {
	/* menu controls: menus always start at 0 and have no step */
	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
		*type = V4L2_CTRL_TYPE_MENU;
		*min = 0;
		*step = 0;
		break;
	case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
		*type = V4L2_CTRL_TYPE_BOOLEAN;
		*min = 0;
		*max = *step = 1;
		break;
	default:
		*type = V4L2_CTRL_TYPE_INTEGER;
		break;
	}
	switch (id) {
	/* changing a filter mode can alter which other controls are active */
	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
		*flags |= V4L2_CTRL_FLAG_UPDATE;
		break;
	/* numeric filter levels are presented as sliders */
	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER:
	case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER:
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP:
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM:
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP:
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM:
		*flags |= V4L2_CTRL_FLAG_SLIDER;
		break;
	/* the MPEG-1/2 choice is driven by the stream type, not set directly */
	case V4L2_CID_MPEG_VIDEO_ENCODING:
		*flags |= V4L2_CTRL_FLAG_READ_ONLY;
		break;
	}
}
/********************** OLD CODE *********************/
/* Table of every control ID handled by this driver, terminated by 0.
 * Must be sorted from low to high control ID! */
const u32 cx2341x_mpeg_ctrls[] = {
	V4L2_CID_MPEG_CLASS,
	/* stream */
	V4L2_CID_MPEG_STREAM_TYPE,
	V4L2_CID_MPEG_STREAM_VBI_FMT,
	/* audio */
	V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ,
	V4L2_CID_MPEG_AUDIO_ENCODING,
	V4L2_CID_MPEG_AUDIO_L2_BITRATE,
	V4L2_CID_MPEG_AUDIO_MODE,
	V4L2_CID_MPEG_AUDIO_MODE_EXTENSION,
	V4L2_CID_MPEG_AUDIO_EMPHASIS,
	V4L2_CID_MPEG_AUDIO_CRC,
	V4L2_CID_MPEG_AUDIO_MUTE,
	V4L2_CID_MPEG_AUDIO_AC3_BITRATE,
	/* video */
	V4L2_CID_MPEG_VIDEO_ENCODING,
	V4L2_CID_MPEG_VIDEO_ASPECT,
	V4L2_CID_MPEG_VIDEO_B_FRAMES,
	V4L2_CID_MPEG_VIDEO_GOP_SIZE,
	V4L2_CID_MPEG_VIDEO_GOP_CLOSURE,
	V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
	V4L2_CID_MPEG_VIDEO_BITRATE,
	V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
	V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION,
	V4L2_CID_MPEG_VIDEO_MUTE,
	V4L2_CID_MPEG_VIDEO_MUTE_YUV,
	/* CX2341X-private encoding filters */
	V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE,
	V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER,
	V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE,
	V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE,
	V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE,
	V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER,
	V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE,
	V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM,
	V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP,
	V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM,
	V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP,
	V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS,
	0	/* terminator */
};
EXPORT_SYMBOL(cx2341x_mpeg_ctrls);
/* Power-on defaults for every encoder parameter; copied wholesale by
 * cx2341x_fill_defaults().  The values correspond to a 60 Hz (NTSC-size)
 * MPEG-2 program stream with Layer II stereo audio. */
static const struct cx2341x_mpeg_params default_params = {
	/* misc */
	.capabilities = 0,
	.port = CX2341X_PORT_MEMORY,
	.width = 720,
	.height = 480,		/* NTSC height; 50 Hz boards use is_50hz */
	.is_50hz = 0,
	/* stream */
	.stream_type = V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
	.stream_vbi_fmt = V4L2_MPEG_STREAM_VBI_FMT_NONE,
	.stream_insert_nav_packets = 0,
	/* audio */
	.audio_sampling_freq = V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000,
	.audio_encoding = V4L2_MPEG_AUDIO_ENCODING_LAYER_2,
	.audio_l2_bitrate = V4L2_MPEG_AUDIO_L2_BITRATE_224K,
	.audio_ac3_bitrate = V4L2_MPEG_AUDIO_AC3_BITRATE_224K,
	.audio_mode = V4L2_MPEG_AUDIO_MODE_STEREO,
	.audio_mode_extension = V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4,
	.audio_emphasis = V4L2_MPEG_AUDIO_EMPHASIS_NONE,
	.audio_crc = V4L2_MPEG_AUDIO_CRC_NONE,
	.audio_mute = 0,
	/* video */
	.video_encoding = V4L2_MPEG_VIDEO_ENCODING_MPEG_2,
	.video_aspect = V4L2_MPEG_VIDEO_ASPECT_4x3,
	.video_b_frames = 2,
	.video_gop_size = 12,
	.video_gop_closure = 1,
	.video_bitrate_mode = V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
	.video_bitrate = 6000000,
	.video_bitrate_peak = 8000000,
	.video_temporal_decimation = 0,
	.video_mute = 0,
	.video_mute_yuv = 0x008080,	/* YCbCr value for black */
	/* encoding filters */
	.video_spatial_filter_mode =
		V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL,
	.video_spatial_filter = 0,
	.video_luma_spatial_filter_type =
		V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR,
	.video_chroma_spatial_filter_type =
		V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR,
	.video_temporal_filter_mode =
		V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL,
	.video_temporal_filter = 8,
	.video_median_filter_type =
		V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF,
	.video_luma_median_filter_top = 255,
	.video_luma_median_filter_bottom = 0,
	.video_chroma_median_filter_top = 255,
	.video_chroma_median_filter_bottom = 0,
};
/* Read one control: map the control ID to the corresponding field in the
   cx2341x_mpeg_params struct and copy that field into ctrl->value.
   Return -EINVAL if the ID is unknown, else return 0. */
static int cx2341x_get_ctrl(const struct cx2341x_mpeg_params *params,
		struct v4l2_ext_control *ctrl)
{
	switch (ctrl->id) {
	/* audio */
	case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
		ctrl->value = params->audio_sampling_freq;
		break;
	case V4L2_CID_MPEG_AUDIO_ENCODING:
		ctrl->value = params->audio_encoding;
		break;
	case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
		ctrl->value = params->audio_l2_bitrate;
		break;
	case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
		ctrl->value = params->audio_ac3_bitrate;
		break;
	case V4L2_CID_MPEG_AUDIO_MODE:
		ctrl->value = params->audio_mode;
		break;
	case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
		ctrl->value = params->audio_mode_extension;
		break;
	case V4L2_CID_MPEG_AUDIO_EMPHASIS:
		ctrl->value = params->audio_emphasis;
		break;
	case V4L2_CID_MPEG_AUDIO_CRC:
		ctrl->value = params->audio_crc;
		break;
	case V4L2_CID_MPEG_AUDIO_MUTE:
		ctrl->value = params->audio_mute;
		break;
	/* video */
	case V4L2_CID_MPEG_VIDEO_ENCODING:
		ctrl->value = params->video_encoding;
		break;
	case V4L2_CID_MPEG_VIDEO_ASPECT:
		ctrl->value = params->video_aspect;
		break;
	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
		ctrl->value = params->video_b_frames;
		break;
	case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
		ctrl->value = params->video_gop_size;
		break;
	case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
		ctrl->value = params->video_gop_closure;
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
		ctrl->value = params->video_bitrate_mode;
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE:
		ctrl->value = params->video_bitrate;
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
		ctrl->value = params->video_bitrate_peak;
		break;
	case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION:
		ctrl->value = params->video_temporal_decimation;
		break;
	case V4L2_CID_MPEG_VIDEO_MUTE:
		ctrl->value = params->video_mute;
		break;
	case V4L2_CID_MPEG_VIDEO_MUTE_YUV:
		ctrl->value = params->video_mute_yuv;
		break;
	/* stream */
	case V4L2_CID_MPEG_STREAM_TYPE:
		ctrl->value = params->stream_type;
		break;
	case V4L2_CID_MPEG_STREAM_VBI_FMT:
		ctrl->value = params->stream_vbi_fmt;
		break;
	/* CX2341X-private encoding filters */
	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
		ctrl->value = params->video_spatial_filter_mode;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER:
		ctrl->value = params->video_spatial_filter;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
		ctrl->value = params->video_luma_spatial_filter_type;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE:
		ctrl->value = params->video_chroma_spatial_filter_type;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
		ctrl->value = params->video_temporal_filter_mode;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER:
		ctrl->value = params->video_temporal_filter;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
		ctrl->value = params->video_median_filter_type;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP:
		ctrl->value = params->video_luma_median_filter_top;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM:
		ctrl->value = params->video_luma_median_filter_bottom;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP:
		ctrl->value = params->video_chroma_median_filter_top;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM:
		ctrl->value = params->video_chroma_median_filter_bottom;
		break;
	case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
		ctrl->value = params->stream_insert_nav_packets;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/* Write one control: map the control ID to the corresponding field in the
   cx2341x_mpeg_params struct and store ctrl->value there.
   Controls that cannot change while the encoder is running return -EBUSY
   when busy is set.  Return -EINVAL if the ID is unknown (or the value
   not allowed for this board), -ERANGE for an out-of-range value, else 0. */
static int cx2341x_set_ctrl(struct cx2341x_mpeg_params *params, int busy,
		struct v4l2_ext_control *ctrl)
{
	switch (ctrl->id) {
	case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
		if (busy)
			return -EBUSY;
		params->audio_sampling_freq = ctrl->value;
		break;
	case V4L2_CID_MPEG_AUDIO_ENCODING:
		if (busy)
			return -EBUSY;
		/* boards with AC-3 support allow only Layer II or AC-3 here */
		if (params->capabilities & CX2341X_CAP_HAS_AC3)
			if (ctrl->value != V4L2_MPEG_AUDIO_ENCODING_LAYER_2 &&
			    ctrl->value != V4L2_MPEG_AUDIO_ENCODING_AC3)
				return -ERANGE;
		params->audio_encoding = ctrl->value;
		break;
	case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
		if (busy)
			return -EBUSY;
		params->audio_l2_bitrate = ctrl->value;
		break;
	case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
		if (busy)
			return -EBUSY;
		if (!(params->capabilities & CX2341X_CAP_HAS_AC3))
			return -EINVAL;
		params->audio_ac3_bitrate = ctrl->value;
		break;
	case V4L2_CID_MPEG_AUDIO_MODE:
		params->audio_mode = ctrl->value;
		break;
	case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
		params->audio_mode_extension = ctrl->value;
		break;
	case V4L2_CID_MPEG_AUDIO_EMPHASIS:
		params->audio_emphasis = ctrl->value;
		break;
	case V4L2_CID_MPEG_AUDIO_CRC:
		params->audio_crc = ctrl->value;
		break;
	case V4L2_CID_MPEG_AUDIO_MUTE:
		params->audio_mute = ctrl->value;
		break;
	case V4L2_CID_MPEG_VIDEO_ASPECT:
		params->video_aspect = ctrl->value;
		break;
	case V4L2_CID_MPEG_VIDEO_B_FRAMES: {
		/* keep the GOP size a multiple of (B-frames + 1), capped at
		   the hardware maximum of 34 frames per GOP */
		int b = ctrl->value + 1;
		int gop = params->video_gop_size;
		params->video_b_frames = ctrl->value;
		params->video_gop_size = b * ((gop + b - 1) / b);
		/* Max GOP size = 34 */
		while (params->video_gop_size > 34)
			params->video_gop_size -= b;
		break;
	}
	case V4L2_CID_MPEG_VIDEO_GOP_SIZE: {
		/* round the requested GOP size up to a multiple of
		   (B-frames + 1), then cap at 34; write the actual value
		   back into ctrl->value so the caller sees it */
		int b = params->video_b_frames + 1;
		int gop = ctrl->value;
		params->video_gop_size = b * ((gop + b - 1) / b);
		/* Max GOP size = 34 */
		while (params->video_gop_size > 34)
			params->video_gop_size -= b;
		ctrl->value = params->video_gop_size;
		break;
	}
	case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
		params->video_gop_closure = ctrl->value;
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
		if (busy)
			return -EBUSY;
		/* MPEG-1 only allows CBR */
		if (params->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1 &&
		    ctrl->value != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
			return -EINVAL;
		params->video_bitrate_mode = ctrl->value;
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE:
		if (busy)
			return -EBUSY;
		params->video_bitrate = ctrl->value;
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
		if (busy)
			return -EBUSY;
		params->video_bitrate_peak = ctrl->value;
		break;
	case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION:
		params->video_temporal_decimation = ctrl->value;
		break;
	case V4L2_CID_MPEG_VIDEO_MUTE:
		params->video_mute = (ctrl->value != 0);
		break;
	case V4L2_CID_MPEG_VIDEO_MUTE_YUV:
		params->video_mute_yuv = ctrl->value;
		break;
	case V4L2_CID_MPEG_STREAM_TYPE:
		if (busy)
			return -EBUSY;
		params->stream_type = ctrl->value;
		/* the stream type determines MPEG-1 vs MPEG-2 encoding */
		params->video_encoding =
		    (params->stream_type == V4L2_MPEG_STREAM_TYPE_MPEG1_SS ||
		     params->stream_type == V4L2_MPEG_STREAM_TYPE_MPEG1_VCD) ?
			V4L2_MPEG_VIDEO_ENCODING_MPEG_1 :
			V4L2_MPEG_VIDEO_ENCODING_MPEG_2;
		if (params->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1)
			/* MPEG-1 implies CBR */
			params->video_bitrate_mode =
				V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
		break;
	case V4L2_CID_MPEG_STREAM_VBI_FMT:
		params->stream_vbi_fmt = ctrl->value;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
		params->video_spatial_filter_mode = ctrl->value;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER:
		params->video_spatial_filter = ctrl->value;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
		params->video_luma_spatial_filter_type = ctrl->value;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE:
		params->video_chroma_spatial_filter_type = ctrl->value;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
		params->video_temporal_filter_mode = ctrl->value;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER:
		params->video_temporal_filter = ctrl->value;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
		params->video_median_filter_type = ctrl->value;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP:
		params->video_luma_median_filter_top = ctrl->value;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM:
		params->video_luma_median_filter_bottom = ctrl->value;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP:
		params->video_chroma_median_filter_top = ctrl->value;
		break;
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM:
		params->video_chroma_median_filter_bottom = ctrl->value;
		break;
	case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
		params->stream_insert_nav_packets = ctrl->value;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/* Like v4l2_ctrl_query_fill(), but handles the CX2341X-private controls:
 * for those, cx2341x_ctrl_fill() supplies the name/type/flags while the
 * caller-provided min/max/step/def are used as starting values (the menu
 * controls overwrite min and step).  All other IDs are delegated to the
 * generic v4l2_ctrl_query_fill(). */
static int cx2341x_ctrl_query_fill(struct v4l2_queryctrl *qctrl,
				   s32 min, s32 max, s32 step, s32 def)
{
	const char *name;

	switch (qctrl->id) {
	/* MPEG controls */
	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER:
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER:
	case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP:
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM:
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP:
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM:
	case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
		cx2341x_ctrl_fill(qctrl->id, &name, &qctrl->type,
				&min, &max, &step, &def, &qctrl->flags);
		qctrl->minimum = min;
		qctrl->maximum = max;
		qctrl->step = step;
		qctrl->default_value = def;
		qctrl->reserved[0] = qctrl->reserved[1] = 0;
		strlcpy(qctrl->name, name, sizeof(qctrl->name));
		return 0;

	default:
		return v4l2_ctrl_query_fill(qctrl, min, max, step, def);
	}
}
/* VIDIOC_QUERYCTRL implementation for all controls this driver handles.
 * Fills in qctrl (range, default, type, name, flags) based on qctrl->id
 * and the current state/capabilities in params; controls that are not
 * meaningful in the current configuration get the INACTIVE or DISABLED
 * flag.  Returns -EINVAL for unknown control IDs. */
int cx2341x_ctrl_query(const struct cx2341x_mpeg_params *params,
		       struct v4l2_queryctrl *qctrl)
{
	int err;

	switch (qctrl->id) {
	case V4L2_CID_MPEG_CLASS:
		return v4l2_ctrl_query_fill(qctrl, 0, 0, 0, 0);
	case V4L2_CID_MPEG_STREAM_TYPE:
		return v4l2_ctrl_query_fill(qctrl,
				V4L2_MPEG_STREAM_TYPE_MPEG2_PS,
				V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD, 1,
				V4L2_MPEG_STREAM_TYPE_MPEG2_PS);

	case V4L2_CID_MPEG_STREAM_VBI_FMT:
		/* sliced VBI embedding is only offered when the board
		   supports it; otherwise only "none" is selectable */
		if (params->capabilities & CX2341X_CAP_HAS_SLICED_VBI)
			return v4l2_ctrl_query_fill(qctrl,
					V4L2_MPEG_STREAM_VBI_FMT_NONE,
					V4L2_MPEG_STREAM_VBI_FMT_IVTV, 1,
					V4L2_MPEG_STREAM_VBI_FMT_NONE);
		return cx2341x_ctrl_query_fill(qctrl,
				V4L2_MPEG_STREAM_VBI_FMT_NONE,
				V4L2_MPEG_STREAM_VBI_FMT_NONE, 1,
				default_params.stream_vbi_fmt);

	case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
		return v4l2_ctrl_query_fill(qctrl,
				V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100,
				V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000, 1,
				V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000);

	case V4L2_CID_MPEG_AUDIO_ENCODING:
		if (params->capabilities & CX2341X_CAP_HAS_AC3) {
			/*
			 * The state of L2 & AC3 bitrate controls can change
			 * when this control changes, but v4l2_ctrl_query_fill()
			 * already sets V4L2_CTRL_FLAG_UPDATE for
			 * V4L2_CID_MPEG_AUDIO_ENCODING, so we don't here.
			 */
			return v4l2_ctrl_query_fill(qctrl,
					V4L2_MPEG_AUDIO_ENCODING_LAYER_2,
					V4L2_MPEG_AUDIO_ENCODING_AC3, 1,
					default_params.audio_encoding);
		}

		return v4l2_ctrl_query_fill(qctrl,
				V4L2_MPEG_AUDIO_ENCODING_LAYER_2,
				V4L2_MPEG_AUDIO_ENCODING_LAYER_2, 1,
				default_params.audio_encoding);

	case V4L2_CID_MPEG_AUDIO_L2_BITRATE:
		err = v4l2_ctrl_query_fill(qctrl,
				V4L2_MPEG_AUDIO_L2_BITRATE_192K,
				V4L2_MPEG_AUDIO_L2_BITRATE_384K, 1,
				default_params.audio_l2_bitrate);
		if (err)
			return err;
		/* inactive while the encoder is set to AC-3 */
		if (params->capabilities & CX2341X_CAP_HAS_AC3 &&
		    params->audio_encoding != V4L2_MPEG_AUDIO_ENCODING_LAYER_2)
			qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		return 0;

	case V4L2_CID_MPEG_AUDIO_MODE:
		return v4l2_ctrl_query_fill(qctrl,
				V4L2_MPEG_AUDIO_MODE_STEREO,
				V4L2_MPEG_AUDIO_MODE_MONO, 1,
				V4L2_MPEG_AUDIO_MODE_STEREO);

	case V4L2_CID_MPEG_AUDIO_MODE_EXTENSION:
		/* only meaningful in joint-stereo mode */
		err = v4l2_ctrl_query_fill(qctrl,
				V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4,
				V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16, 1,
				V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4);
		if (err == 0 &&
		    params->audio_mode != V4L2_MPEG_AUDIO_MODE_JOINT_STEREO)
			qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		return err;

	case V4L2_CID_MPEG_AUDIO_EMPHASIS:
		return v4l2_ctrl_query_fill(qctrl,
				V4L2_MPEG_AUDIO_EMPHASIS_NONE,
				V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17, 1,
				V4L2_MPEG_AUDIO_EMPHASIS_NONE);

	case V4L2_CID_MPEG_AUDIO_CRC:
		return v4l2_ctrl_query_fill(qctrl,
				V4L2_MPEG_AUDIO_CRC_NONE,
				V4L2_MPEG_AUDIO_CRC_CRC16, 1,
				V4L2_MPEG_AUDIO_CRC_NONE);

	case V4L2_CID_MPEG_AUDIO_MUTE:
		return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 0);

	case V4L2_CID_MPEG_AUDIO_AC3_BITRATE:
		err = v4l2_ctrl_query_fill(qctrl,
				V4L2_MPEG_AUDIO_AC3_BITRATE_48K,
				V4L2_MPEG_AUDIO_AC3_BITRATE_448K, 1,
				default_params.audio_ac3_bitrate);
		if (err)
			return err;
		/* disabled entirely on boards without AC-3; inactive when
		   present but Layer II is selected */
		if (params->capabilities & CX2341X_CAP_HAS_AC3) {
			if (params->audio_encoding !=
						V4L2_MPEG_AUDIO_ENCODING_AC3)
				qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		} else
			qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
		return 0;

	case V4L2_CID_MPEG_VIDEO_ENCODING:
		/* this setting is read-only for the cx2341x since the
		   V4L2_CID_MPEG_STREAM_TYPE really determines the
		   MPEG-1/2 setting */
		err = v4l2_ctrl_query_fill(qctrl,
				V4L2_MPEG_VIDEO_ENCODING_MPEG_1,
				V4L2_MPEG_VIDEO_ENCODING_MPEG_2, 1,
				V4L2_MPEG_VIDEO_ENCODING_MPEG_2);
		if (err == 0)
			qctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
		return err;

	case V4L2_CID_MPEG_VIDEO_ASPECT:
		return v4l2_ctrl_query_fill(qctrl,
				V4L2_MPEG_VIDEO_ASPECT_1x1,
				V4L2_MPEG_VIDEO_ASPECT_221x100, 1,
				V4L2_MPEG_VIDEO_ASPECT_4x3);

	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
		return v4l2_ctrl_query_fill(qctrl, 0, 33, 1, 2);

	case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
		/* default GOP size depends on the field rate */
		return v4l2_ctrl_query_fill(qctrl, 1, 34, 1,
				params->is_50hz ? 12 : 15);

	case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
		return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 1);

	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
		/* MPEG-1 only supports CBR, so the control is inactive then */
		err = v4l2_ctrl_query_fill(qctrl,
				V4L2_MPEG_VIDEO_BITRATE_MODE_VBR,
				V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 1,
				V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
		if (err == 0 &&
		    params->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1)
			qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		return err;

	case V4L2_CID_MPEG_VIDEO_BITRATE:
		return v4l2_ctrl_query_fill(qctrl, 0, 27000000, 1, 6000000);

	case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
		/* peak bitrate is meaningless in CBR mode */
		err = v4l2_ctrl_query_fill(qctrl, 0, 27000000, 1, 8000000);
		if (err == 0 &&
		    params->video_bitrate_mode ==
				V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
			qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		return err;

	case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION:
		return v4l2_ctrl_query_fill(qctrl, 0, 255, 1, 0);

	case V4L2_CID_MPEG_VIDEO_MUTE:
		return v4l2_ctrl_query_fill(qctrl, 0, 1, 1, 0);

	case V4L2_CID_MPEG_VIDEO_MUTE_YUV:  /* Init YUV (really YCbCr) to black */
		return v4l2_ctrl_query_fill(qctrl, 0, 0xffffff, 1, 0x008080);

	/* CX23415/6 specific */
	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
		return cx2341x_ctrl_query_fill(qctrl,
			V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL,
			V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO, 1,
			default_params.video_spatial_filter_mode);

	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER:
		cx2341x_ctrl_query_fill(qctrl, 0, 15, 1,
				default_params.video_spatial_filter);
		qctrl->flags |= V4L2_CTRL_FLAG_SLIDER;
		/* manual level is inactive while the mode is auto */
		if (params->video_spatial_filter_mode ==
			    V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO)
			qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		return 0;

	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
		cx2341x_ctrl_query_fill(qctrl,
			V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF,
			V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE,
			1,
			default_params.video_luma_spatial_filter_type);
		if (params->video_spatial_filter_mode ==
			    V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO)
			qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		return 0;

	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE:
		cx2341x_ctrl_query_fill(qctrl,
			V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF,
			V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR,
			1,
			default_params.video_chroma_spatial_filter_type);
		if (params->video_spatial_filter_mode ==
			    V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO)
			qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		return 0;

	case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
		return cx2341x_ctrl_query_fill(qctrl,
			V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL,
			V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO, 1,
			default_params.video_temporal_filter_mode);

	case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER:
		cx2341x_ctrl_query_fill(qctrl, 0, 31, 1,
				default_params.video_temporal_filter);
		qctrl->flags |= V4L2_CTRL_FLAG_SLIDER;
		/* manual level is inactive while the mode is auto */
		if (params->video_temporal_filter_mode ==
			    V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO)
			qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		return 0;

	case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
		return cx2341x_ctrl_query_fill(qctrl,
			V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF,
			V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG, 1,
			default_params.video_median_filter_type);

	/* the four coring levels below are inactive while the median
	   filter is off */
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP:
		cx2341x_ctrl_query_fill(qctrl, 0, 255, 1,
				default_params.video_luma_median_filter_top);
		qctrl->flags |= V4L2_CTRL_FLAG_SLIDER;
		if (params->video_median_filter_type ==
				V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF)
			qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		return 0;

	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM:
		cx2341x_ctrl_query_fill(qctrl, 0, 255, 1,
				default_params.video_luma_median_filter_bottom);
		qctrl->flags |= V4L2_CTRL_FLAG_SLIDER;
		if (params->video_median_filter_type ==
				V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF)
			qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		return 0;

	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP:
		cx2341x_ctrl_query_fill(qctrl, 0, 255, 1,
				default_params.video_chroma_median_filter_top);
		qctrl->flags |= V4L2_CTRL_FLAG_SLIDER;
		if (params->video_median_filter_type ==
				V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF)
			qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		return 0;

	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM:
		cx2341x_ctrl_query_fill(qctrl, 0, 255, 1,
				default_params.video_chroma_median_filter_bottom);
		qctrl->flags |= V4L2_CTRL_FLAG_SLIDER;
		if (params->video_median_filter_type ==
				V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF)
			qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
		return 0;

	case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
		return cx2341x_ctrl_query_fill(qctrl, 0, 1, 1,
				default_params.stream_insert_nav_packets);

	default:
		return -EINVAL;

	}
}
EXPORT_SYMBOL(cx2341x_ctrl_query);
/* Return the menu (NULL-terminated array of item names) for control @id,
 * taking the board capabilities in @p into account: the stream-type menu
 * only lists "Transport Stream" on boards with TS support, and the audio
 * encoding menu offers AC-3 only on boards that have it.  Bitrate menus
 * for unsupported layers return NULL; everything else falls back to the
 * generic V4L2 menu.
 *
 * Note: all three local tables are declared "const char * const" so they
 * convert to the return type without discarding qualifiers (two of them
 * previously lacked the inner const, which C flags as an incompatible
 * pointer conversion). */
const char * const *cx2341x_ctrl_get_menu(const struct cx2341x_mpeg_params *p, u32 id)
{
	static const char * const mpeg_stream_type_without_ts[] = {
		"MPEG-2 Program Stream",
		"",
		"MPEG-1 System Stream",
		"MPEG-2 DVD-compatible Stream",
		"MPEG-1 VCD-compatible Stream",
		"MPEG-2 SVCD-compatible Stream",
		NULL
	};

	static const char * const mpeg_stream_type_with_ts[] = {
		"MPEG-2 Program Stream",
		"MPEG-2 Transport Stream",
		"MPEG-1 System Stream",
		"MPEG-2 DVD-compatible Stream",
		"MPEG-1 VCD-compatible Stream",
		"MPEG-2 SVCD-compatible Stream",
		NULL
	};

	static const char * const mpeg_audio_encoding_l2_ac3[] = {
		"",
		"MPEG-1/2 Layer II",
		"",
		"",
		"AC-3",
		NULL
	};

	switch (id) {
	case V4L2_CID_MPEG_STREAM_TYPE:
		return (p->capabilities & CX2341X_CAP_HAS_TS) ?
			mpeg_stream_type_with_ts : mpeg_stream_type_without_ts;
	case V4L2_CID_MPEG_AUDIO_ENCODING:
		return (p->capabilities & CX2341X_CAP_HAS_AC3) ?
			mpeg_audio_encoding_l2_ac3 : v4l2_ctrl_get_menu(id);
	case V4L2_CID_MPEG_AUDIO_L1_BITRATE:
	case V4L2_CID_MPEG_AUDIO_L3_BITRATE:
		return NULL;
	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE:
	case V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE:
		return cx2341x_get_menu(id);
	default:
		return v4l2_ctrl_get_menu(id);
	}
}
EXPORT_SYMBOL(cx2341x_ctrl_get_menu);
/* Pack the individual audio parameters into the single audio_properties
 * word the firmware expects.  Field layout as used below:
 *   bits  0-1: sampling frequency
 *   bits  2-3: encoding layer (stored as 3 - layer value)
 *   bits  4-7: bitrate index
 *   bits  8-9: audio mode
 *   bits 10-11: mode extension
 *   bits 12-13: emphasis (CCITT J.17 is remapped to 3)
 *   bit  14:   CRC
 *   bits 28-29: encoding method (AC-3 vs MPEG) — AC-3-capable boards only */
static void cx2341x_calc_audio_properties(struct cx2341x_mpeg_params *params)
{
	params->audio_properties =
		(params->audio_sampling_freq << 0) |
		(params->audio_mode << 8) |
		(params->audio_mode_extension << 10) |
		(((params->audio_emphasis == V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17)
		  ? 3 : params->audio_emphasis) << 12) |
		(params->audio_crc << 14);

	if ((params->capabilities & CX2341X_CAP_HAS_AC3) &&
	    params->audio_encoding == V4L2_MPEG_AUDIO_ENCODING_AC3) {
		params->audio_properties |=
			/* Not sure if this MPEG Layer II setting is required */
			((3 - V4L2_MPEG_AUDIO_ENCODING_LAYER_2) << 2) |
			(params->audio_ac3_bitrate << 4) |
			(CX2341X_AUDIO_ENCODING_METHOD_AC3 << 28);
	} else {
		/* Assuming MPEG Layer II */
		params->audio_properties |=
			((3 - params->audio_encoding) << 2) |
			((1 + params->audio_l2_bitrate) << 4);
	}
}
/* Handle VIDIOC_G_EXT_CTRLS / VIDIOC_S_EXT_CTRLS / VIDIOC_TRY_EXT_CTRLS
 * for the control array in ctrls.  For get requests each control is read
 * from params; for set/try requests each control is range-checked against
 * its queryctrl description before being stored.  On failure error_idx
 * identifies the offending control (or ctrls->count for the cross-control
 * VBR peak-vs-average check).  On success the packed audio_properties
 * word is recomputed. */
int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params, int busy,
		  struct v4l2_ext_controls *ctrls, unsigned int cmd)
{
	int err = 0;
	int i;

	if (cmd == VIDIOC_G_EXT_CTRLS) {
		for (i = 0; i < ctrls->count; i++) {
			struct v4l2_ext_control *ctrl = ctrls->controls + i;

			err = cx2341x_get_ctrl(params, ctrl);
			if (err) {
				ctrls->error_idx = i;
				break;
			}
		}
		return err;
	}
	for (i = 0; i < ctrls->count; i++) {
		struct v4l2_ext_control *ctrl = ctrls->controls + i;
		struct v4l2_queryctrl qctrl;
		const char * const *menu_items = NULL;

		qctrl.id = ctrl->id;
		err = cx2341x_ctrl_query(params, &qctrl);
		if (err)
			break;
		if (qctrl.type == V4L2_CTRL_TYPE_MENU)
			menu_items = cx2341x_ctrl_get_menu(params, qctrl.id);
		/* validate the value against range/menu before storing it */
		err = v4l2_ctrl_check(ctrl, &qctrl, menu_items);
		if (err)
			break;
		err = cx2341x_set_ctrl(params, busy, ctrl);
		if (err)
			break;
	}
	/* in VBR mode the peak bitrate must not be below the average */
	if (err == 0 &&
	    params->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR &&
	    params->video_bitrate_peak < params->video_bitrate) {
		err = -ERANGE;
		ctrls->error_idx = ctrls->count;
	}
	if (err)
		ctrls->error_idx = i;
	else
		cx2341x_calc_audio_properties(params);
	return err;
}
EXPORT_SYMBOL(cx2341x_ext_ctrls);
/* Initialize *p to the driver defaults and derive the packed
 * audio_properties word from them. */
void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p)
{
	*p = default_params;
	cx2341x_calc_audio_properties(p);
}
EXPORT_SYMBOL(cx2341x_fill_defaults);
/* Varargs front-end for the board mailbox: gather up to
 * CX2341X_MBOX_MAX_DATA integer arguments into a data array and pass
 * them to the supplied mailbox function.
 *
 * Returns the mailbox function's result, or -EINVAL if @args does not
 * fit in the data array.  The bounds check guards against a stack
 * buffer overrun from a miscounted call site; every current caller
 * passes at most 5 arguments, so behavior is unchanged for them. */
static int cx2341x_api(void *priv, cx2341x_mbox_func func,
		       u32 cmd, int args, ...)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	va_list vargs;
	int i;

	if (args < 0 || args > CX2341X_MBOX_MAX_DATA)
		return -EINVAL;

	va_start(vargs, args);
	for (i = 0; i < args; i++)
		data[i] = va_arg(vargs, int);
	va_end(vargs);
	return func(priv, cmd, args, 0, data);
}
/* True when the field differs between the old and new parameter sets.
   Only evaluated when old != NULL (see the short-circuit on "force"). */
#define NEQ(field) (old->field != new->field)

/* Push the parameter set in *new to the encoder via the mailbox.
 * When *old is given, only the parameters that changed are sent;
 * when old is NULL everything is (re)programmed.  Note that every
 * NEQ()/old-> use below is short-circuited behind "force", so old is
 * never dereferenced when it is NULL.  Returns 0 or the first mailbox
 * error. */
int cx2341x_update(void *priv, cx2341x_mbox_func func,
		   const struct cx2341x_mpeg_params *old,
		   const struct cx2341x_mpeg_params *new)
{
	/* translate V4L2_MPEG_STREAM_TYPE_* to the firmware's codes */
	static int mpeg_stream_type[] = {
		0,	/* MPEG-2 PS */
		1,	/* MPEG-2 TS */
		2,	/* MPEG-1 SS */
		14,	/* DVD */
		11,	/* VCD */
		12,	/* SVCD */
	};
	int err = 0;
	int force = (old == NULL);
	u16 temporal = new->video_temporal_filter;

	cx2341x_api(priv, func, CX2341X_ENC_SET_OUTPUT_PORT, 2, new->port, 0);

	if (force || NEQ(is_50hz)) {
		err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_RATE, 1,
				  new->is_50hz);
		if (err) return err;
	}

	if (force || NEQ(width) || NEQ(height) || NEQ(video_encoding)) {
		u16 w = new->width;
		u16 h = new->height;

		/* MPEG-1 is encoded at half resolution */
		if (new->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) {
			w /= 2;
			h /= 2;
		}
		err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_SIZE, 2,
				  h, w);
		if (err) return err;
	}

	if (force || NEQ(stream_type)) {
		err = cx2341x_api(priv, func, CX2341X_ENC_SET_STREAM_TYPE, 1,
				  mpeg_stream_type[new->stream_type]);
		if (err) return err;
	}
	if (force || NEQ(video_aspect)) {
		err = cx2341x_api(priv, func, CX2341X_ENC_SET_ASPECT_RATIO, 1,
				  1 + new->video_aspect);
		if (err) return err;
	}
	if (force || NEQ(video_b_frames) || NEQ(video_gop_size)) {
		err = cx2341x_api(priv, func, CX2341X_ENC_SET_GOP_PROPERTIES, 2,
				  new->video_gop_size, new->video_b_frames + 1);
		if (err) return err;
	}
	if (force || NEQ(video_gop_closure)) {
		err = cx2341x_api(priv, func, CX2341X_ENC_SET_GOP_CLOSURE, 1,
				  new->video_gop_closure);
		if (err) return err;
	}
	if (force || NEQ(audio_properties)) {
		err = cx2341x_api(priv, func, CX2341X_ENC_SET_AUDIO_PROPERTIES,
				  1, new->audio_properties);
		if (err) return err;
	}
	if (force || NEQ(audio_mute)) {
		err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_AUDIO, 1,
				  new->audio_mute);
		if (err) return err;
	}
	if (force || NEQ(video_bitrate_mode) || NEQ(video_bitrate) ||
	    NEQ(video_bitrate_peak)) {
		/* peak is programmed in units of 400 bps */
		err = cx2341x_api(priv, func, CX2341X_ENC_SET_BIT_RATE, 5,
				  new->video_bitrate_mode, new->video_bitrate,
				  new->video_bitrate_peak / 400, 0, 0);
		if (err) return err;
	}
	if (force || NEQ(video_spatial_filter_mode) ||
		     NEQ(video_temporal_filter_mode) ||
		     NEQ(video_median_filter_type)) {
		err = cx2341x_api(priv, func, CX2341X_ENC_SET_DNR_FILTER_MODE,
				  2, new->video_spatial_filter_mode |
					(new->video_temporal_filter_mode << 1),
				  new->video_median_filter_type);
		if (err) return err;
	}
	if (force || NEQ(video_luma_median_filter_bottom) ||
		     NEQ(video_luma_median_filter_top) ||
		     NEQ(video_chroma_median_filter_bottom) ||
		     NEQ(video_chroma_median_filter_top)) {
		err = cx2341x_api(priv, func, CX2341X_ENC_SET_CORING_LEVELS, 4,
				  new->video_luma_median_filter_bottom,
				  new->video_luma_median_filter_top,
				  new->video_chroma_median_filter_bottom,
				  new->video_chroma_median_filter_top);
		if (err) return err;
	}
	if (force || NEQ(video_luma_spatial_filter_type) ||
		     NEQ(video_chroma_spatial_filter_type)) {
		err = cx2341x_api(priv, func,
				  CX2341X_ENC_SET_SPATIAL_FILTER_TYPE,
				  2, new->video_luma_spatial_filter_type,
				  new->video_chroma_spatial_filter_type);
		if (err) return err;
	}
	if (force || NEQ(video_spatial_filter) ||
		     old->video_temporal_filter != temporal) {
		err = cx2341x_api(priv, func, CX2341X_ENC_SET_DNR_FILTER_PROPS,
				  2, new->video_spatial_filter, temporal);
		if (err) return err;
	}
	if (force || NEQ(video_temporal_decimation)) {
		err = cx2341x_api(priv, func, CX2341X_ENC_SET_FRAME_DROP_RATE,
				  1, new->video_temporal_decimation);
		if (err) return err;
	}
	if (force || NEQ(video_mute) ||
		(new->video_mute && NEQ(video_mute_yuv))) {
		err = cx2341x_api(priv, func, CX2341X_ENC_MUTE_VIDEO, 1,
				  new->video_mute | (new->video_mute_yuv << 8));
		if (err) return err;
	}
	if (force || NEQ(stream_insert_nav_packets)) {
		err = cx2341x_api(priv, func, CX2341X_ENC_MISC, 2,
				  7, new->stream_insert_nav_packets);
		if (err) return err;
	}
	return 0;
}
EXPORT_SYMBOL(cx2341x_update);
/* Look up the name of the currently-selected menu item for control @id.
 * Returns "<invalid>" when the control has no menu, its value cannot be
 * read, or the value indexes past the end of the menu table. */
static const char *cx2341x_menu_item(const struct cx2341x_mpeg_params *p, u32 id)
{
	const char * const *item = cx2341x_ctrl_get_menu(p, id);
	struct v4l2_ext_control ctrl;
	s32 steps;

	if (item == NULL)
		return "<invalid>";
	ctrl.id = id;
	if (cx2341x_get_ctrl(p, &ctrl))
		return "<invalid>";
	/* advance ctrl.value entries, stopping early at the NULL sentinel */
	for (steps = ctrl.value; steps-- && *item; item++)
		;
	return (*item != NULL) ? *item : "<invalid>";
}
/*
 * Dump the current MPEG encoding parameters to the kernel log, one
 * KERN_INFO line per logical group (stream, video, audio, filters).
 * 'prefix' identifies the driver instance in each line.
 */
void cx2341x_log_status(const struct cx2341x_mpeg_params *p, const char *prefix)
{
	/* MPEG-1 is encoded at half width/height (see the /2 below). */
	int is_mpeg1 = p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1;

	/* Stream */
	printk(KERN_INFO "%s: Stream: %s",
		prefix,
		cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE));
	if (p->stream_insert_nav_packets)
		printk(" (with navigation packets)");
	printk("\n");
	printk(KERN_INFO "%s: VBI Format: %s\n",
		prefix,
		cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_VBI_FMT));

	/* Video */
	printk(KERN_INFO "%s: Video: %dx%d, %d fps%s\n",
		prefix,
		p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1),
		p->is_50hz ? 25 : 30,
		(p->video_mute) ? " (muted)" : "");
	printk(KERN_INFO "%s: Video: %s, %s, %s, %d",
		prefix,
		cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING),
		cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ASPECT),
		cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE),
		p->video_bitrate);
	/* Peak bitrate is only meaningful in VBR mode. */
	if (p->video_bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
		printk(", Peak %d", p->video_bitrate_peak);
	printk("\n");
	printk(KERN_INFO
		"%s: Video: GOP Size %d, %d B-Frames, %sGOP Closure\n",
		prefix,
		p->video_gop_size, p->video_b_frames,
		p->video_gop_closure ? "" : "No ");
	if (p->video_temporal_decimation)
		printk(KERN_INFO "%s: Video: Temporal Decimation %d\n",
			prefix, p->video_temporal_decimation);

	/* Audio: bitrate menu depends on the chosen audio codec. */
	printk(KERN_INFO "%s: Audio: %s, %s, %s, %s%s",
		prefix,
		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ),
		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING),
		cx2341x_menu_item(p,
			p->audio_encoding == V4L2_MPEG_AUDIO_ENCODING_AC3
				? V4L2_CID_MPEG_AUDIO_AC3_BITRATE
				: V4L2_CID_MPEG_AUDIO_L2_BITRATE),
		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_MODE),
		p->audio_mute ? " (muted)" : "");
	if (p->audio_mode == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO)
		printk(", %s", cx2341x_menu_item(p,
			V4L2_CID_MPEG_AUDIO_MODE_EXTENSION));
	printk(", %s, %s\n",
		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_EMPHASIS),
		cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC));

	/* Encoding filters */
	printk(KERN_INFO "%s: Spatial Filter: %s, Luma %s, Chroma %s, %d\n",
		prefix,
		cx2341x_menu_item(p,
			V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE),
		cx2341x_menu_item(p,
			V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE),
		cx2341x_menu_item(p,
			V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE),
		p->video_spatial_filter);
	printk(KERN_INFO "%s: Temporal Filter: %s, %d\n",
		prefix,
		cx2341x_menu_item(p,
			V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE),
		p->video_temporal_filter);
	printk(KERN_INFO
		"%s: Median Filter: %s, Luma [%d, %d], Chroma [%d, %d]\n",
		prefix,
		cx2341x_menu_item(p,
			V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE),
		p->video_luma_median_filter_bottom,
		p->video_luma_median_filter_top,
		p->video_chroma_median_filter_bottom,
		p->video_chroma_median_filter_top);
}
EXPORT_SYMBOL(cx2341x_log_status);
/********************** NEW CODE *********************/
/* Recover the cx2341x handler that embeds this control's
 * v4l2_ctrl_handler (the 'hdl' member). */
static inline struct cx2341x_handler *to_cxhdl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_ctrl_handler *handler = ctrl->handler;

	return container_of(handler, struct cx2341x_handler, hdl);
}
/* Gather 'args' variadic ints into a mailbox data buffer and forward
 * them to the driver-supplied firmware call in hdl->func. */
static int cx2341x_hdl_api(struct cx2341x_handler *hdl,
		u32 cmd, int args, ...)
{
	u32 argbuf[CX2341X_MBOX_MAX_DATA];
	va_list ap;
	int n = 0;

	va_start(ap, args);
	while (n < args) {
		argbuf[n] = va_arg(ap, int);
		n++;
	}
	va_end(ap);
	return hdl->func(hdl->priv, cmd, args, 0, argbuf);
}
/* ctrl->handler->lock is held, so it is safe to access cur.val */
static inline int cx2341x_neq(struct v4l2_ctrl *ctrl)
{
return ctrl && ctrl->val != ctrl->cur.val;
}
/*
 * try_ctrl handler: make the other controls in the same cluster
 * consistent with the requested value before anything is applied.
 */
static int cx2341x_try_ctrl(struct v4l2_ctrl *ctrl)
{
	struct cx2341x_handler *hdl = to_cxhdl(ctrl);
	s32 val = ctrl->val;

	switch (ctrl->id) {
	case V4L2_CID_MPEG_VIDEO_B_FRAMES: {
		/* video gop cluster */
		int b = val + 1;
		int gop = hdl->video_gop_size->val;

		/* Round GOP size up to a multiple of (B-frames + 1)... */
		gop = b * ((gop + b - 1) / b);

		/* Max GOP size = 34 */
		while (gop > 34)
			gop -= b;
		hdl->video_gop_size->val = gop;
		break;
	}

	case V4L2_CID_MPEG_STREAM_TYPE:
		/* stream type cluster: the stream type dictates the video
		 * encoding, which in turn constrains the bitrate mode. */
		hdl->video_encoding->val =
			(hdl->stream_type->val == V4L2_MPEG_STREAM_TYPE_MPEG1_SS ||
			 hdl->stream_type->val == V4L2_MPEG_STREAM_TYPE_MPEG1_VCD) ?
			V4L2_MPEG_VIDEO_ENCODING_MPEG_1 :
			V4L2_MPEG_VIDEO_ENCODING_MPEG_2;
		if (hdl->video_encoding->val == V4L2_MPEG_VIDEO_ENCODING_MPEG_1)
			/* MPEG-1 implies CBR */
			hdl->video_bitrate_mode->val =
				V4L2_MPEG_VIDEO_BITRATE_MODE_CBR;
		/* peak bitrate shall be >= normal bitrate */
		if (hdl->video_bitrate_mode->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR &&
		    hdl->video_bitrate_peak->val < hdl->video_bitrate->val)
			hdl->video_bitrate_peak->val = hdl->video_bitrate->val;
		break;
	}
	return 0;
}
/*
 * s_ctrl handler: program the new control value(s) into the encoder
 * firmware.  For clustered controls the ID is the cluster master and
 * all values in the cluster are applied in one firmware call.
 */
static int cx2341x_s_ctrl(struct v4l2_ctrl *ctrl)
{
	/* Map V4L2_MPEG_STREAM_TYPE_* menu indices to the firmware's
	 * stream type codes. */
	static const int mpeg_stream_type[] = {
		0,	/* MPEG-2 PS */
		1,	/* MPEG-2 TS */
		2,	/* MPEG-1 SS */
		14,	/* DVD */
		11,	/* VCD */
		12,	/* SVCD */
	};
	struct cx2341x_handler *hdl = to_cxhdl(ctrl);
	s32 val = ctrl->val;
	u32 props;
	int err;

	switch (ctrl->id) {
	case V4L2_CID_MPEG_STREAM_VBI_FMT:
		if (hdl->ops && hdl->ops->s_stream_vbi_fmt)
			return hdl->ops->s_stream_vbi_fmt(hdl, val);
		return 0;

	case V4L2_CID_MPEG_VIDEO_ASPECT:
		/* Firmware aspect codes are 1-based, menu is 0-based. */
		return cx2341x_hdl_api(hdl,
			CX2341X_ENC_SET_ASPECT_RATIO, 1, val + 1);

	case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE:
		return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_GOP_CLOSURE, 1, val);

	case V4L2_CID_MPEG_AUDIO_MUTE:
		return cx2341x_hdl_api(hdl, CX2341X_ENC_MUTE_AUDIO, 1, val);

	case V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION:
		return cx2341x_hdl_api(hdl,
			CX2341X_ENC_SET_FRAME_DROP_RATE, 1, val);

	case V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS:
		return cx2341x_hdl_api(hdl, CX2341X_ENC_MISC, 2, 7, val);

	case V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ:
		/* audio properties cluster: pack all audio settings into
		 * one firmware properties word. */
		props = (hdl->audio_sampling_freq->val << 0) |
			(hdl->audio_mode->val << 8) |
			(hdl->audio_mode_extension->val << 10) |
			(hdl->audio_crc->val << 14);

		if (hdl->audio_emphasis->val == V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17)
			props |= 3 << 12;
		else
			props |= hdl->audio_emphasis->val << 12;

		if (hdl->audio_encoding->val == V4L2_MPEG_AUDIO_ENCODING_AC3) {
			props |=
#if 1
				/* Not sure if this MPEG Layer II setting is required */
				((3 - V4L2_MPEG_AUDIO_ENCODING_LAYER_2) << 2) |
#endif
				(hdl->audio_ac3_bitrate->val << 4) |
				(CX2341X_AUDIO_ENCODING_METHOD_AC3 << 28);
		} else {
			/* Assuming MPEG Layer II */
			props |=
				((3 - hdl->audio_encoding->val) << 2) |
				((1 + hdl->audio_l2_bitrate->val) << 4);
		}
		err = cx2341x_hdl_api(hdl,
				CX2341X_ENC_SET_AUDIO_PROPERTIES, 1, props);
		if (err)
			return err;

		hdl->audio_properties = props;
		/* Only the bitrate control matching the chosen codec
		 * stays active. */
		if (hdl->audio_ac3_bitrate) {
			int is_ac3 = hdl->audio_encoding->val ==
					V4L2_MPEG_AUDIO_ENCODING_AC3;

			v4l2_ctrl_activate(hdl->audio_ac3_bitrate, is_ac3);
			v4l2_ctrl_activate(hdl->audio_l2_bitrate, !is_ac3);
		}
		v4l2_ctrl_activate(hdl->audio_mode_extension,
			hdl->audio_mode->val == V4L2_MPEG_AUDIO_MODE_JOINT_STEREO);
		/* Notify the driver of sampling-freq/mode changes. */
		if (cx2341x_neq(hdl->audio_sampling_freq) &&
		    hdl->ops && hdl->ops->s_audio_sampling_freq)
			return hdl->ops->s_audio_sampling_freq(hdl, hdl->audio_sampling_freq->val);
		if (cx2341x_neq(hdl->audio_mode) &&
		    hdl->ops && hdl->ops->s_audio_mode)
			return hdl->ops->s_audio_mode(hdl, hdl->audio_mode->val);
		return 0;

	case V4L2_CID_MPEG_VIDEO_B_FRAMES:
		/* video gop cluster */
		return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_GOP_PROPERTIES, 2,
				hdl->video_gop_size->val,
				hdl->video_b_frames->val + 1);

	case V4L2_CID_MPEG_STREAM_TYPE:
		/* stream type cluster */
		err = cx2341x_hdl_api(hdl,
			CX2341X_ENC_SET_STREAM_TYPE, 1, mpeg_stream_type[val]);
		if (err)
			return err;

		/* Note: peak bitrate is passed in units of 400 bps. */
		err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_BIT_RATE, 5,
				hdl->video_bitrate_mode->val,
				hdl->video_bitrate->val,
				hdl->video_bitrate_peak->val / 400, 0, 0);
		if (err)
			return err;

		v4l2_ctrl_activate(hdl->video_bitrate_mode,
			hdl->video_encoding->val != V4L2_MPEG_VIDEO_ENCODING_MPEG_1);
		v4l2_ctrl_activate(hdl->video_bitrate_peak,
			hdl->video_bitrate_mode->val != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
		if (cx2341x_neq(hdl->video_encoding) &&
		    hdl->ops && hdl->ops->s_video_encoding)
			return hdl->ops->s_video_encoding(hdl, hdl->video_encoding->val);
		return 0;

	case V4L2_CID_MPEG_VIDEO_MUTE:
		/* video mute cluster: mute flag in bit 0, YUV fill colour
		 * in bits 8+. */
		return cx2341x_hdl_api(hdl, CX2341X_ENC_MUTE_VIDEO, 1,
				hdl->video_mute->val |
					(hdl->video_mute_yuv->val << 8));

	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE: {
		int active_filter;

		/* video filter mode */
		err = cx2341x_hdl_api(hdl, CX2341X_ENC_SET_DNR_FILTER_MODE, 2,
				hdl->video_spatial_filter_mode->val |
					(hdl->video_temporal_filter_mode->val << 1),
				hdl->video_median_filter_type->val);
		if (err)
			return err;

		/* Manual filter settings only matter when the matching
		 * filter is not in AUTO/OFF mode. */
		active_filter = hdl->video_spatial_filter_mode->val !=
				V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO;
		v4l2_ctrl_activate(hdl->video_spatial_filter, active_filter);
		v4l2_ctrl_activate(hdl->video_luma_spatial_filter_type, active_filter);
		v4l2_ctrl_activate(hdl->video_chroma_spatial_filter_type, active_filter);
		active_filter = hdl->video_temporal_filter_mode->val !=
				V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO;
		v4l2_ctrl_activate(hdl->video_temporal_filter, active_filter);
		active_filter = hdl->video_median_filter_type->val !=
				V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF;
		v4l2_ctrl_activate(hdl->video_luma_median_filter_bottom, active_filter);
		v4l2_ctrl_activate(hdl->video_luma_median_filter_top, active_filter);
		v4l2_ctrl_activate(hdl->video_chroma_median_filter_bottom, active_filter);
		v4l2_ctrl_activate(hdl->video_chroma_median_filter_top, active_filter);
		return 0;
	}

	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE:
		/* video filter type cluster */
		return cx2341x_hdl_api(hdl,
				CX2341X_ENC_SET_SPATIAL_FILTER_TYPE, 2,
				hdl->video_luma_spatial_filter_type->val,
				hdl->video_chroma_spatial_filter_type->val);

	case V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER:
		/* video filter cluster */
		return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_DNR_FILTER_PROPS, 2,
				hdl->video_spatial_filter->val,
				hdl->video_temporal_filter->val);

	case V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP:
		/* video median cluster */
		return cx2341x_hdl_api(hdl, CX2341X_ENC_SET_CORING_LEVELS, 4,
				hdl->video_luma_median_filter_bottom->val,
				hdl->video_luma_median_filter_top->val,
				hdl->video_chroma_median_filter_bottom->val,
				hdl->video_chroma_median_filter_top->val);
	}
	/* Unhandled control ID. */
	return -EINVAL;
}
/* Control ops shared by every cx2341x control. */
static const struct v4l2_ctrl_ops cx2341x_ops = {
	.try_ctrl = cx2341x_try_ctrl,
	.s_ctrl = cx2341x_s_ctrl,
};
/*
 * Register one CX2341X-private control.  cx2341x_ctrl_fill() supplies
 * name/type/flags and may clamp min/max/step/def; for menu controls
 * 'step' carries the skip mask instead of a step size.
 */
static struct v4l2_ctrl *cx2341x_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
			u32 id, s32 min, s32 max, s32 step, s32 def)
{
	struct v4l2_ctrl_config cfg;

	/* Zero the whole config first: cx2341x_ctrl_fill() and the
	 * assignments below set only a subset of the fields, and
	 * v4l2_ctrl_new_custom() reads others (elem_size, qmenu_int,
	 * is_private, ...) which would otherwise be stack garbage. */
	memset(&cfg, 0, sizeof(cfg));
	cx2341x_ctrl_fill(id, &cfg.name, &cfg.type, &min, &max, &step, &def, &cfg.flags);
	cfg.ops = &cx2341x_ops;
	cfg.id = id;
	cfg.min = min;
	cfg.max = max;
	cfg.def = def;
	if (cfg.type == V4L2_CTRL_TYPE_MENU) {
		/* Menu controls: 'step' is really the skip mask. */
		cfg.step = 0;
		cfg.menu_skip_mask = step;
		cfg.qmenu = cx2341x_get_menu(id);
	} else {
		cfg.step = step;
		cfg.menu_skip_mask = 0;
	}
	return v4l2_ctrl_new_custom(hdl, &cfg, NULL);
}
/* Thin wrapper: register a standard control bound to the shared
 * cx2341x control ops. */
static struct v4l2_ctrl *cx2341x_ctrl_new_std(struct v4l2_ctrl_handler *hdl,
			u32 id, s32 min, s32 max, s32 step, s32 def)
{
	struct v4l2_ctrl *ctrl;

	ctrl = v4l2_ctrl_new_std(hdl, &cx2341x_ops, id, min, max, step, def);
	return ctrl;
}
/* Thin wrapper: register a standard menu control bound to the shared
 * cx2341x control ops ('mask' skips unsupported entries). */
static struct v4l2_ctrl *cx2341x_ctrl_new_menu(struct v4l2_ctrl_handler *hdl,
			u32 id, s32 max, s32 mask, s32 def)
{
	struct v4l2_ctrl *ctrl;

	ctrl = v4l2_ctrl_new_std_menu(hdl, &cx2341x_ops, id, max, mask, def);
	return ctrl;
}
/*
 * Create and register every cx2341x control on cxhdl->hdl, then group
 * related controls into clusters.  cxhdl->capabilities selects the
 * optional ones (sliced VBI, AC-3, transport stream).  Returns 0 or
 * the handler error code (the handler is freed on failure).
 */
int cx2341x_handler_init(struct cx2341x_handler *cxhdl,
			 unsigned nr_of_controls_hint)
{
	struct v4l2_ctrl_handler *hdl = &cxhdl->hdl;
	u32 caps = cxhdl->capabilities;
	int has_sliced_vbi = caps & CX2341X_CAP_HAS_SLICED_VBI;
	int has_ac3 = caps & CX2341X_CAP_HAS_AC3;
	int has_ts = caps & CX2341X_CAP_HAS_TS;

	cxhdl->width = 720;
	cxhdl->height = 480;

	v4l2_ctrl_handler_init(hdl, nr_of_controls_hint);

	/* Add controls in ascending control ID order for fastest
	   insertion time. */
	cxhdl->stream_type = cx2341x_ctrl_new_menu(hdl,
			V4L2_CID_MPEG_STREAM_TYPE,
			V4L2_MPEG_STREAM_TYPE_MPEG2_SVCD, has_ts ? 0 : 2,
			V4L2_MPEG_STREAM_TYPE_MPEG2_PS);
	cxhdl->stream_vbi_fmt = cx2341x_ctrl_new_menu(hdl,
			V4L2_CID_MPEG_STREAM_VBI_FMT,
			V4L2_MPEG_STREAM_VBI_FMT_IVTV, has_sliced_vbi ? 0 : 2,
			V4L2_MPEG_STREAM_VBI_FMT_NONE);
	cxhdl->audio_sampling_freq = cx2341x_ctrl_new_menu(hdl,
			V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ,
			V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000, 0,
			V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000);
	cxhdl->audio_encoding = cx2341x_ctrl_new_menu(hdl,
			V4L2_CID_MPEG_AUDIO_ENCODING,
			V4L2_MPEG_AUDIO_ENCODING_AC3, has_ac3 ? ~0x12 : ~0x2,
			V4L2_MPEG_AUDIO_ENCODING_LAYER_2);
	cxhdl->audio_l2_bitrate = cx2341x_ctrl_new_menu(hdl,
			V4L2_CID_MPEG_AUDIO_L2_BITRATE,
			V4L2_MPEG_AUDIO_L2_BITRATE_384K, 0x1ff,
			V4L2_MPEG_AUDIO_L2_BITRATE_224K);
	cxhdl->audio_mode = cx2341x_ctrl_new_menu(hdl,
			V4L2_CID_MPEG_AUDIO_MODE,
			V4L2_MPEG_AUDIO_MODE_MONO, 0,
			V4L2_MPEG_AUDIO_MODE_STEREO);
	cxhdl->audio_mode_extension = cx2341x_ctrl_new_menu(hdl,
			V4L2_CID_MPEG_AUDIO_MODE_EXTENSION,
			V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_16, 0,
			V4L2_MPEG_AUDIO_MODE_EXTENSION_BOUND_4);
	cxhdl->audio_emphasis = cx2341x_ctrl_new_menu(hdl,
			V4L2_CID_MPEG_AUDIO_EMPHASIS,
			V4L2_MPEG_AUDIO_EMPHASIS_CCITT_J17, 0,
			V4L2_MPEG_AUDIO_EMPHASIS_NONE);
	cxhdl->audio_crc = cx2341x_ctrl_new_menu(hdl,
			V4L2_CID_MPEG_AUDIO_CRC,
			V4L2_MPEG_AUDIO_CRC_CRC16, 0,
			V4L2_MPEG_AUDIO_CRC_NONE);

	cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_AUDIO_MUTE, 0, 1, 1, 0);
	if (has_ac3)
		cxhdl->audio_ac3_bitrate = cx2341x_ctrl_new_menu(hdl,
				V4L2_CID_MPEG_AUDIO_AC3_BITRATE,
				V4L2_MPEG_AUDIO_AC3_BITRATE_448K, 0x03,
				V4L2_MPEG_AUDIO_AC3_BITRATE_224K);
	cxhdl->video_encoding = cx2341x_ctrl_new_menu(hdl,
			V4L2_CID_MPEG_VIDEO_ENCODING,
			V4L2_MPEG_VIDEO_ENCODING_MPEG_2, 0,
			V4L2_MPEG_VIDEO_ENCODING_MPEG_2);
	cx2341x_ctrl_new_menu(hdl,
			V4L2_CID_MPEG_VIDEO_ASPECT,
			V4L2_MPEG_VIDEO_ASPECT_221x100, 0,
			V4L2_MPEG_VIDEO_ASPECT_4x3);
	cxhdl->video_b_frames = cx2341x_ctrl_new_std(hdl,
			V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 33, 1, 2);
	cxhdl->video_gop_size = cx2341x_ctrl_new_std(hdl,
			V4L2_CID_MPEG_VIDEO_GOP_SIZE,
			1, 34, 1, cxhdl->is_50hz ? 12 : 15);
	cx2341x_ctrl_new_std(hdl, V4L2_CID_MPEG_VIDEO_GOP_CLOSURE, 0, 1, 1, 1);
	cxhdl->video_bitrate_mode = cx2341x_ctrl_new_menu(hdl,
			V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
			V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0,
			V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
	cxhdl->video_bitrate = cx2341x_ctrl_new_std(hdl,
			V4L2_CID_MPEG_VIDEO_BITRATE,
			0, 27000000, 1, 6000000);
	cxhdl->video_bitrate_peak = cx2341x_ctrl_new_std(hdl,
			V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
			0, 27000000, 1, 8000000);
	cx2341x_ctrl_new_std(hdl,
			V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION, 0, 255, 1, 0);
	cxhdl->video_mute = cx2341x_ctrl_new_std(hdl,
			V4L2_CID_MPEG_VIDEO_MUTE, 0, 1, 1, 0);
	cxhdl->video_mute_yuv = cx2341x_ctrl_new_std(hdl,
			V4L2_CID_MPEG_VIDEO_MUTE_YUV, 0, 0xffffff, 1, 0x008080);

	/* CX23415/6 specific */
	cxhdl->video_spatial_filter_mode = cx2341x_ctrl_new_custom(hdl,
		V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE,
		V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL,
		V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_AUTO, 0,
		V4L2_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE_MANUAL);
	cxhdl->video_spatial_filter = cx2341x_ctrl_new_custom(hdl,
		V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER,
		0, 15, 1, 0);
	cxhdl->video_luma_spatial_filter_type = cx2341x_ctrl_new_custom(hdl,
		V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE,
		V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_OFF,
		V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_2D_SYM_NON_SEPARABLE,
		0,
		V4L2_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE_1D_HOR);
	cxhdl->video_chroma_spatial_filter_type = cx2341x_ctrl_new_custom(hdl,
		V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE,
		V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_OFF,
		V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR,
		0,
		V4L2_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE_1D_HOR);
	cxhdl->video_temporal_filter_mode = cx2341x_ctrl_new_custom(hdl,
		V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE,
		V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL,
		V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_AUTO,
		0,
		V4L2_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE_MANUAL);
	cxhdl->video_temporal_filter = cx2341x_ctrl_new_custom(hdl,
		V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER,
		0, 31, 1, 8);
	cxhdl->video_median_filter_type = cx2341x_ctrl_new_custom(hdl,
		V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE,
		V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF,
		V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_DIAG,
		0,
		V4L2_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE_OFF);
	cxhdl->video_luma_median_filter_bottom = cx2341x_ctrl_new_custom(hdl,
		V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM,
		0, 255, 1, 0);
	cxhdl->video_luma_median_filter_top = cx2341x_ctrl_new_custom(hdl,
		V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP,
		0, 255, 1, 255);
	cxhdl->video_chroma_median_filter_bottom = cx2341x_ctrl_new_custom(hdl,
		V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM,
		0, 255, 1, 0);
	cxhdl->video_chroma_median_filter_top = cx2341x_ctrl_new_custom(hdl,
		V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP,
		0, 255, 1, 255);
	cx2341x_ctrl_new_custom(hdl, V4L2_CID_MPEG_CX2341X_STREAM_INSERT_NAV_PACKETS,
		0, 1, 1, 0);

	/* Bail out (and free the handler) if any registration failed. */
	if (hdl->error) {
		int err = hdl->error;

		v4l2_ctrl_handler_free(hdl);
		return err;
	}

	/* Cluster sizes must match the struct member ordering, each
	 * cluster starting at the member passed below. */
	v4l2_ctrl_cluster(8, &cxhdl->audio_sampling_freq);
	v4l2_ctrl_cluster(2, &cxhdl->video_b_frames);
	v4l2_ctrl_cluster(5, &cxhdl->stream_type);
	v4l2_ctrl_cluster(2, &cxhdl->video_mute);
	v4l2_ctrl_cluster(3, &cxhdl->video_spatial_filter_mode);
	v4l2_ctrl_cluster(2, &cxhdl->video_luma_spatial_filter_type);
	v4l2_ctrl_cluster(2, &cxhdl->video_spatial_filter);
	v4l2_ctrl_cluster(4, &cxhdl->video_luma_median_filter_top);
	return 0;
}
EXPORT_SYMBOL(cx2341x_handler_init);
/* Record the video standard and refresh the default GOP size:
 * 12 frames at 50 Hz, 15 at 60 Hz. */
void cx2341x_handler_set_50hz(struct cx2341x_handler *cxhdl, int is_50hz)
{
	cxhdl->is_50hz = is_50hz;
	if (cxhdl->is_50hz)
		cxhdl->video_gop_size->default_value = 12;
	else
		cxhdl->video_gop_size->default_value = 15;
}
EXPORT_SYMBOL(cx2341x_handler_set_50hz);
/* Program output port, frame rate and frame size into the firmware,
 * then push every control's current value via the control framework.
 * Returns 0 or the first firmware/handler error. */
int cx2341x_handler_setup(struct cx2341x_handler *cxhdl)
{
	int height = cxhdl->height;
	int width = cxhdl->width;
	int ret;

	ret = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_OUTPUT_PORT, 2, cxhdl->port, 0);
	if (ret)
		return ret;
	ret = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_FRAME_RATE, 1, cxhdl->is_50hz);
	if (ret)
		return ret;
	/* MPEG-1 is encoded at half resolution. */
	if (v4l2_ctrl_g_ctrl(cxhdl->video_encoding) == V4L2_MPEG_VIDEO_ENCODING_MPEG_1) {
		width /= 2;
		height /= 2;
	}
	ret = cx2341x_hdl_api(cxhdl, CX2341X_ENC_SET_FRAME_SIZE, 2, height, width);
	if (ret)
		return ret;
	return v4l2_ctrl_handler_setup(&cxhdl->hdl);
}
EXPORT_SYMBOL(cx2341x_handler_setup);
/* Grab (busy != 0) or release the controls that must not change
 * while the encoder is running. */
void cx2341x_handler_set_busy(struct cx2341x_handler *cxhdl, int busy)
{
	struct v4l2_ctrl *locked[] = {
		cxhdl->audio_sampling_freq,
		cxhdl->audio_encoding,
		cxhdl->audio_l2_bitrate,
		cxhdl->audio_ac3_bitrate,
		cxhdl->stream_vbi_fmt,
		cxhdl->stream_type,
		cxhdl->video_bitrate_mode,
		cxhdl->video_bitrate,
		cxhdl->video_bitrate_peak,
	};
	unsigned i;

	for (i = 0; i < sizeof(locked) / sizeof(locked[0]); i++)
		v4l2_ctrl_grab(locked[i], busy);
}
EXPORT_SYMBOL(cx2341x_handler_set_busy);
| gpl-2.0 |
wyldstallyns/M4CH3T3_kernel_m8 | drivers/net/arcnet/arc-rawmode.c | 13283 | 5295 | /*
* Linux ARCnet driver - "raw mode" packet encapsulation (no soft headers)
*
* Written 1994-1999 by Avery Pennarun.
* Derived from skeleton.c by Donald Becker.
*
* Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
* for sponsoring the further development of this driver.
*
* **********************
*
* The original copyright of skeleton.c was as follows:
*
* skeleton.c Written 1993 by Donald Becker.
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency. This software may only be used
* and distributed according to the terms of the GNU General Public License as
* modified by SRC, incorporated herein by reference.
*
* **********************
*
* For more details, see drivers/net/arcnet.c
*
* **********************
*/
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <net/arp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/arcdevice.h>
#define VERSION "arcnet: raw mode (`r') encapsulation support loaded.\n"
static void rx(struct net_device *dev, int bufnum,
struct archdr *pkthdr, int length);
static int build_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, uint8_t daddr);
static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
int bufnum);
/* Protocol descriptor for raw-mode encapsulation.  Raw mode has no
 * soft headers, so continue_tx/ack_tx are not needed. */
static struct ArcProto rawmode_proto =
{
	.suffix		= 'r',
	.mtu		= XMTU,
	.rx		= rx,
	.build_header	= build_header,
	.prepare_tx	= prepare_tx,
	.continue_tx	= NULL,
	.ack_tx		= NULL
};
/* Module load: install raw mode for every protocol ID that nothing
 * better has claimed, and make it the default protocol. */
static int __init arcnet_raw_init(void)
{
	int proto_id;

	printk(VERSION);

	for (proto_id = 0; proto_id < 256; proto_id++) {
		if (arc_proto_map[proto_id] == arc_proto_default)
			arc_proto_map[proto_id] = &rawmode_proto;
	}

	/* for raw mode, we only set the bcast proto if there's no better one */
	if (arc_bcast_proto == arc_proto_default)
		arc_bcast_proto = &rawmode_proto;

	arc_proto_default = &rawmode_proto;
	return 0;
}
/* Module unload: remove raw mode from the arcnet protocol tables. */
static void __exit arcnet_raw_exit(void)
{
	arcnet_unregister_proto(&rawmode_proto);
}
module_init(arcnet_raw_init);
module_exit(arcnet_raw_exit);
MODULE_LICENSE("GPL");
/* packet receiver: copy one raw frame (hard header + payload) from
 * the card's buffer into a freshly allocated skb and hand it to the
 * network stack.  Drops the frame silently on allocation failure. */
static void rx(struct net_device *dev, int bufnum,
	       struct archdr *pkthdr, int length)
{
	struct arcnet_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	struct archdr *pkt = pkthdr;
	int ofs;

	BUGMSG(D_DURING, "it's a raw packet (length=%d)\n", length);

	/* Payload sits at the end of the hardware buffer: 512 bytes for
	 * long packets (> MTU), 256 bytes otherwise. */
	if (length > MTU)
		ofs = 512 - length;
	else
		ofs = 256 - length;

	skb = alloc_skb(length + ARC_HDR_SIZE, GFP_ATOMIC);
	if (skb == NULL) {
		BUGMSG(D_NORMAL, "Memory squeeze, dropping packet.\n");
		dev->stats.rx_dropped++;
		return;
	}
	skb_put(skb, length + ARC_HDR_SIZE);
	skb->dev = dev;

	pkt = (struct archdr *) skb->data;

	skb_reset_mac_header(skb);
	skb_pull(skb, ARC_HDR_SIZE);

	/* up to sizeof(pkt->soft) has already been copied from the card */
	memcpy(pkt, pkthdr, sizeof(struct archdr));
	if (length > sizeof(pkt->soft))
		lp->hw.copy_from_card(dev, bufnum, ofs + sizeof(pkt->soft),
				      pkt->soft.raw + sizeof(pkt->soft),
				      length - sizeof(pkt->soft));

	BUGLVL(D_SKB) arcnet_dump_skb(dev, skb, "rx");

	skb->protocol = cpu_to_be16(ETH_P_ARCNET);
	netif_rx(skb);
}
/*
 * Create the ARCnet hard/soft headers for raw mode.
 * There aren't any soft headers in raw mode - not even the protocol id.
 * Returns the number of bytes pushed (always ARC_HDR_SIZE).
 */
static int build_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type, uint8_t daddr)
{
	struct archdr *pkt = (struct archdr *) skb_push(skb, ARC_HDR_SIZE);

	/* Source address is informational only: ARCnet hardware always
	 * inserts its own station address into transmitted packets, but
	 * setting it here helps debugging. */
	pkt->hard.source = *dev->dev_addr;

	/* Loopback/NOARP interfaces get destination 0 (see
	 * linux/net/ethernet/eth.c for the origin of this check).
	 * FIXME: in "noarp" mode the last byte of the destination IP
	 * address could be used to better comply with RFC1051. */
	if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
		pkt->hard.dest = 0;
	else
		pkt->hard.dest = daddr;

	return ARC_HDR_SIZE;	/* success */
}
/* Copy the hard header and payload of one outgoing packet into the
 * card's transmit buffer 'bufnum', filling in the in-buffer offsets.
 * Always returns 1 (transfer complete in one step for raw mode). */
static int prepare_tx(struct net_device *dev, struct archdr *pkt, int length,
		      int bufnum)
{
	struct arcnet_local *lp = netdev_priv(dev);
	struct arc_hardware *hard = &pkt->hard;
	int ofs;

	BUGMSG(D_DURING, "prepare_tx: txbufs=%d/%d/%d\n",
	       lp->next_tx, lp->cur_tx, bufnum);

	length -= ARC_HDR_SIZE;	/* hard header is not included in packet length */

	if (length > XMTU) {
		/* should never happen! other people already check for this. */
		BUGMSG(D_NORMAL, "Bug! prepare_tx with size %d (> %d)\n",
		       length, XMTU);
		length = XMTU;
	}
	/* Packets are placed at the end of the buffer: 512-byte buffer
	 * for long packets (>= MinTU), 256-byte buffer for short ones.
	 * Lengths between MTU and MinTU apparently use the 3-byte-padded
	 * "exception" format -- NOTE(review): see arcdevice.h to confirm. */
	if (length >= MinTU) {
		hard->offset[0] = 0;
		hard->offset[1] = ofs = 512 - length;
	} else if (length > MTU) {
		hard->offset[0] = 0;
		hard->offset[1] = ofs = 512 - length - 3;
	} else
		hard->offset[0] = ofs = 256 - length;

	BUGMSG(D_DURING, "prepare_tx: length=%d ofs=%d\n",
	       length, ofs);

	lp->hw.copy_to_card(dev, bufnum, 0, hard, ARC_HDR_SIZE);
	lp->hw.copy_to_card(dev, bufnum, ofs, &pkt->soft, length);

	lp->lastload_dest = hard->dest;

	return 1;		/* done */
}
| gpl-2.0 |
OneRom/kernel_moto_shamu | arch/mips/lasat/lasat_board.c | 14051 | 7154 | /*
* Thomas Horsten <thh@lasat.com>
* Copyright (C) 2000 LASAT Networks A/S.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Routines specific to the LASAT boards
*/
#include <linux/types.h>
#include <linux/crc32.h>
#include <asm/lasat/lasat.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <asm/addrspace.h>
#include "at93c.h"
/* New model description table */
#include "lasat_models.h"
static DEFINE_MUTEX(lasat_eeprom_mutex);
#define EEPROM_CRC(data, len) (~crc32(~0, data, len))
struct lasat_info lasat_board_info;
/* Read 'len' bytes from the AT93C serial EEPROM, starting at offset
 * 'pos', into 'data'.  Always reports success (returns 0). */
int EEPROMRead(unsigned int pos, unsigned char *data, int len)
{
	int remaining = len;

	while (remaining-- > 0)
		*data++ = at93c_read(pos++);

	return 0;
}
/* Write 'len' bytes from 'data' to the AT93C serial EEPROM, starting
 * at offset 'pos'.  Always reports success (returns 0). */
int EEPROMWrite(unsigned int pos, unsigned char *data, int len)
{
	int remaining = len;

	while (remaining-- > 0)
		at93c_write(pos++, *data++);

	return 0;
}
/*
 * Fill in the flash partition base/size tables from the detected
 * flash size.  All partition bases after the bootloader are derived
 * cumulatively from the partition sizes.
 */
static void init_flash_sizes(void)
{
	unsigned long *lb = lasat_board_info.li_flashpart_base;
	unsigned long *ls = lasat_board_info.li_flashpart_size;
	int i;

	/* Fixed-size partitions common to all boards. */
	ls[LASAT_MTD_BOOTLOADER] = 0x40000;
	ls[LASAT_MTD_SERVICE] = 0xC0000;
	ls[LASAT_MTD_NORMAL] = 0x100000;

	if (!IS_LASAT_200()) {
		/* Non-LASAT-200 boards: flash mapped at 0x1e000000. */
		lasat_board_info.li_flash_base = 0x1e000000;

		lb[LASAT_MTD_BOOTLOADER] = 0x1e400000;

		if (lasat_board_info.li_flash_size > 0x200000) {
			ls[LASAT_MTD_CONFIG] = 0x100000;
			ls[LASAT_MTD_FS] = 0x500000;
		}
	} else {
		/* LASAT 200 boards: flash mapped at 0x10000000. */
		lasat_board_info.li_flash_base = 0x10000000;

		if (lasat_board_info.li_flash_size < 0x1000000) {
			lb[LASAT_MTD_BOOTLOADER] = 0x10000000;
			ls[LASAT_MTD_CONFIG] = 0x100000;
			if (lasat_board_info.li_flash_size >= 0x400000)
				ls[LASAT_MTD_FS] =
					lasat_board_info.li_flash_size - 0x300000;
		}
	}

	/* Each remaining partition starts where the previous one ends. */
	for (i = 1; i < LASAT_MTD_LAST; i++)
		lb[i] = lb[i-1] + ls[i-1];
}
/*
 * Read and decode the board configuration EEPROM into
 * lasat_board_info: memory size, bus/CPU clocks, flash size and
 * partitioning, board/product IDs and name strings.  CRC, version
 * or configuration mismatches only produce warnings; the function
 * always returns 0.
 */
int lasat_init_board_info(void)
{
	int c;
	unsigned long crc;
	unsigned long cfg0, cfg1;
	const struct product_info *ppi;
	int i_n_base_models = N_BASE_MODELS;
	const char * const * i_txt_base_models = txt_base_models;
	int i_n_prids = N_PRIDS;

	memset(&lasat_board_info, 0, sizeof(lasat_board_info));

	/* First read the EEPROM info */
	EEPROMRead(0, (unsigned char *)&lasat_board_info.li_eeprom_info,
		   sizeof(struct lasat_eeprom_struct));

	/* Check the CRC (computed over everything except the trailing
	 * 4-byte crc32 field). */
	crc = EEPROM_CRC((unsigned char *)(&lasat_board_info.li_eeprom_info),
		    sizeof(struct lasat_eeprom_struct) - 4);

	if (crc != lasat_board_info.li_eeprom_info.crc32) {
		printk(KERN_WARNING "WARNING...\nWARNING...\nEEPROM CRC does "
		       "not match calculated, attempting to soldier on...\n");
	}

	if (lasat_board_info.li_eeprom_info.version != LASAT_EEPROM_VERSION) {
		printk(KERN_WARNING "WARNING...\nWARNING...\nEEPROM version "
		       "%d, wanted version %d, attempting to soldier on...\n",
		       (unsigned int)lasat_board_info.li_eeprom_info.version,
		       LASAT_EEPROM_VERSION);
	}

	cfg0 = lasat_board_info.li_eeprom_info.cfg[0];
	cfg1 = lasat_board_info.li_eeprom_info.cfg[1];

	if (LASAT_W0_DSCTYPE(cfg0) != 1) {
		printk(KERN_WARNING "WARNING...\nWARNING...\n"
		       "Invalid configuration read from EEPROM, attempting to "
		       "soldier on...");
	}
	/* We have a valid configuration */

	/* Per-bank SDRAM size; doubled below if two banks are fitted. */
	switch (LASAT_W0_SDRAMBANKSZ(cfg0)) {
	case 0:
		lasat_board_info.li_memsize = 0x0800000;
		break;
	case 1:
		lasat_board_info.li_memsize = 0x1000000;
		break;
	case 2:
		lasat_board_info.li_memsize = 0x2000000;
		break;
	case 3:
		lasat_board_info.li_memsize = 0x4000000;
		break;
	case 4:
		lasat_board_info.li_memsize = 0x8000000;
		break;
	default:
		lasat_board_info.li_memsize = 0;
	}

	switch (LASAT_W0_SDRAMBANKS(cfg0)) {
	case 0:
		break;
	case 1:
		lasat_board_info.li_memsize *= 2;
		break;
	default:
		break;
	}

	/* Bus clock in Hz, from the EEPROM speed code. */
	switch (LASAT_W0_BUSSPEED(cfg0)) {
	case 0x0:
		lasat_board_info.li_bus_hz = 60000000;
		break;
	case 0x1:
		lasat_board_info.li_bus_hz = 66000000;
		break;
	case 0x2:
		lasat_board_info.li_bus_hz = 66666667;
		break;
	case 0x3:
		lasat_board_info.li_bus_hz = 80000000;
		break;
	case 0x4:
		lasat_board_info.li_bus_hz = 83333333;
		break;
	case 0x5:
		lasat_board_info.li_bus_hz = 100000000;
		break;
	}

	/* CPU clock = bus clock * {1, 1.5, 2, 2.5, 3}. */
	switch (LASAT_W0_CPUCLK(cfg0)) {
	case 0x0:
		lasat_board_info.li_cpu_hz =
			lasat_board_info.li_bus_hz;
		break;
	case 0x1:
		lasat_board_info.li_cpu_hz =
			lasat_board_info.li_bus_hz +
			(lasat_board_info.li_bus_hz >> 1);
		break;
	case 0x2:
		lasat_board_info.li_cpu_hz =
			lasat_board_info.li_bus_hz +
			lasat_board_info.li_bus_hz;
		break;
	case 0x3:
		lasat_board_info.li_cpu_hz =
			lasat_board_info.li_bus_hz +
			lasat_board_info.li_bus_hz +
			(lasat_board_info.li_bus_hz >> 1);
		break;
	case 0x4:
		lasat_board_info.li_cpu_hz =
			lasat_board_info.li_bus_hz +
			lasat_board_info.li_bus_hz +
			lasat_board_info.li_bus_hz;
		break;
	}

	/* Flash size */
	switch (LASAT_W1_FLASHSIZE(cfg1)) {
	case 0:
		lasat_board_info.li_flash_size = 0x200000;
		break;
	case 1:
		lasat_board_info.li_flash_size = 0x400000;
		break;
	case 2:
		lasat_board_info.li_flash_size = 0x800000;
		break;
	case 3:
		lasat_board_info.li_flash_size = 0x1000000;
		break;
	case 4:
		lasat_board_info.li_flash_size = 0x2000000;
		break;
	}

	init_flash_sizes();

	lasat_board_info.li_bmid = LASAT_W0_BMID(cfg0);
	lasat_board_info.li_prid = lasat_board_info.li_eeprom_info.prid;
	/* Unprogrammed product ID: fall back to the board model ID. */
	if (lasat_board_info.li_prid == 0xffff || lasat_board_info.li_prid == 0)
		lasat_board_info.li_prid = lasat_board_info.li_bmid;

	/* Base model stuff */
	if (lasat_board_info.li_bmid > i_n_base_models)
		lasat_board_info.li_bmid = i_n_base_models;
	strcpy(lasat_board_info.li_bmstr,
	       i_txt_base_models[lasat_board_info.li_bmid]);

	/* Product ID dependent values */
	c = lasat_board_info.li_prid;
	if (c >= i_n_prids) {
		strcpy(lasat_board_info.li_namestr, "Unknown Model");
		strcpy(lasat_board_info.li_typestr, "Unknown Type");
	} else {
		ppi = &vendor_info_table[0].vi_product_info[c];
		strcpy(lasat_board_info.li_namestr, ppi->pi_name);
		if (ppi->pi_type)
			strcpy(lasat_board_info.li_typestr, ppi->pi_type);
		else
			sprintf(lasat_board_info.li_typestr, "%d", 10 * c);
	}

	return 0;
}
/* Recompute the CRC over everything except the trailing crc32 field,
 * then commit the whole info structure to the EEPROM.  Serialized by
 * lasat_eeprom_mutex. */
void lasat_write_eeprom_info(void)
{
	unsigned long checksum;

	mutex_lock(&lasat_eeprom_mutex);

	/* Generate the CRC */
	checksum = EEPROM_CRC((unsigned char *)(&lasat_board_info.li_eeprom_info),
		    sizeof(struct lasat_eeprom_struct) - 4);
	lasat_board_info.li_eeprom_info.crc32 = checksum;

	/* Write the EEPROM info */
	EEPROMWrite(0, (unsigned char *)&lasat_board_info.li_eeprom_info,
		    sizeof(struct lasat_eeprom_struct));

	mutex_unlock(&lasat_eeprom_mutex);
}
| gpl-2.0 |
utkaar099/m7ul_kernel | drivers/media/video/msm/yushanII/ilp0100_ST_debugging.c | 228 | 34704 | /*******************************************************************************
################################################################################
# (C) STMicroelectronics 2012
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 and only version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#------------------------------------------------------------------------------
# Imaging Division
################################################################################
********************************************************************************/
#include "ilp0100_ST_debugging.h"
/* Type tags used by Ilp0100_dumpParameter() to decide how to format a value.
 * Note BYTE, BOOL and UINT8 deliberately share tag 1: all three are dumped
 * as a single byte in "0x%02x" form. */
#define ILP0100_DEBUG_NONE 0
#define ILP0100_DEBUG_BYTE 1
#define ILP0100_DEBUG_BOOL 1
#define ILP0100_DEBUG_UINT8 1
#define ILP0100_DEBUG_UINT16 2
#define ILP0100_DEBUG_UINT32 3
#define ILP0100_DEBUG_FLOAT 4
#ifdef ILP0100_DEBUG
/* Convenience wrappers around Ilp0100_dumpParameter():
 *  - DUMP_PARAM_STRUCT:        dump a whole struct (dispatched by name inside
 *                              Ilp0100_dumpParameter via the NONE type tag)
 *  - DUMP_PARAM:               dump one scalar passed by pointer
 *  - DUMP_PARAM_PTR:           dump ParamSize elements behind a pointer
 *  - DUMP_STRUCTPARAM[_PTR/_ARRAY]: dump one member of a struct instance
 * The stringizing (#) operand becomes the XML element name. */
#define DUMP_PARAM_STRUCT(ParamType, Param) Ilp0100_dumpParameter(#ParamType, sizeof(ParamType), Param, ILP0100_DEBUG_NONE)
#define DUMP_PARAM(ParamName, Param, ParamType) Ilp0100_dumpParameter(#ParamName, 1, Param, ParamType)
#define DUMP_PARAM_PTR(ParamName, Param, ParamSize, ParamType) Ilp0100_dumpParameter(#ParamName, ParamSize, Param, ParamType)
#define DUMP_STRUCTPARAM(StructType, StructName, ParamName, ParamType) Ilp0100_dumpParameter(#ParamName, 1, (uint8_t *)&(((StructType*)StructName)->ParamName), ParamType);
#define DUMP_STRUCTPARAM_PTR(StructType, StructName, ParamName, ParamSize, ParamType) Ilp0100_dumpParameter(#ParamName, ParamSize, (uint8_t *)(((StructType*)StructName)->ParamName) , ParamType);
#define DUMP_STRUCTPARAM_ARRAY(StructType, StructName, ParamName, ParamSize, ParamType) Ilp0100_dumpParameter(#ParamName, ParamSize, (uint8_t *)(&(((StructType*)StructName)->ParamName[0])) , ParamType);
/* Compares Str1/Str2 via Ilp0100_core_strCmpr into UINT8_VAR, then guards the
 * following compound statement on the result.  NOTE: expands to two
 * statements, so it must not be used as the sole body of an unbraced if. */
#define ILP0100_DEBUG_TEST_STR_EQUALITY(Str1, Str2, UINT8_VAR) Ilp0100_core_strCmpr(Str1, Str2, &UINT8_VAR);if(UINT8_VAR)
/* Forward declarations for the local dump helpers defined below. */
ilp0100_error Ilp0100_dumpParameters(const char* pFunctionName, void **pFuncArguments);
ilp0100_error Ilp0100_dumpParameter(const char* pParamName, const uint16_t ParamLength, const uint8_t *pParamValues, const uint8_t ParamType);
ilp0100_error Ilp0100_isApiCoreFunction(const char* pFunctionName, bool_t* isCoreFunc);
/* Logging state (single global session — not thread-safe). */
uint8_t* pIlp0100DebugBuffer=0;      /* XML log buffer; 0 = session not opened */
uint32_t Ilp0100DebugLogSize=0;      /* bytes currently written into the buffer */
bool_t Ilp0100DebugStarted=FALSE;    /* TRUE between loggingStart and loggingStop */
int8_t CurrentLevel=0;               /* current API-call nesting depth */
char FunctionsLevel[20][45];         /* names of the functions on the call stack */
/* Allocate the debug log buffer and write the XML prolog.
 * Returns ILP_0100_DEBUG_BUFFER_CREATION_ERROR when the buffer cannot be
 * allocated, ILP0100_ERROR_NONE otherwise.  Resets the log size and the
 * call-nesting level.  NOTE(review): declared with empty parens, not (void) —
 * K&R-style unspecified parameter list. */
ilp0100_error Ilp0100_loggingOpen()
{
ilp0100_error Status = ILP0100_ERROR_NONE;
pIlp0100DebugBuffer = (uint8_t *)ILP0100_BUFFERCREATE(ILP0100_MAX_DEBUG_BUFFER_SIZE);
Ilp0100DebugLogSize = 0;
CurrentLevel = 0;
if(pIlp0100DebugBuffer==0) {
Status = ILP_0100_DEBUG_BUFFER_CREATION_ERROR;
}
if(Status == ILP0100_ERROR_NONE){
/* Start the XML document that all subsequent log writes append to. */
ILP0100_DEBUG_WRITE_IN_LOG("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n");
ILP0100_DEBUG_WRITE_IN_LOG("<Ilp0100_Logging>\n");
} else {
/* Allocation failed: report it (same code Status already holds). */
return ILP_0100_DEBUG_BUFFER_CREATION_ERROR;
}
return ILP0100_ERROR_NONE;
}
/* Stop any running logging session and release the debug buffer.
 * The buffer is freed only when Ilp0100_loggingStop() (if invoked) reported
 * success — a failing stop therefore keeps the buffer alive, so a later
 * retry of close can still free it. */
ilp0100_error Ilp0100_loggingClose()
{
ilp0100_error Status = ILP0100_ERROR_NONE;
if(Ilp0100DebugStarted){
Status = Ilp0100_loggingStop();
}
if(Status == ILP0100_ERROR_NONE){
ILP0100_BUFFERFREE(pIlp0100DebugBuffer);
/* Mark the session closed so logging calls become no-ops/errors. */
pIlp0100DebugBuffer=0;
}
return Status;
}
/* Begin a logging session: lazily open the debug buffer on first use and
 * write the <Ilp0100_LoggingSession>/<Ilp0100_DebugStart> elements.
 * @DebugLevel is currently unused by this implementation.
 * Bug fix: the function used to compute Status (buffer creation and
 * timestamp acquisition can both fail) and then unconditionally return
 * ILP0100_ERROR_NONE, hiding the failure from the caller; it now
 * propagates Status. */
ilp0100_error Ilp0100_loggingStart(uint8_t DebugLevel)
{
	ilp0100_error Status = ILP0100_ERROR_NONE;
	uint32_t CurrentTime;
	/* First start of the process: create the buffer and XML prolog. */
	if(pIlp0100DebugBuffer==0) {
		Status = Ilp0100_loggingOpen();
	}
	Ilp0100DebugStarted=TRUE;
	if(Status == ILP0100_ERROR_NONE){
		Status = Ilp0100_GetTimeStamp(&CurrentTime);
	}
	if(Status == ILP0100_ERROR_NONE){
		ILP0100_DEBUG_WRITE_IN_LOG("<Ilp0100_LoggingSession TimeStamp=\"%.8d\">\n", CurrentTime);
		ILP0100_DEBUG_WRITE_IN_LOG("<Ilp0100_DebugStart TimeStamp=\"%.8d\"/>\n", CurrentTime);
	}
	/* A new session always restarts at nesting depth 0. */
	CurrentLevel = 0;
	return Status;
}
/* End the logging session: close the <Ilp0100_LoggingSession> element and
 * clear the "started" flag.  Always returns ILP0100_ERROR_NONE.
 * NOTE(review): a Ilp0100_GetTimeStamp() failure is silently swallowed here;
 * Ilp0100_loggingClose() relies on this NONE return to decide whether to
 * free the buffer, so changing it would alter the close path — confirm
 * intent before "fixing". */
ilp0100_error Ilp0100_loggingStop()
{
ilp0100_error Status = ILP0100_ERROR_NONE;
uint32_t CurrentTime;
Status = Ilp0100_GetTimeStamp(&CurrentTime);
if(Status == ILP0100_ERROR_NONE){
ILP0100_DEBUG_WRITE_IN_LOG("<Ilp0100_DebugStop TimeStamp=\"%.8d\"/>\n", CurrentTime);
ILP0100_DEBUG_WRITE_IN_LOG("</Ilp0100_LoggingSession>\n");
}
Ilp0100DebugStarted=FALSE;
return ILP0100_ERROR_NONE;
}
/* Open a timestamped <Log_message> element attributed to pFunctionName.
 * Writes nothing when the debug buffer has not been created; returns the
 * timestamp-acquisition status. */
ilp0100_error Ilp0100_logDebugMessageStart(const char* pFunctionName)
{
	uint32_t Timestamp;
	ilp0100_error Result;

	Result = Ilp0100_GetTimeStamp(&Timestamp);
	if ((Result == ILP0100_ERROR_NONE) && (pIlp0100DebugBuffer != 0)) {
		ILP0100_DEBUG_WRITE_IN_LOG("<Log_message TimeStamp=\"%.8d\" API_Function_Name=\"%s\">", Timestamp, pFunctionName);
	}
	return Result;
}
/* Close the currently open <Log_message> element.
 * No-op when the debug buffer has not been created; always succeeds. */
ilp0100_error Ilp0100_logDebugMessageEnd()
{
	if (pIlp0100DebugBuffer != 0) {
		ILP0100_DEBUG_WRITE_IN_LOG("</Log_message>\n");
	}
	return ILP0100_ERROR_NONE;
}
/* Open a timestamped <ERROR_MESSAGE> element attributed to pFunctionName.
 * Writes nothing when the debug buffer has not been created; returns the
 * timestamp-acquisition status. */
ilp0100_error Ilp0100_logErrorMessageStart(const char* pFunctionName)
{
	uint32_t Timestamp;
	ilp0100_error Result;

	Result = Ilp0100_GetTimeStamp(&Timestamp);
	if ((Result == ILP0100_ERROR_NONE) && (pIlp0100DebugBuffer != 0)) {
		ILP0100_DEBUG_WRITE_IN_LOG("<ERROR_MESSAGE TimeStamp=\"%.8d\" API_Function_Name=\"%s\">", Timestamp, pFunctionName);
	}
	return Result;
}
/* Close the currently open <ERROR_MESSAGE> element.
 * No-op when the debug buffer has not been created; always succeeds. */
ilp0100_error Ilp0100_logErrorMessageEnd()
{
	if (pIlp0100DebugBuffer != 0) {
		ILP0100_DEBUG_WRITE_IN_LOG("</ERROR_MESSAGE>\n");
	}
	return ILP0100_ERROR_NONE;
}
/* Report the buffer size the caller should provide to loggingReadBack().
 * The +20 pads for text appended after this call (the "</Ilp0100_Logging>\n"
 * closing tag written during read-back) — presumably a safety margin; the
 * exact constant is not derived anywhere visible, so treat it as ad hoc. */
ilp0100_error Ilp0100_loggingGetSize(uint32_t* pLogSize)
{
*pLogSize = Ilp0100DebugLogSize+20;
return ILP0100_ERROR_NONE;
}
/* Copy the accumulated XML log into the caller's buffer.
 * The document-closing tag is appended before copying so the caller gets a
 * well-formed document, and the internal size is then rolled back so that
 * later log writes overwrite the closing tag and logging can continue.
 * Returns ILP_0100_DEBUG_NOT_VALID_BUFFER_ERROR for a NULL destination and
 * ILP_0100_DEBUG_SESSION_NOT_OPENED when no buffer exists.
 * NOTE(review): the destination is assumed large enough (see
 * Ilp0100_loggingGetSize) — no bound is checked here. */
ilp0100_error Ilp0100_loggingReadBack(uint8_t* pDebugLog, uint32_t* pLogSize)
{
ilp0100_error Status = ILP0100_ERROR_NONE;
uint32_t CurrentLogSize;
uint32_t i;
if(pDebugLog!=0)
{
if(pIlp0100DebugBuffer!=0)
{
if(Status == ILP0100_ERROR_NONE){
/* Remember the size before the closing tag so it can be restored. */
CurrentLogSize=Ilp0100DebugLogSize;
ILP0100_DEBUG_WRITE_IN_LOG("</Ilp0100_Logging>\n");
}
/* Byte-wise copy of the whole log, closing tag included. */
for(i=0; i<Ilp0100DebugLogSize; i++)
{
*(pDebugLog+i) = *(pIlp0100DebugBuffer+i);
}
*pLogSize = Ilp0100DebugLogSize;
/* Roll back: future writes resume where the closing tag started. */
Ilp0100DebugLogSize = CurrentLogSize;
}
else
{
Status = ILP_0100_DEBUG_SESSION_NOT_OPENED;
}
}
else
{
Status = ILP_0100_DEBUG_NOT_VALID_BUFFER_ERROR;
}
return Status;
}
ilp0100_error Ilp0100_loggingFunctionStart(const char* pFunctionName, void **pFuncArguments)
{
ilp0100_error Status = ILP0100_ERROR_NONE;
uint32_t CurrentTime;
bool_t isCoreFunc;
if(Ilp0100DebugStarted){
Status = Ilp0100_GetTimeStamp(&CurrentTime);
if(Status==ILP0100_ERROR_NONE){
if(pIlp0100DebugBuffer!=0) {
ILP0100_DEBUG_WRITE_IN_LOG("<API_Function>\n");
ILP0100_DEBUG_WRITE_IN_LOG("<Exec_Start TimeStamp=\"%.8d\">\n", CurrentTime);
ILP0100_DEBUG_WRITE_IN_LOG("<API_Function_Name>%s</API_Function_Name>\n", pFunctionName);
Ilp0100_isApiCoreFunction(pFunctionName, &isCoreFunc);
if(isCoreFunc && CurrentLevel==0)
{
ILP0100_DEBUG_WRITE_IN_LOG("<Warning>\nSeems that a core function is called by User application\n</Warning>\n");
}
if((!isCoreFunc) && (CurrentLevel!=0))
{
ILP0100_DEBUG_WRITE_IN_LOG("<Warning>\nSeems that an API user interface function is called in parrallel to antoher one.\n(or API function calls another API user interface function)\n</Warning>\n");
}
ILP0100_DEBUG_WRITE_IN_LOG("<API_Input_Arguments>\n");
Ilp0100_dumpParameters(pFunctionName, pFuncArguments);
ILP0100_DEBUG_WRITE_IN_LOG("</API_Input_Arguments>\n");
ILP0100_DEBUG_WRITE_IN_LOG("</Exec_Start>\n");
ILP0100_SPRINTF(FunctionsLevel[CurrentLevel], "%s", pFunctionName);
CurrentLevel = CurrentLevel+1;
} else {
Status = ILP_0100_DEBUG_SESSION_NOT_OPENED;
}
}
}
return Status;
}
/* Log the exit of an API function: dump the output arguments and returned
 * value, pop the nesting stack, and warn when the popped name does not match
 * pFunctionName (unbalanced start/end pairing).  No-op when logging is not
 * started; returns SESSION_NOT_OPENED if the buffer was never created. */
ilp0100_error Ilp0100_loggingFunctionEnd(const char* pFunctionName, ilp0100_error ReturnedValue, void **pFuncArguments)
{
ilp0100_error Status = ILP0100_ERROR_NONE;
uint32_t CurrentTime;
uint8_t FuncOK;
if(Ilp0100DebugStarted){
Status = Ilp0100_GetTimeStamp(&CurrentTime);
if(Status==ILP0100_ERROR_NONE){
if(pIlp0100DebugBuffer!=0) {
ILP0100_DEBUG_WRITE_IN_LOG("<Exec_End TimeStamp=\"%.8d\"/>\n", CurrentTime);
ILP0100_DEBUG_WRITE_IN_LOG("<API_Function_Name>%s</API_Function_Name>\n", pFunctionName);
ILP0100_DEBUG_WRITE_IN_LOG("<API_Output_Arguments>\n");
Ilp0100_dumpParameters(pFunctionName, pFuncArguments);
ILP0100_DEBUG_WRITE_IN_LOG("</API_Output_Arguments>\n");
ILP0100_DEBUG_WRITE_IN_LOG("<Returned_Value>%x</Returned_Value>\n", ReturnedValue);
/* Pop the stack, guarding against underflow on unbalanced calls. */
if(CurrentLevel>0)
CurrentLevel = CurrentLevel-1;
/* Verify the popped entry matches the function now ending. */
Ilp0100_core_strCmpr(pFunctionName, FunctionsLevel[CurrentLevel], &FuncOK);
if(!FuncOK){
ILP0100_DEBUG_WRITE_IN_LOG("<WARNING>\nEND EXEC FUNCTION NAME DO NOT CORRESPOND TO START FUNCTION NAME.\nEXPECTED END OF: %s\nGOT END OF: %s\nPLEASE CHECK!\n</WARNING>\n",FunctionsLevel[CurrentLevel],pFunctionName);
}
ILP0100_DEBUG_WRITE_IN_LOG("</API_Function>\n");
} else {
Status = ILP_0100_DEBUG_SESSION_NOT_OPENED;
}
}
}
return Status;
}
/* Log a low-level register access (read or write): emit an <ILP_Access>
 * element with the register, count and data dumped via the per-function
 * argument formatter, plus the access's returned status.
 * No-op when logging is not started; returns SESSION_NOT_OPENED if the
 * buffer was never created.
 * Bug fix: the TimeStamp format was "%.8ud", which printf parses as the
 * conversion "%.8u" followed by a literal 'd' — producing e.g.
 * TimeStamp="00001234d" and disagreeing with the "%.8d" used by every other
 * log statement in this file.  Changed to "%.8d". */
ilp0100_error Ilp0100_loggingFunctionIlpAccess(const char* pFunctionName, uint16_t RegisterName, uint16_t Count, uint8_t *pData, ilp0100_error ReturnedValue)
{
	ilp0100_error Status = ILP0100_ERROR_NONE;
	uint32_t CurrentTime;
	/* Package the access parameters the way Ilp0100_dumpParameters expects. */
	void *pFuncArguments[]={(void*)&RegisterName, (void*)&Count, (void*)pData};
	if(Ilp0100DebugStarted){
		Status = Ilp0100_GetTimeStamp(&CurrentTime);
		if(Status==ILP0100_ERROR_NONE){
			if(pIlp0100DebugBuffer!=0) {
				ILP0100_DEBUG_WRITE_IN_LOG("<ILP_Access TimeStamp=\"%.8d\" API_FuncName=\"%s\">\n", CurrentTime, pFunctionName);
				Ilp0100_dumpParameters(pFunctionName, pFuncArguments);
				ILP0100_DEBUG_WRITE_IN_LOG("<Returned_Value>%x</Returned_Value>\n", ReturnedValue);
				ILP0100_DEBUG_WRITE_IN_LOG("</ILP_Access>\n");
			} else {
				Status = ILP_0100_DEBUG_SESSION_NOT_OPENED;
			}
		}
	}
	return Status;
}
/* Decide whether pFunctionName belongs to the core layer by testing whether
 * its first 12 characters equal the "Ilp0100_core" prefix.  The result is
 * written through isCoreFunc by Ilp0100_core_strCmpr(). */
ilp0100_error Ilp0100_isApiCoreFunction(const char* pFunctionName, bool_t* isCoreFunc)
{
	char Prefix[]="Ilp0100_core";
	char Head[13];
	uint8_t Idx = 0;

	/* Copy at most 12 characters (the prefix length), stopping at NUL. */
	while ((Idx < 12) && (pFunctionName[Idx] != 0)) {
		Head[Idx] = pFunctionName[Idx];
		Idx = Idx + 1;
	}
	Head[Idx] = 0;
	return Ilp0100_core_strCmpr(Head, Prefix, isCoreFunc);
}
/* Dispatch table (by function name) mapping each known API entry point to
 * the dump of its argument list.  pFuncArguments is an array of untyped
 * pointers in the same order as the target function's parameters; each
 * branch casts/dumps them with the DUMP_* macros.  Unknown names dump
 * nothing and still return ILP0100_ERROR_NONE.
 * Note: every ILP0100_DEBUG_TEST_STR_EQUALITY below performs a string
 * compare even after a match — the branches are not mutually exclusive by
 * construction, only by the uniqueness of the names. */
ilp0100_error Ilp0100_dumpParameters(const char* pFunctionName, void **pFuncArguments)
{
ilp0100_error Status = ILP0100_ERROR_NONE;
uint8_t FuncStrEqual;
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_init", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structInit, *(pFuncArguments+0));
DUMP_PARAM_STRUCT(Ilp0100_structInitFirmware, *(pFuncArguments+1));
DUMP_PARAM_STRUCT(Ilp0100_structSensorParams, *(pFuncArguments+2));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_defineMode", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structFrameFormat, *(pFuncArguments+0));
}
/* Pipe configuration/update entry points: single struct argument each. */
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_configDefcorShortOrNormal", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structDefcorConfig, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_configDefcorLong", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structDefcorConfig, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_updateDefcorShortOrNormal", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structDefcorParams, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_updateDefcorLong", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structDefcorParams, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_configChannelOffsetShortOrNormal", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structChannelOffsetConfig, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_configChannelOffsetLong", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structChannelOffsetConfig, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_updateChannelOffsetShortOrNormal", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structChannelOffsetParams, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_updateChannelOffsetLong", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structChannelOffsetParams, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_configHdrMerge", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structHdrMergeConfig, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_updateHdrMerge", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structHdrMergeParams, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_configCls", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structClsConfig, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_updateCls", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structClsParams, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_configToneMapping", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structToneMappingConfig, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_updateToneMapping", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structToneMappingParams, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_configGlaceShortOrNormal", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structGlaceConfig, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_configGlaceLong", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structGlaceConfig, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_configHistShortOrNormal", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structHistConfig, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_configHistLong", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structHistConfig, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_updateSensorParamsShortOrNormal", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structFrameParams, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_updateSensorParamsLong", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structFrameParams, *(pFuncArguments+0));
}
/* Statistics read-back entry points: dump the returned stats structures. */
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_readBackGlaceStatisticsShortOrNormal", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structGlaceStatsData, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_readBackGlaceStatisticsLong", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structGlaceStatsData, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_readBackHistStatisticsShortOrNormal", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structHistStatsData, *(pFuncArguments+0));
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_readBackHistStatisticsLong", FuncStrEqual){
DUMP_PARAM_STRUCT(Ilp0100_structHistStatsData, *(pFuncArguments+0));
}
/* Scalar-argument entry points. */
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_setVirtualChannelShortOrNormal", FuncStrEqual){
DUMP_PARAM(VirtualChannel, *(pFuncArguments+0), ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_setVirtualChannelLong", FuncStrEqual){
DUMP_PARAM(VirtualChannel, *(pFuncArguments+0), ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_setHDRFactor", FuncStrEqual){
DUMP_PARAM(HDRFactor, *(pFuncArguments+0), ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_interruptEnable", FuncStrEqual){
DUMP_PARAM(InterruptSetMask,*(pFuncArguments+0), ILP0100_DEBUG_UINT32);
DUMP_PARAM(Pin, *(pFuncArguments+1), ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_interruptDisable", FuncStrEqual){
DUMP_PARAM(InterruptClrMask,*(pFuncArguments+0), ILP0100_DEBUG_UINT32);
DUMP_PARAM(Pin, *(pFuncArguments+1), ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_interruptReadStatus", FuncStrEqual){
DUMP_PARAM(pInterruptId, *(pFuncArguments+0), ILP0100_DEBUG_UINT32);
DUMP_PARAM(Pin, *(pFuncArguments+1), ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_interruptClearStatus", FuncStrEqual){
DUMP_PARAM(pInterruptId, *(pFuncArguments+0), ILP0100_DEBUG_UINT32);
DUMP_PARAM(Pin, *(pFuncArguments+1), ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_getApiVersionNumber", FuncStrEqual){
DUMP_PARAM_PTR(pMajorNumber, *(pFuncArguments+0), 1, ILP0100_DEBUG_UINT8);
DUMP_PARAM_PTR(pMinorNumber, *(pFuncArguments+1), 1, ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_getFirmwareVersionumber", FuncStrEqual){
DUMP_PARAM_PTR(pMajorNumber, *(pFuncArguments+0), 1, ILP0100_DEBUG_UINT8);
DUMP_PARAM_PTR(pMinorNumber, *(pFuncArguments+1), 1, ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_startTestMode", FuncStrEqual){
DUMP_PARAM(IpsBypass, *(pFuncArguments+0), ILP0100_DEBUG_UINT16);
DUMP_PARAM(TestMode, *(pFuncArguments+1), ILP0100_DEBUG_UINT16);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_readRegister", FuncStrEqual){
DUMP_PARAM(RegisterName, *(pFuncArguments+0), ILP0100_DEBUG_UINT16);
DUMP_PARAM_PTR(pData, *(pFuncArguments+1), 4, ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_writeRegister", FuncStrEqual){
DUMP_PARAM(RegisterName, *(pFuncArguments+0), ILP0100_DEBUG_UINT16);
DUMP_PARAM_PTR(pData, *(pFuncArguments+1), 4, ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_enableIlp0100SensorClock", FuncStrEqual){
DUMP_PARAM(SensorInterface, *(pFuncArguments+0), ILP0100_DEBUG_BOOL);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_disableIlp0100SensorClock", FuncStrEqual){
DUMP_PARAM(SensorInterface, *(pFuncArguments+0), ILP0100_DEBUG_BOOL);
}
/* Core register access: pData length comes from the Count argument. */
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_core_readRegister", FuncStrEqual){
DUMP_PARAM(RegisterName, *(pFuncArguments+0), ILP0100_DEBUG_UINT16);
DUMP_PARAM(Count, *(pFuncArguments+1), ILP0100_DEBUG_UINT16);
DUMP_PARAM_PTR(pData, *(pFuncArguments+2), *((uint16_t*)*(pFuncArguments+1)), ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pFunctionName, "Ilp0100_core_writeRegister", FuncStrEqual){
DUMP_PARAM(RegisterName, *(pFuncArguments+0), ILP0100_DEBUG_UINT16);
DUMP_PARAM(Count, *(pFuncArguments+1), ILP0100_DEBUG_UINT16);
DUMP_PARAM_PTR(pData, *(pFuncArguments+2), *((uint16_t*)*(pFuncArguments+1)), ILP0100_DEBUG_UINT8);
}
return Status;
}
/* Emit one parameter as an XML element named pParamName.
 * Two modes:
 *  - ParamType == ILP0100_DEBUG_NONE: pParamValues points at one of the API
 *    structs; the struct is recognized by its (stringized) type name and
 *    each member is dumped recursively with the DUMP_STRUCTPARAM* macros.
 *  - otherwise: pParamValues is an array of ParamLength scalar values of
 *    the given type tag, printed as comma-separated hex.
 * NOTE(review): the UINT16/UINT32 branches reinterpret the byte pointer via
 * casts — this assumes pParamValues is suitably aligned and in host byte
 * order; FLOAT values are dumped as their raw 32-bit pattern. */
ilp0100_error Ilp0100_dumpParameter(const char* pParamName, const uint16_t ParamLength, const uint8_t *pParamValues, const uint8_t ParamType)
{
ilp0100_error Status = ILP0100_ERROR_NONE;
uint16_t I;
uint8_t ParamStrEqual;
ILP0100_DEBUG_WRITE_IN_LOG("<%s>",pParamName);
if(ParamType==ILP0100_DEBUG_NONE){
/* Struct mode: dispatch on the struct type name. */
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structInit", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structInit, pParamValues, NumberOfLanes, ILP0100_DEBUG_BYTE);
DUMP_STRUCTPARAM(Ilp0100_structInit, pParamValues, uwPixelFormat, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structInit, pParamValues, BitRate, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM(Ilp0100_structInit, pParamValues, ExternalClock, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM(Ilp0100_structInit, pParamValues, ClockUsed, ILP0100_DEBUG_BYTE);
DUMP_STRUCTPARAM(Ilp0100_structInit, pParamValues, UsedSensorInterface, ILP0100_DEBUG_BYTE);
DUMP_STRUCTPARAM(Ilp0100_structInit, pParamValues, IntrEnablePin1, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM(Ilp0100_structInit, pParamValues, IntrEnablePin2, ILP0100_DEBUG_UINT32);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structInitFirmware", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM_PTR(Ilp0100_structInitFirmware, pParamValues, pIlp0100Firmware, ((Ilp0100_structInitFirmware*)pParamValues)->Ilp0100FirmwareSize, ILP0100_DEBUG_BYTE);
DUMP_STRUCTPARAM( Ilp0100_structInitFirmware, pParamValues, Ilp0100FirmwareSize, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM_PTR(Ilp0100_structInitFirmware, pParamValues, pIlp0100SensorGenericCalibData, ((Ilp0100_structInitFirmware*)pParamValues)->Ilp0100SensorGenericCalibDataSize, ILP0100_DEBUG_BYTE);
DUMP_STRUCTPARAM( Ilp0100_structInitFirmware, pParamValues, Ilp0100SensorGenericCalibDataSize, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM_PTR(Ilp0100_structInitFirmware, pParamValues, pIlp0100SensorRawPart2PartCalibData,((Ilp0100_structInitFirmware*)pParamValues)->Ilp0100SensorRawPart2PartCalibDataSize, ILP0100_DEBUG_BYTE);
DUMP_STRUCTPARAM( Ilp0100_structInitFirmware, pParamValues, Ilp0100SensorRawPart2PartCalibDataSize, ILP0100_DEBUG_UINT32);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structSensorParams", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structSensorParams, pParamValues, FullActivePixels, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structSensorParams, pParamValues, MinLineLength, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structSensorParams, pParamValues, FullActiveLines, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structSensorParams, pParamValues, PixelOrder, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structSensorParams, pParamValues, StatusNbLines, ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structFrameFormat", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, ActiveLineLengthPixels, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, ActiveFrameLengthLines, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, LineLengthPixels, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, FrameLengthLines, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, StatusLinesOutputted, ILP0100_DEBUG_BOOL);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, StatusLineLengthPixels, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, StatusNbLines, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, MinInterframe, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, AutomaticFrameParamsUpdate, ILP0100_DEBUG_BOOL);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, HDRMode, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, uwOutputPixelFormat, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, ImageOrientation, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, HScaling, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, VScaling, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, Binning, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, Hoffset, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameFormat, pParamValues, Voffset, ILP0100_DEBUG_UINT16);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structFrameParams", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structFrameParams, pParamValues, ExposureTime, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameParams, pParamValues, AnalogGainCodeGreen, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameParams, pParamValues, AnalogGainCodeRed, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameParams, pParamValues, AnalogGainCodeBlue, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameParams, pParamValues, DigitalGainCodeGreen, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameParams, pParamValues, DigitalGainCodeRed, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structFrameParams, pParamValues, DigitalGainCodeBlue, ILP0100_DEBUG_UINT16);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structDefcorConfig", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structDefcorConfig, pParamValues, Mode, ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structDefcorParams", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structDefcorParams, pParamValues, SingletThreshold, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structDefcorParams, pParamValues, CoupletThreshold, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structDefcorParams, pParamValues, BlackStrength, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structDefcorParams, pParamValues, WhiteStrength, ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structChannelOffsetConfig", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structChannelOffsetConfig, pParamValues, Enable, ILP0100_DEBUG_BOOL);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structChannelOffsetParams", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structChannelOffsetParams, pParamValues, SensorPedestalGreenRed, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structChannelOffsetParams, pParamValues, SensorPedestalRed, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structChannelOffsetParams, pParamValues, SensorPedestalBlue, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structChannelOffsetParams, pParamValues, SensorPedestalGreenBlue, ILP0100_DEBUG_UINT16);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structHdrMergeConfig", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structHdrMergeConfig, pParamValues, Mode, ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structHdrMergeParams", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structHdrMergeParams, pParamValues, Method, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structHdrMergeParams, pParamValues, ImageCodes, ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structClsConfig", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structClsConfig, pParamValues, Enable, ILP0100_DEBUG_BOOL);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structClsParams", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structClsParams, pParamValues, BowlerCornerGain, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structClsParams, pParamValues, ColorTempKelvin, ILP0100_DEBUG_UINT16);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structToneMappingConfig", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structToneMappingConfig, pParamValues, Enable, ILP0100_DEBUG_BOOL);
DUMP_STRUCTPARAM(Ilp0100_structToneMappingConfig, pParamValues, UserDefinedCurveEnable, ILP0100_DEBUG_BOOL);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structToneMappingParams", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM( Ilp0100_structToneMappingParams, pParamValues, Strength, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM_ARRAY( Ilp0100_structToneMappingParams, pParamValues, UserDefinedCurve, 256, ILP0100_DEBUG_UINT16);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structGlaceConfig", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structGlaceConfig, pParamValues, Enable, ILP0100_DEBUG_BOOL);
DUMP_STRUCTPARAM(Ilp0100_structGlaceConfig, pParamValues, RoiHStart, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structGlaceConfig, pParamValues, RoiVStart, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structGlaceConfig, pParamValues, RoiHBlockSize, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structGlaceConfig, pParamValues, RoiVBlockSize, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structGlaceConfig, pParamValues, RoiHNumberOfBlocks, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structGlaceConfig, pParamValues, RoiVNumberOfBlocks, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structGlaceConfig, pParamValues, SaturationLevelRed, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structGlaceConfig, pParamValues, SaturationLevelGreen, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structGlaceConfig, pParamValues, SaturationLevelBlue, ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structHistConfig", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM(Ilp0100_structHistConfig, pParamValues, Mode, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structHistConfig, pParamValues, RoiXOffset, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structHistConfig, pParamValues, RoiYOffset, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structHistConfig, pParamValues, RoiXSize, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structHistConfig, pParamValues, RoiYSize, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM(Ilp0100_structHistConfig, pParamValues, YConversionFactorGreenRed, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structHistConfig, pParamValues, YConversionFactorRed, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structHistConfig, pParamValues, YConversionFactorBlue, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM(Ilp0100_structHistConfig, pParamValues, YConversionFactorGreenBlue,ILP0100_DEBUG_UINT8);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structGlaceStatsData", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM_ARRAY(Ilp0100_structGlaceStatsData, pParamValues, GlaceStatsRedMean, 48, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM_ARRAY(Ilp0100_structGlaceStatsData, pParamValues, GlaceStatsGreenMean, 48, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM_ARRAY(Ilp0100_structGlaceStatsData, pParamValues, GlaceStatsBlueMean, 48, ILP0100_DEBUG_UINT8);
DUMP_STRUCTPARAM_ARRAY(Ilp0100_structGlaceStatsData, pParamValues, GlaceStatsNbOfSaturatedPixels, 48, ILP0100_DEBUG_UINT16);
}
ILP0100_DEBUG_TEST_STR_EQUALITY(pParamName, "Ilp0100_structHistStatsData", ParamStrEqual){
ILP0100_DEBUG_WRITE_IN_LOG("\n");
DUMP_STRUCTPARAM_ARRAY(Ilp0100_structHistStatsData, pParamValues, HistStatsRedBin, 64, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM_ARRAY(Ilp0100_structHistStatsData, pParamValues, HistStatsGreenBin, 64, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM_ARRAY(Ilp0100_structHistStatsData, pParamValues, HistStatsBlueBin, 64, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsRedDarkestBin, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsRedDarkestCount, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsRedBrightestBin, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsRedBrightestCount, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsRedHighestBin, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsRedHighestCount, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsGreenDarkestBin, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsGreenDarkestCount, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsGreenBrightestBin, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsGreenBrightestCount, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsGreenHighestBin, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsGreenHighestCount, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsBlueDarkestBin, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsBlueDarkestCount, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsBlueBrightestBin, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsBlueBrightestCount, ILP0100_DEBUG_UINT32);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsBlueHighestBin, ILP0100_DEBUG_UINT16);
DUMP_STRUCTPARAM( Ilp0100_structHistStatsData, pParamValues, HistStatsBlueHighestCount, ILP0100_DEBUG_UINT32);
}
} else {
/* Scalar-array mode: print ParamLength comma-separated hex values.
 * BYTE/BOOL/UINT8 all share tag 1 and land in the BYTE case. */
for(I=0; I<ParamLength; I++){
switch(ParamType)
{
case ILP0100_DEBUG_NONE:
break;
case ILP0100_DEBUG_BYTE:
ILP0100_DEBUG_WRITE_IN_LOG("0x%02x",(uint8_t)*(pParamValues+I));
break;
case ILP0100_DEBUG_UINT16:
ILP0100_DEBUG_WRITE_IN_LOG("0x%04x",*(uint16_t*)(pParamValues+(I*2)));
break;
case ILP0100_DEBUG_UINT32:
ILP0100_DEBUG_WRITE_IN_LOG("0x%08x",*(uint32_t*)(pParamValues+(I*4)));
break;
case ILP0100_DEBUG_FLOAT:
/* Floats are dumped as their raw 32-bit representation. */
ILP0100_DEBUG_WRITE_IN_LOG("0x%08x",*(uint32_t*)(pParamValues+(I*4)));
break;
default:
break;
}
if(I<(ParamLength-1))
ILP0100_DEBUG_WRITE_IN_LOG(", ");
}
}
ILP0100_DEBUG_WRITE_IN_LOG("</%s>\n",pParamName);
return Status;
}
#endif
| gpl-2.0 |
mayqueenEMBEDDED/mq-kernel | drivers/gpu/drm/msm/msm_atomic.c | 228 | 7763 | /*
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
/* Tracks one in-flight atomic commit: the state being applied, the fence
 * that must signal before the hw can be touched, and which CRTCs are
 * involved (used to serialize overlapping updates).
 */
struct msm_commit {
	struct drm_device *dev;
	struct drm_atomic_state *state;
	uint32_t fence;			/* max over all fb bo's, see add_fb() */
	struct msm_fence_cb fence_cb;	/* fires when 'fence' signals (async path) */
	uint32_t crtc_mask;		/* bitmask of CRTCs this commit touches */
};
static void fence_cb(struct msm_fence_cb *cb);
/* block until specified crtcs are no longer pending update, and
 * atomically mark them as pending update
 *
 * Returns 0 on success, or -ERESTARTSYS if interrupted by a signal
 * (in which case no pending bits were set).
 */
static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	int ret;

	/* The waitqueue lock also protects priv->pending_crtcs, making the
	 * test-and-set below atomic wrt concurrent committers.
	 */
	spin_lock(&priv->pending_crtcs_event.lock);
	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
			!(priv->pending_crtcs & crtc_mask));
	if (ret == 0) {
		DBG("start: %08x", crtc_mask);
		priv->pending_crtcs |= crtc_mask;
	}
	spin_unlock(&priv->pending_crtcs_event.lock);

	return ret;
}
/* clear specified crtcs (no longer pending update) and wake any
 * committer blocked in start_atomic() waiting on them
 */
static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
{
	spin_lock(&priv->pending_crtcs_event.lock);
	DBG("end: %08x", crtc_mask);
	priv->pending_crtcs &= ~crtc_mask;
	wake_up_all_locked(&priv->pending_crtcs_event);
	spin_unlock(&priv->pending_crtcs_event.lock);
}
/* Allocate and initialize the commit-tracking object for @state.
 * Returns NULL on allocation failure.
 */
static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
	struct msm_commit *commit;

	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (!commit)
		return NULL;

	commit->dev = state->dev;
	commit->state = state;

	/* TODO we might need a way to indicate to run the cb on a
	 * different wq so wait_for_vblanks() doesn't block retiring
	 * bo's..
	 */
	INIT_FENCE_CB(&commit->fence_cb, fence_cb);

	return commit;
}
/* Free the commit object after clearing its CRTCs' pending bits
 * (waking any committer blocked in start_atomic()).
 */
static void commit_destroy(struct msm_commit *c)
{
	end_atomic(c->dev->dev_private, c->crtc_mask);
	kfree(c);
}
/* Wait until the kms backend reports the commit has landed on every
 * enabled CRTC in @old_state.  Legacy cursor updates are exempt since
 * userspace relies on them being completely unsynchronized.
 */
static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
		struct drm_atomic_state *old_state)
{
	struct msm_drm_private *priv = old_state->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	int num_crtcs = old_state->dev->mode_config.num_crtc;
	struct drm_crtc *crtc;
	int idx;

	for (idx = 0; idx < num_crtcs; idx++) {
		crtc = old_state->crtcs[idx];
		if (!crtc || !crtc->state->enable)
			continue;

		/* Legacy cursor ioctls are completely unsynced, and userspace
		 * relies on that (by doing tons of cursor updates).
		 */
		if (old_state->legacy_cursor_update)
			continue;

		kms->funcs->wait_for_crtc_commit_done(kms, crtc);
	}
}
/* The (potentially) asynchronous part of the commit. At this point
 * nothing can fail short of armageddon.
 *
 * Applies the state to hw in the standard order (disables, planes,
 * enables), waits for the commit to land, then releases the state and
 * the commit object.  Consumes @c.
 */
static void complete_commit(struct msm_commit *c)
{
	struct drm_atomic_state *state = c->state;
	struct drm_device *dev = state->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;

	kms->funcs->prepare_commit(kms, state);
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, false);
	drm_atomic_helper_commit_modeset_enables(dev, state);

	/* NOTE: _wait_for_vblanks() only waits for vblank on
	 * enabled CRTCs. So we end up faulting when disabling
	 * due to (potentially) unref'ing the outgoing fb's
	 * before the vblank when the disable has latched.
	 *
	 * But if it did wait on disabled (or newly disabled)
	 * CRTCs, that would be racy (ie. we could have missed
	 * the irq. We need some way to poll for pipe shut
	 * down. Or just live with occasionally hitting the
	 * timeout in the CRTC disable path (which really should
	 * not be critical path)
	 */
	msm_atomic_wait_for_commit_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	kms->funcs->complete_commit(kms, state);

	drm_atomic_state_free(state);

	commit_destroy(c);
}
/* Fence callback for the async commit path: fires once every bo the
 * commit depends on is ready, then applies the commit.
 */
static void fence_cb(struct msm_fence_cb *cb)
{
	struct msm_commit *c =
			container_of(cb, struct msm_commit, fence_cb);
	complete_commit(c);
}
/* Fold @fb's backing bo read-fence into the commit's wait fence, so the
 * commit is not applied before the bo's pending rendering completes.
 * NOTE(review): only plane 0 of the fb is considered — presumably
 * sufficient for the formats this driver scans out; confirm.
 */
static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
{
	struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
	c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
}
/* Validate an atomic state: planes first (the msm ->atomic_check hook
 * can flip ->mode_changed for pixel format changes), then the modeset
 * helper.  Returns 0 or a negative errno.
 */
int msm_atomic_check(struct drm_device *dev,
		struct drm_atomic_state *state)
{
	int ret;

	/*
	 * msm ->atomic_check can update ->mode_changed for pixel format
	 * changes, hence must be run before we check the modeset changes.
	 */
	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	return drm_atomic_helper_check_modeset(dev, state);
}
/**
 * msm_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the driver state object
 * @async: asynchronous commit
 *
 * This function commits a with drm_atomic_helper_check() pre-validated state
 * object. This can still fail when e.g. the framebuffer reservation fails. For
 * now this doesn't implement asynchronous commits.
 *
 * RETURNS
 * Zero for success or -errno.
 */
int msm_atomic_commit(struct drm_device *dev,
		struct drm_atomic_state *state, bool async)
{
	int nplanes = dev->mode_config.num_total_plane;
	int ncrtcs = dev->mode_config.num_crtc;
	ktime_t timeout;
	struct msm_commit *c;
	int i, ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	c = commit_init(state);
	if (!c) {
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Figure out what crtcs we have:
	 */
	for (i = 0; i < ncrtcs; i++) {
		struct drm_crtc *crtc = state->crtcs[i];
		if (!crtc)
			continue;
		c->crtc_mask |= (1 << drm_crtc_index(crtc));
	}

	/*
	 * Figure out what fence to wait for:
	 */
	for (i = 0; i < nplanes; i++) {
		struct drm_plane *plane = state->planes[i];
		struct drm_plane_state *new_state = state->plane_states[i];
		if (!plane)
			continue;
		/* only wait on fb's we are actually switching to */
		if ((plane->state->fb != new_state->fb) && new_state->fb)
			add_fb(c, new_state->fb);
	}

	/*
	 * Wait for pending updates on any of the same crtc's and then
	 * mark our set of crtc's as busy:
	 */
	ret = start_atomic(dev->dev_private, c->crtc_mask);
	if (ret) {
		/* start_atomic() failed, so no pending bits were set and a
		 * plain kfree() (not commit_destroy()) is the right cleanup.
		 */
		kfree(c);
		goto error;
	}

	/*
	 * This is the point of no return - everything below never fails except
	 * when the hw goes bonghits. Which means we can commit the new state on
	 * the software side now.
	 */
	drm_atomic_helper_swap_state(dev, state);

	/*
	 * Everything below can be run asynchronously without the need to grab
	 * any modeset locks at all under one conditions: It must be guaranteed
	 * that the asynchronous work has either been cancelled (if the driver
	 * supports it, which at least requires that the framebuffers get
	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
	 * before the new state gets committed on the software side with
	 * drm_atomic_helper_swap_state().
	 *
	 * This scheme allows new atomic state updates to be prepared and
	 * checked in parallel to the asynchronous completion of the previous
	 * update. Which is important since compositors need to figure out the
	 * composition of the next frame right after having submitted the
	 * current layout.
	 */

	if (async) {
		/* complete_commit() will run from fence_cb() */
		msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
		return 0;
	}

	timeout = ktime_add_ms(ktime_get(), 1000);

	/* uninterruptible wait */
	msm_wait_fence(dev, c->fence, &timeout, false);

	complete_commit(c);

	return 0;

error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}
| gpl-2.0 |
96boards-bubblegum/linux | drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 228 | 53154 | /*******************************************************************************
* Intel PRO/1000 Linux driver
* Copyright(c) 1999 - 2006 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* Linux NICS <linux.nics@intel.com>
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
******************************************************************************/
/* ethtool support for e1000 */
#include "e1000.h"
#include <linux/jiffies.h>
#include <linux/uaccess.h>
/* Which base structure a stat counter lives in. */
enum {NETDEV_STATS, E1000_STATS};

/* One entry of the ethtool statistics table: the name exported to
 * userspace plus the location of the counter.
 */
struct e1000_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name reported via get_strings */
	int type;		/* NETDEV_STATS or E1000_STATS */
	int sizeof_stat;	/* size of the counter in bytes */
	int stat_offset;	/* byte offset within the base struct */
};
/* Build an e1000_stats entry for a member of struct e1000_adapter. */
#define E1000_STAT(m)		E1000_STATS, \
				sizeof(((struct e1000_adapter *)0)->m), \
				offsetof(struct e1000_adapter, m)
/* Build an e1000_stats entry for a member of struct net_device. */
#define E1000_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct net_device *)0)->m), \
				offsetof(struct net_device, m)

/* Table of all statistics exported through ethtool -S. */
static const struct e1000_stats e1000_gstrings_stats[] = {
	{ "rx_packets", E1000_STAT(stats.gprc) },
	{ "tx_packets", E1000_STAT(stats.gptc) },
	{ "rx_bytes", E1000_STAT(stats.gorcl) },
	{ "tx_bytes", E1000_STAT(stats.gotcl) },
	{ "rx_broadcast", E1000_STAT(stats.bprc) },
	{ "tx_broadcast", E1000_STAT(stats.bptc) },
	{ "rx_multicast", E1000_STAT(stats.mprc) },
	{ "tx_multicast", E1000_STAT(stats.mptc) },
	{ "rx_errors", E1000_STAT(stats.rxerrc) },
	{ "tx_errors", E1000_STAT(stats.txerrc) },
	{ "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
	{ "multicast", E1000_STAT(stats.mprc) },
	{ "collisions", E1000_STAT(stats.colc) },
	{ "rx_length_errors", E1000_STAT(stats.rlerrc) },
	{ "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
	{ "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
	{ "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
	{ "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
	{ "tx_window_errors", E1000_STAT(stats.latecol) },
	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
	{ "tx_restart_queue", E1000_STAT(restart_queue) },
	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
	{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },
	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
	{ "tx_smbus", E1000_STAT(stats.mgptc) },
	{ "rx_smbus", E1000_STAT(stats.mgprc) },
	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
};

#define E1000_QUEUE_STATS_LEN 0	/* no per-queue stats on this driver */
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)

/* Names of the self-tests reported via ethtool -t. */
static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};

#define E1000_TEST_LEN	ARRAY_SIZE(e1000_gstrings_test)
/* ethtool get_settings: report supported/advertised modes, current
 * speed/duplex and MDI-X state for copper or fiber media.
 * Always returns 0.
 */
static int e1000_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->media_type == e1000_media_type_copper) {
		ecmd->supported = (SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_1000baseT_Full|
				   SUPPORTED_Autoneg |
				   SUPPORTED_TP);
		ecmd->advertising = ADVERTISED_TP;

		if (hw->autoneg == 1) {
			ecmd->advertising |= ADVERTISED_Autoneg;
			/* the e1000 autoneg seems to match ethtool nicely */
			ecmd->advertising |= hw->autoneg_advertised;
		}

		ecmd->port = PORT_TP;
		ecmd->phy_address = hw->phy_addr;

		/* only the 82543 uses an external transceiver on copper */
		if (hw->mac_type == e1000_82543)
			ecmd->transceiver = XCVR_EXTERNAL;
		else
			ecmd->transceiver = XCVR_INTERNAL;
	} else {
		ecmd->supported   = (SUPPORTED_1000baseT_Full |
				     SUPPORTED_FIBRE |
				     SUPPORTED_Autoneg);

		ecmd->advertising = (ADVERTISED_1000baseT_Full |
				     ADVERTISED_FIBRE |
				     ADVERTISED_Autoneg);

		ecmd->port = PORT_FIBRE;

		if (hw->mac_type >= e1000_82545)
			ecmd->transceiver = XCVR_INTERNAL;
		else
			ecmd->transceiver = XCVR_EXTERNAL;
	}

	if (er32(STATUS) & E1000_STATUS_LU) {
		/* link is up: report the negotiated speed/duplex */
		e1000_get_speed_and_duplex(hw, &adapter->link_speed,
					   &adapter->link_duplex);
		ethtool_cmd_speed_set(ecmd, adapter->link_speed);

		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
		 * and HALF_DUPLEX != DUPLEX_HALF
		 */
		if (adapter->link_duplex == FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
			 hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;

	/* MDI-X => 1; MDI => 0 */
	if ((hw->media_type == e1000_media_type_copper) &&
	    netif_carrier_ok(netdev))
		ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
				     ETH_TP_MDI_X : ETH_TP_MDI);
	else
		ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;

	if (hw->mdix == AUTO_ALL_MODES)
		ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	else
		ecmd->eth_tp_mdix_ctrl = hw->mdix;

	return 0;
}
/* ethtool set_settings: apply autoneg/speed/duplex and MDI-X control,
 * then reset the link to make the new configuration take effect.
 * Returns 0 on success or a negative errno.
 */
static int e1000_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* MDI setting is only allowed when autoneg enabled because
	 * some hardware doesn't allow MDI setting when speed or
	 * duplex is forced.
	 */
	if (ecmd->eth_tp_mdix_ctrl) {
		if (hw->media_type != e1000_media_type_copper)
			return -EOPNOTSUPP;

		if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
		    (ecmd->autoneg != AUTONEG_ENABLE)) {
			e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
			return -EINVAL;
		}
	}

	/* serialize against other reset/reconfigure paths */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		hw->autoneg = 1;
		if (hw->media_type == e1000_media_type_fiber)
			hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
						 ADVERTISED_FIBRE |
						 ADVERTISED_Autoneg;
		else
			hw->autoneg_advertised = ecmd->advertising |
						 ADVERTISED_TP |
						 ADVERTISED_Autoneg;
		ecmd->advertising = hw->autoneg_advertised;
	} else {
		u32 speed = ethtool_cmd_speed(ecmd);
		/* calling this overrides forced MDI setting */
		if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
			clear_bit(__E1000_RESETTING, &adapter->flags);
			return -EINVAL;
		}
	}

	/* MDI-X => 2; MDI => 1; Auto => 3 */
	if (ecmd->eth_tp_mdix_ctrl) {
		if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
			hw->mdix = AUTO_ALL_MODES;
		else
			hw->mdix = ecmd->eth_tp_mdix_ctrl;
	}

	/* reset the link */
	if (netif_running(adapter->netdev)) {
		e1000_down(adapter);
		e1000_up(adapter);
	} else {
		e1000_reset(adapter);
	}

	clear_bit(__E1000_RESETTING, &adapter->flags);
	return 0;
}
static u32 e1000_get_link(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
/* If the link is not reported up to netdev, interrupts are disabled,
* and so the physical link state may have changed since we last
* looked. Set get_link_status to make sure that the true link
* state is interrogated, rather than pulling a cached and possibly
* stale link state from the driver.
*/
if (!netif_carrier_ok(netdev))
adapter->hw.get_link_status = 1;
return e1000_has_link(adapter);
}
/* ethtool get_pauseparam: report flow-control autoneg and the
 * configured rx/tx pause directions.
 */
static void e1000_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	pause->autoneg =
		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);

	switch (hw->fc) {
	case E1000_FC_RX_PAUSE:
		pause->rx_pause = 1;
		break;
	case E1000_FC_TX_PAUSE:
		pause->tx_pause = 1;
		break;
	case E1000_FC_FULL:
		pause->rx_pause = 1;
		pause->tx_pause = 1;
		break;
	default:
		/* E1000_FC_NONE: leave both directions cleared */
		break;
	}
}
/* ethtool set_pauseparam: program the requested flow-control mode,
 * renegotiating or forcing it depending on pause->autoneg.
 * Returns 0 on success or a negative errno from the hw setup calls.
 */
static int e1000_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int retval = 0;

	adapter->fc_autoneg = pause->autoneg;

	/* serialize against other reset/reconfigure paths */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	if (pause->rx_pause && pause->tx_pause)
		hw->fc = E1000_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		hw->fc = E1000_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		hw->fc = E1000_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		hw->fc = E1000_FC_NONE;

	hw->original_fc = hw->fc;

	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
		/* renegotiate so the link partner learns the new setting */
		if (netif_running(adapter->netdev)) {
			e1000_down(adapter);
			e1000_up(adapter);
		} else {
			e1000_reset(adapter);
		}
	} else
		retval = ((hw->media_type == e1000_media_type_fiber) ?
			  e1000_setup_link(hw) : e1000_force_mac_fc(hw));

	clear_bit(__E1000_RESETTING, &adapter->flags);
	return retval;
}
/* ethtool get_msglevel: return the driver's message-enable bitmap. */
static u32 e1000_get_msglevel(struct net_device *netdev)
{
	struct e1000_adapter *priv = netdev_priv(netdev);

	return priv->msg_enable;
}
/* ethtool set_msglevel: store the new message-enable bitmap. */
static void e1000_set_msglevel(struct net_device *netdev, u32 data)
{
	struct e1000_adapter *priv = netdev_priv(netdev);

	priv->msg_enable = data;
}
/* Length in bytes of the register dump produced by e1000_get_regs(). */
static int e1000_get_regs_len(struct net_device *netdev)
{
#define E1000_REGS_LEN 32	/* also sizes the buffer in e1000_get_regs() */
	return E1000_REGS_LEN * sizeof(u32);
}
/* ethtool get_regs: dump a fixed set of MAC registers plus a
 * PHY-type-dependent set of PHY readings into @p (E1000_REGS_LEN u32s,
 * zeroed first so unset slots read as 0).
 */
static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
			   void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u16 phy_data;

	memset(p, 0, E1000_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	regs_buff[0]  = er32(CTRL);
	regs_buff[1]  = er32(STATUS);

	regs_buff[2]  = er32(RCTL);
	regs_buff[3]  = er32(RDLEN);
	regs_buff[4]  = er32(RDH);
	regs_buff[5]  = er32(RDT);
	regs_buff[6]  = er32(RDTR);

	regs_buff[7]  = er32(TCTL);
	regs_buff[8]  = er32(TDLEN);
	regs_buff[9]  = er32(TDH);
	regs_buff[10] = er32(TDT);
	regs_buff[11] = er32(TIDV);

	regs_buff[12] = hw->phy_type;  /* PHY type (IGP=1, M88=0) */

	if (hw->phy_type == e1000_phy_igp) {
		/* IGP PHY: registers are paged; select the page, then read */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_A);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[13] = (u32)phy_data; /* cable length */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_B);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[14] = (u32)phy_data; /* cable length */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_C);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[15] = (u32)phy_data; /* cable length */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_D);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[16] = (u32)phy_data; /* cable length */
		regs_buff[17] = 0; /* extended 10bt distance (not needed) */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[18] = (u32)phy_data; /* cable polarity */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_PCS_INIT_REG);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[19] = (u32)phy_data; /* cable polarity */
		regs_buff[20] = 0; /* polarity correction enabled (always) */
		regs_buff[22] = 0; /* phy receive errors (unavailable) */
		regs_buff[23] = regs_buff[18]; /* mdix mode */
		/* restore page 0 before leaving */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
	} else {
		/* M88 PHY: flat register space */
		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
		regs_buff[13] = (u32)phy_data; /* cable length */
		regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
		regs_buff[18] = regs_buff[13]; /* cable polarity */
		regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[20] = regs_buff[17]; /* polarity correction */
		/* phy receive errors */
		regs_buff[22] = adapter->phy_stats.receive_errors;
		regs_buff[23] = regs_buff[13]; /* mdix mode */
	}
	regs_buff[21] = adapter->phy_stats.idle_errors;  /* phy idle errors */
	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
	regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
	if (hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		regs_buff[26] = er32(MANC);
	}
}
/* ethtool get_eeprom_len: EEPROM size in bytes (it is word addressable). */
static int e1000_get_eeprom_len(struct net_device *netdev)
{
	struct e1000_adapter *priv = netdev_priv(netdev);

	return priv->hw.eeprom.word_size * 2;
}
/* ethtool get_eeprom: read @eeprom->len bytes starting at @eeprom->offset
 * into @bytes.  The EEPROM is word (16-bit) addressable and little-endian,
 * so whole words are read and the requested byte window copied out.
 * Returns 0 on success, -EINVAL for a zero-length request, -ENOMEM on
 * allocation failure, or the hw read error code.
 */
static int e1000_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	/* kmalloc_array() instead of open-coded sizeof(u16) * n: the
	 * element-count multiplication is checked for overflow
	 */
	eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
				    GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	if (hw->eeprom.type == e1000_eeprom_spi)
		/* SPI EEPROMs support multi-word reads in one call */
		ret_val = e1000_read_eeprom(hw, first_word,
					    last_word - first_word + 1,
					    eeprom_buff);
	else {
		/* microwire: read one word at a time, stop on first error */
		for (i = 0; i < last_word - first_word + 1; i++) {
			ret_val = e1000_read_eeprom(hw, first_word + i, 1,
						    &eeprom_buff[i]);
			if (ret_val)
				break;
		}
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	/* skip the leading byte of the first word for odd offsets */
	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
	       eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
/* ethtool set_eeprom: write @eeprom->len bytes at @eeprom->offset.
 * The EEPROM is word addressable, so partial words at either end are
 * handled via read-modify-write; the checksum word is refreshed when a
 * word before it changed.  Returns 0 or a negative errno / hw error.
 */
static int e1000_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EOPNOTSUPP;

	/* magic ties the request to this exact device */
	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EFAULT;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = (void *)eeprom_buff;

	if (eeprom->offset & 1) {
		/* need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = e1000_read_eeprom(hw, first_word, 1,
					    &eeprom_buff[0]);
		ptr++;
	}
	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
		/* need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = e1000_read_eeprom(hw, last_word, 1,
					    &eeprom_buff[last_word - first_word]);
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);

	ret_val = e1000_write_eeprom(hw, first_word,
				     last_word - first_word + 1, eeprom_buff);

	/* Update the checksum over the first part of the EEPROM if needed */
	if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG))
		e1000_update_eeprom_checksum(hw);

	kfree(eeprom_buff);
	return ret_val;
}
/* ethtool get_drvinfo: report driver name, version and PCI bus address. */
static void e1000_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct e1000_adapter *priv = netdev_priv(netdev);

	strlcpy(drvinfo->driver, e1000_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, e1000_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(priv->pdev),
		sizeof(drvinfo->bus_info));
}
static void e1000_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
e1000_mac_type mac_type = hw->mac_type;
struct e1000_tx_ring *txdr = adapter->tx_ring;
struct e1000_rx_ring *rxdr = adapter->rx_ring;
ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
E1000_MAX_82544_RXD;
ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
E1000_MAX_82544_TXD;
ring->rx_pending = rxdr->count;
ring->tx_pending = txdr->count;
}
/* ethtool set_ringparam: resize the rx/tx descriptor rings.  New ring
 * structures are allocated and brought up before the old ones are
 * freed, so a failure leaves the device on its previous rings.
 * Returns 0 or a negative errno.
 */
static int e1000_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	e1000_mac_type mac_type = hw->mac_type;
	struct e1000_tx_ring *txdr, *tx_old;
	struct e1000_rx_ring *rxdr, *rx_old;
	int i, err;

	/* mini/jumbo rings are not supported by this hardware */
	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	/* serialize against other reset/reconfigure paths */
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	if (netif_running(adapter->netdev))
		e1000_down(adapter);

	tx_old = adapter->tx_ring;
	rx_old = adapter->rx_ring;

	err = -ENOMEM;
	txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
		       GFP_KERNEL);
	if (!txdr)
		goto err_alloc_tx;

	rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
		       GFP_KERNEL);
	if (!rxdr)
		goto err_alloc_rx;

	adapter->tx_ring = txdr;
	adapter->rx_ring = rxdr;

	/* clamp requested counts to hw limits and alignment requirements */
	rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
	rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ?
			  E1000_MAX_RXD : E1000_MAX_82544_RXD));
	rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
	txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
	txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ?
			  E1000_MAX_TXD : E1000_MAX_82544_TXD));
	txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);

	for (i = 0; i < adapter->num_tx_queues; i++)
		txdr[i].count = txdr->count;
	for (i = 0; i < adapter->num_rx_queues; i++)
		rxdr[i].count = rxdr->count;

	if (netif_running(adapter->netdev)) {
		/* Try to get new resources before deleting old */
		err = e1000_setup_all_rx_resources(adapter);
		if (err)
			goto err_setup_rx;
		err = e1000_setup_all_tx_resources(adapter);
		if (err)
			goto err_setup_tx;

		/* save the new, restore the old in order to free it,
		 * then restore the new back again
		 */
		adapter->rx_ring = rx_old;
		adapter->tx_ring = tx_old;
		e1000_free_all_rx_resources(adapter);
		e1000_free_all_tx_resources(adapter);
		kfree(tx_old);
		kfree(rx_old);
		adapter->rx_ring = rxdr;
		adapter->tx_ring = txdr;
		err = e1000_up(adapter);
		if (err)
			goto err_setup;
	}

	clear_bit(__E1000_RESETTING, &adapter->flags);
	return 0;
err_setup_tx:
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	/* put the old (still valid) rings back in place */
	adapter->rx_ring = rx_old;
	adapter->tx_ring = tx_old;
	kfree(rxdr);
err_alloc_rx:
	kfree(txdr);
err_alloc_tx:
	e1000_up(adapter);
err_setup:
	clear_bit(__E1000_RESETTING, &adapter->flags);
	return err;
}
/* Write a set of bit patterns (masked by @write) to register @reg and
 * verify each reads back correctly under @mask.  On mismatch, store the
 * register offset in *@data and return true (failure).
 */
static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	static const u32 patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	struct e1000_hw *hw = &adapter->hw;
	u8 __iomem *address = hw->hw_addr + reg;
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(patterns); idx++) {
		u32 val = write & patterns[idx];
		u32 readback;

		writel(val, address);
		readback = readl(address);
		if (readback != (val & mask)) {
			e_err(drv, "pattern test reg %04X failed: "
			      "got 0x%08X expected 0x%08X\n",
			      reg, readback, (val & mask));
			*data = reg;
			return true;
		}
	}
	return false;
}
/* Write @write (masked by @mask) to register @reg and verify the masked
 * readback matches.  On mismatch, store the register offset in *@data
 * and return true (failure).
 */
static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	struct e1000_hw *hw = &adapter->hw;
	u8 __iomem *address = hw->hw_addr + reg;
	u32 readback;

	writel(write & mask, address);
	readback = readl(address);
	if ((readback & mask) == (write & mask))
		return false;

	e_err(drv, "set/check reg %04X test failed: "
	      "got 0x%08X expected 0x%08X\n",
	      reg, (readback & mask), (write & mask));
	*data = reg;
	return true;
}
/* Wrappers around reg_pattern_test()/reg_set_and_check() that pick the
 * pre-82543 (82542) register offset when needed and make the calling
 * function return 1 on the first failure.  They rely on 'adapter',
 * 'data' and 'hw' being in scope at the call site.
 */
#define REG_PATTERN_TEST(reg, mask, write)			     \
	do {							     \
		if (reg_pattern_test(adapter, data,		     \
			     (hw->mac_type >= e1000_82543)	     \
			     ? E1000_##reg : E1000_82542_##reg,	     \
			     mask, write))			     \
			return 1;				     \
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write)			     \
	do {							     \
		if (reg_set_and_check(adapter, data,		     \
			      (hw->mac_type >= e1000_82543)	     \
			      ? E1000_##reg : E1000_82542_##reg,     \
			      mask, write))			     \
			return 1;				     \
	} while (0)
/* Offline register self-test: verifies the read-only STATUS register
 * rejects writes, then pattern- and set/check-tests the writable MAC
 * registers (with mac-type-specific variants).  On failure, *data holds
 * the offending register (or 1 for STATUS) and 1 is returned; 0 on pass.
 */
static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
{
	u32 value, before, after;
	u32 i, toggle;
	struct e1000_hw *hw = &adapter->hw;

	/* The status register is Read Only, so a write should fail.
	 * Some bits that get toggled are ignored.
	 */

	/* there are several bits on newer hardware that are r/w */
	toggle = 0xFFFFF833;

	before = er32(STATUS);
	value = (er32(STATUS) & toggle);
	ew32(STATUS, toggle);
	after = er32(STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: "
		      "0x%08X expected: 0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ew32(STATUS, before);

	REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
	REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8);
	REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
	REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);

	REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);

	before = 0x06DFB3FE;
	REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
	REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);

	if (hw->mac_type >= e1000_82543) {
		REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
		REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
		REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
		REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
		REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
		value = E1000_RAR_ENTRIES;
		/* test the odd (high) word of each receive-address pair */
		for (i = 0; i < value; i++) {
			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2),
					 0x8003FFFF, 0xFFFFFFFF);
		}
	} else {
		REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
		REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
		REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
		REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
	}

	value = E1000_MC_TBL_SIZE;
	for (i = 0; i < value; i++)
		REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);

	*data = 0;
	return 0;
}
/* Offline EEPROM self-test: sum every word up to and including the
 * checksum word and verify the total equals EEPROM_SUM.
 * *data is 0 on pass, 1 on read failure, 2 on checksum mismatch; the
 * same value is returned.
 */
static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 checksum = 0;
	u16 word;
	u16 i;

	*data = 0;

	/* Read and add up the contents of the EEPROM */
	for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
		if ((e1000_read_eeprom(hw, i, 1, &word)) < 0) {
			*data = 1;
			break;
		}
		checksum += word;
	}

	/* If Checksum is not Correct return error else test passed */
	if ((checksum != (u16)EEPROM_SUM) && !(*data))
		*data = 2;

	return *data;
}
/* Interrupt handler used only by e1000_intr_test(): accumulates the
 * interrupt causes that fired into adapter->test_icr.
 */
static irqreturn_t e1000_test_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->test_icr |= er32(ICR);

	return IRQ_HANDLED;
}
/* Offline interrupt self-test: hooks a test handler on the device irq,
 * then for each of the first 10 cause bits forces the interrupt with
 * it masked (must NOT fire), unmasked (must fire), and with all other
 * bits forced (only it must fire).  The masked checks are skipped when
 * the irq line is shared.  *data is 0 on pass or a failure code (1-5);
 * returns *data, or -1 if no irq could be hooked.
 */
static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0;
	bool shared_int = true;
	u32 irq = adapter->pdev->irq;
	struct e1000_hw *hw = &adapter->hw;

	*data = 0;

	/* NOTE: we don't test MSI interrupts here, yet
	 * Hook up test interrupt handler just for this test
	 */
	if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
			 netdev))
		shared_int = false;
	else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
			     netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", (shared_int ?
	       "shared" : "unshared"));

	/* Disable all the interrupts */
	ew32(IMC, 0xFFFFFFFF);
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!shared_int) {
			/* Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			ew32(IMC, mask);
			ew32(ICS, mask);
			E1000_WRITE_FLUSH();
			msleep(10);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/* Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted.  If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;
		ew32(IMS, mask);
		ew32(ICS, mask);
		E1000_WRITE_FLUSH();
		msleep(10);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/* Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			ew32(IMC, ~mask & 0x00007FFF);
			ew32(ICS, ~mask & 0x00007FFF);
			E1000_WRITE_FLUSH();
			msleep(10);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	ew32(IMC, 0xFFFFFFFF);
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
/* Tear down the temporary Tx/Rx descriptor rings built by
 * e1000_setup_desc_rings(): unmap each buffer's DMA mapping, free the
 * skbs/raw buffers, release the coherent descriptor memory, and finally
 * free the buffer_info arrays.  Safe to call on a partially constructed
 * state (every step is guarded), which is why the setup path can use it
 * as its single error-unwind routine.
 */
static void e1000_free_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	/* Unmap and free per-descriptor Tx buffers before the ring itself */
	if (txdr->desc && txdr->buffer_info) {
		for (i = 0; i < txdr->count; i++) {
			if (txdr->buffer_info[i].dma)
				dma_unmap_single(&pdev->dev,
						 txdr->buffer_info[i].dma,
						 txdr->buffer_info[i].length,
						 DMA_TO_DEVICE);
			if (txdr->buffer_info[i].skb)
				dev_kfree_skb(txdr->buffer_info[i].skb);
		}
	}

	/* Rx buffers are raw kzalloc'd data, not skbs; kfree(NULL) is a no-op */
	if (rxdr->desc && rxdr->buffer_info) {
		for (i = 0; i < rxdr->count; i++) {
			if (rxdr->buffer_info[i].dma)
				dma_unmap_single(&pdev->dev,
						 rxdr->buffer_info[i].dma,
						 E1000_RXBUFFER_2048,
						 DMA_FROM_DEVICE);
			kfree(rxdr->buffer_info[i].rxbuf.data);
		}
	}

	/* Release the coherent descriptor ring memory */
	if (txdr->desc) {
		dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
				  txdr->dma);
		txdr->desc = NULL;
	}
	if (rxdr->desc) {
		dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
				  rxdr->dma);
		rxdr->desc = NULL;
	}

	/* NULL the arrays so a repeated call is harmless */
	kfree(txdr->buffer_info);
	txdr->buffer_info = NULL;
	kfree(rxdr->buffer_info);
	rxdr->buffer_info = NULL;
}
/* Build small private Tx and Rx descriptor rings (with pre-filled 1 KB Tx
 * frames and 2 KB Rx buffers) and program the hardware ring registers to
 * point at them, for use by the loopback self-test.  Returns 0 on success
 * or a distinct non-zero step code (1..8) identifying which allocation or
 * mapping failed; on any failure everything already built is released via
 * e1000_free_desc_rings().
 */
static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	u32 rctl;
	int i, ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	if (!txdr->count)
		txdr->count = E1000_DEFAULT_TXD;

	txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer),
				    GFP_KERNEL);
	if (!txdr->buffer_info) {
		ret_val = 1;
		goto err_nomem;
	}

	/* Descriptor memory must be page-aligned for the hardware */
	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);
	txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					 GFP_KERNEL);
	if (!txdr->desc) {
		ret_val = 2;
		goto err_nomem;
	}
	txdr->next_to_use = txdr->next_to_clean = 0;

	/* Point the hardware Tx ring registers at the test ring and enable
	 * the transmitter with default collision parameters.
	 */
	ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
	ew32(TDBAH, ((u64)txdr->dma >> 32));
	ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc));
	ew32(TDH, 0);
	ew32(TDT, 0);
	ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN |
	     E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
	     E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);

	/* One 1 KB skb per Tx descriptor, DMA-mapped and described as a
	 * single end-of-packet frame.
	 */
	for (i = 0; i < txdr->count; i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
		struct sk_buff *skb;
		unsigned int size = 1024;

		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, size);
		txdr->buffer_info[i].skb = skb;
		txdr->buffer_info[i].length = skb->len;
		txdr->buffer_info[i].dma =
			dma_map_single(&pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) {
			ret_val = 4;
			goto err_nomem;
		}
		tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
		tx_desc->lower.data = cpu_to_le32(skb->len);
		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
						   E1000_TXD_CMD_IFCS |
						   E1000_TXD_CMD_RPS);
		tx_desc->upper.data = 0;
	}

	/* Setup Rx descriptor ring and Rx buffers */

	if (!rxdr->count)
		rxdr->count = E1000_DEFAULT_RXD;

	rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer),
				    GFP_KERNEL);
	if (!rxdr->buffer_info) {
		ret_val = 5;
		goto err_nomem;
	}

	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
	rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					 GFP_KERNEL);
	if (!rxdr->desc) {
		ret_val = 6;
		goto err_nomem;
	}
	rxdr->next_to_use = rxdr->next_to_clean = 0;

	/* Disable receives while reprogramming the Rx ring registers, then
	 * re-enable with 2 KB buffers and broadcast accept.
	 */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF));
	ew32(RDBAH, ((u64)rxdr->dma >> 32));
	ew32(RDLEN, rxdr->size);
	ew32(RDH, 0);
	ew32(RDT, 0);
	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
	ew32(RCTL, rctl);

	/* Raw 2 KB receive buffers; the DMA mapping skips the skb headroom
	 * (NET_SKB_PAD + NET_IP_ALIGN) reserved at the front of each buffer.
	 */
	for (i = 0; i < rxdr->count; i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
		u8 *buf;

		buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN,
			      GFP_KERNEL);
		if (!buf) {
			ret_val = 7;
			goto err_nomem;
		}
		rxdr->buffer_info[i].rxbuf.data = buf;

		rxdr->buffer_info[i].dma =
			dma_map_single(&pdev->dev,
				       buf + NET_SKB_PAD + NET_IP_ALIGN,
				       E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
			ret_val = 8;
			goto err_nomem;
		}
		rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
	}

	return 0;

err_nomem:
	/* Single unwind path; the free routine tolerates partial setup */
	e1000_free_desc_rings(adapter);
	return ret_val;
}
/* Disable the PHY receiver so a plugged/reconnected cable cannot disturb
 * the loopback test.  Registers 29/30 form an indirect address/data pair;
 * the exact values are an undocumented M88 PHY sequence — TODO confirm
 * against the PHY vendor errata/datasheet.
 */
static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	e1000_write_phy_reg(hw, 29, 0x001F);
	e1000_write_phy_reg(hw, 30, 0x8FFC);
	e1000_write_phy_reg(hw, 29, 0x001A);
	e1000_write_phy_reg(hw, 30, 0x8FF0);
}
/* Re-apply the TX_CLK and CRS-on-TX settings that an M88 PHY loses across
 * a software reset; called after each reset in the loopback setup path.
 */
static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_reg;

	/* Because we reset the PHY above, we need to re-force TX_CLK in the
	 * Extended PHY Specific Control Register to 25MHz clock.  This
	 * value defaults back to a 2.5MHz clock when the PHY is reset.
	 */
	e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
	phy_reg |= M88E1000_EPSCR_TX_CLK_25;
	e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);

	/* In addition, because of the s/w reset above, we need to enable
	 * CRS on TX.  This must be set for both full and half duplex
	 * operation.
	 */
	e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
	phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
	e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
}
/* Put a non-integrated (82543, external M88) PHY into loopback: force
 * 1000/full on the MAC, disable auto-crossover, reset the PHY, disable its
 * receiver and set the MII loopback bit, then read back three registers to
 * confirm the configuration took.  Returns 0 on success or a distinct
 * error code (9..11) naming which verification read failed; the caller
 * retries the whole sequence because these PHY registers are known to get
 * corrupted at random.
 */
static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_reg;
	u16 phy_reg;

	/* Setup the Device Control Register for PHY loopback test. */
	ctrl_reg = er32(CTRL);
	ctrl_reg |= (E1000_CTRL_ILOS |		/* Invert Loss-Of-Signal */
		     E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX |	/* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
		     E1000_CTRL_FD);		/* Force Duplex to FULL */
	ew32(CTRL, ctrl_reg);

	/* Read the PHY Specific Control Register (0x10) */
	e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);

	/* Clear Auto-Crossover bits in PHY Specific Control Register
	 * (bits 6:5).
	 */
	phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
	e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg);

	/* Perform software reset on the PHY */
	e1000_phy_reset(hw);

	/* Have to setup TX_CLK and TX_CRS after software reset */
	e1000_phy_reset_clk_and_crs(adapter);

	/* 0x8100 = MII reset with loopback requested */
	e1000_write_phy_reg(hw, PHY_CTRL, 0x8100);

	/* Wait for reset to complete. */
	udelay(500);

	/* Have to setup TX_CLK and TX_CRS after software reset */
	e1000_phy_reset_clk_and_crs(adapter);

	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	e1000_phy_disable_receiver(adapter);

	/* Set the loopback bit in the PHY control register. */
	e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
	phy_reg |= MII_CR_LOOPBACK;
	e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);

	/* Setup TX_CLK and TX_CRS one more time. */
	e1000_phy_reset_clk_and_crs(adapter);

	/* Check Phy Configuration */
	e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
	if (phy_reg != 0x4100)
		return 9;

	e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
	if (phy_reg != 0x0070)
		return 10;

	e1000_read_phy_reg(hw, 29, &phy_reg);
	if (phy_reg != 0x001A)
		return 11;

	return 0;
}
/* Put an integrated PHY into loopback: disable autoneg (and, on M88 PHYs,
 * auto-MDI/MDIX), force the PHY to 1000/full with loopback via PHY_CTRL,
 * then force the MAC to the matching speed/duplex.  Always returns 0.
 *
 * Fix: the original read CTRL into ctrl_reg before the PHY write and then
 * immediately overwrote it with a second er32(CTRL) without using the
 * first value — the dead read is removed (CTRL reads have no side effect).
 */
static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_reg = 0;
	u32 stat_reg = 0;

	hw->autoneg = false;

	if (hw->phy_type == e1000_phy_m88) {
		/* Auto-MDI/MDIX Off */
		e1000_write_phy_reg(hw,
				    M88E1000_PHY_SPEC_CTRL, 0x0808);
		/* reset to update Auto-MDI/MDIX */
		e1000_write_phy_reg(hw, PHY_CTRL, 0x9140);
		/* autoneg off */
		e1000_write_phy_reg(hw, PHY_CTRL, 0x8140);
	}

	/* force 1000, set loopback */
	e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);

	/* Now set up the MAC to the same speed/duplex as the PHY. */
	ctrl_reg = er32(CTRL);
	ctrl_reg &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
	ctrl_reg |= (E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX |	/* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
		     E1000_CTRL_FD);		/* Force Duplex to FULL */

	if (hw->media_type == e1000_media_type_copper &&
	    hw->phy_type == e1000_phy_m88)
		ctrl_reg |= E1000_CTRL_ILOS;	/* Invert Loss of Signal */
	else {
		/* Set the ILOS bit on the fiber Nic if half
		 * duplex link is detected.
		 */
		stat_reg = er32(STATUS);
		if ((stat_reg & E1000_STATUS_FD) == 0)
			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
	}

	ew32(CTRL, ctrl_reg);

	/* Disable the receiver on the PHY so when a cable is plugged in, the
	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
	 */
	if (hw->phy_type == e1000_phy_m88)
		e1000_phy_disable_receiver(adapter);

	/* let the PHY settle */
	udelay(500);

	return 0;
}
/* Dispatch PHY loopback setup by MAC generation.  Returns 0 on success,
 * 8 if loopback could not be established.
 */
static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_reg = 0;
	u16 count = 0;

	switch (hw->mac_type) {
	case e1000_82543:
		if (hw->media_type == e1000_media_type_copper) {
			/* Attempt to setup Loopback mode on Non-integrated PHY.
			 * Some PHY registers get corrupted at random, so
			 * attempt this 10 times.
			 */
			/* Empty-body retry loop: exits on the first success,
			 * or after 10 failed attempts (count then equals 11).
			 */
			while (e1000_nonintegrated_phy_loopback(adapter) &&
			       count++ < 10);
			if (count < 11)
				return 0;
		}
		break;

	case e1000_82544:
	case e1000_82540:
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82541:
	case e1000_82541_rev_2:
	case e1000_82547:
	case e1000_82547_rev_2:
		return e1000_integrated_phy_loopback(adapter);
	default:
		/* Default PHY loopback work is to read the MII
		 * control register and assert bit 14 (loopback mode).
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
		phy_reg |= MII_CR_LOOPBACK;
		e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
		return 0;
	}

	/* Reached only when the 82543 retry loop exhausted its attempts */
	return 8;
}
/* Select and enable the appropriate loopback mode for the self-test:
 * PHY loopback for copper (and the fiber/serdes MACs that need it),
 * transceiver loopback via RCTL for the remaining fiber/serdes parts.
 * Returns 0 on success, 7 for an unrecognized media type, or the error
 * from e1000_set_phy_loopback().
 */
static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (hw->media_type == e1000_media_type_copper)
		return e1000_set_phy_loopback(adapter);

	if (hw->media_type != e1000_media_type_fiber &&
	    hw->media_type != e1000_media_type_internal_serdes)
		return 7;

	switch (hw->mac_type) {
	case e1000_82545:
	case e1000_82546:
	case e1000_82545_rev_3:
	case e1000_82546_rev_3:
		/* these fiber parts still loop back through the PHY */
		return e1000_set_phy_loopback(adapter);
	default:
		/* everything else uses MAC transceiver loopback */
		rctl = er32(RCTL);
		rctl |= E1000_RCTL_LBM_TCVR;
		ew32(RCTL, rctl);
		return 0;
	}
}
/* Undo loopback configuration after the self-test: clear the RCTL loopback
 * bits, re-enable autoneg, and if the PHY still has its MII loopback bit
 * set, clear it and reset the PHY.  The switch intentionally funnels every
 * MAC type through the default arm (the explicit cases only document the
 * parts that used PHY loopback).
 */
static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u16 phy_reg;

	rctl = er32(RCTL);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	ew32(RCTL, rctl);

	switch (hw->mac_type) {
	case e1000_82545:
	case e1000_82546:
	case e1000_82545_rev_3:
	case e1000_82546_rev_3:
	default:
		hw->autoneg = true;
		e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
		if (phy_reg & MII_CR_LOOPBACK) {
			phy_reg &= ~MII_CR_LOOPBACK;
			e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
			e1000_phy_reset(hw);
		}
		break;
	}
}
/* Fill an skb with the loopback test pattern: first half 0xFF, second
 * half 0xAA, with two recognizable marker bytes (0xBE/0xAF) that the
 * receive side checks for.
 */
static void e1000_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	unsigned char *p = skb->data;
	unsigned int half;

	memset(p, 0xFF, frame_size);
	frame_size &= ~1;	/* work with an even length */
	half = frame_size / 2;
	memset(p + half, 0xAA, half - 1);
	p[half + 10] = 0xBE;
	p[half + 12] = 0xAF;
}
/* Verify a received loopback frame against the pattern written by
 * e1000_create_lbtest_frame(): byte 3 must be 0xFF and the two marker
 * bytes (0xBE at half+10, 0xAF at half+12) must be present.
 * Returns 0 on a match, 13 (the mis-compare error code) otherwise.
 */
static int e1000_check_lbtest_frame(const unsigned char *data,
				    unsigned int frame_size)
{
	unsigned int half;

	frame_size &= ~1;	/* same even-length adjustment as the sender */
	half = frame_size / 2;

	if (data[3] != 0xFF)
		return 13;
	if (data[half + 10] == 0xBE && data[half + 12] == 0xAF)
		return 0;
	return 13;
}
/* Run the actual loopback traffic test: repeatedly post batches of 64
 * pattern frames to the test Tx ring, then poll the test Rx ring and check
 * each received buffer against the pattern.  Returns 0 on success, 13 on a
 * mis-compare/lost-frame batch, 14 on timeout.
 */
static int e1000_run_loopback_test(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i, j, k, l, lc, good_cnt, ret_val = 0;
	unsigned long time;

	/* Give the hardware the whole Rx ring to receive into */
	ew32(RDT, rxdr->count - 1);

	/* Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */
	if (rxdr->count <= txdr->count)
		lc = ((txdr->count / 64) * 2) + 1;
	else
		lc = ((rxdr->count / 64) * 2) + 1;

	/* k tracks the Tx ring position across batches, l the Rx position */
	k = l = 0;
	for (j = 0; j <= lc; j++) { /* loop count loop */
		for (i = 0; i < 64; i++) { /* send the packets */
			/* NOTE(review): the frame is written into skb [i]
			 * but the DMA sync targets entry [k]; these only
			 * coincide while k == i (first batch) — looks like
			 * a latent index mismatch, confirm against upstream.
			 */
			e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
						  1024);
			dma_sync_single_for_device(&pdev->dev,
						   txdr->buffer_info[k].dma,
						   txdr->buffer_info[k].length,
						   DMA_TO_DEVICE);
			if (unlikely(++k == txdr->count))
				k = 0;
		}
		/* Kick the hardware: advance the Tx tail past the batch */
		ew32(TDT, k);
		E1000_WRITE_FLUSH();
		msleep(200);
		time = jiffies; /* set the start time for the receive */
		good_cnt = 0;
		do { /* receive the sent packets */
			dma_sync_single_for_cpu(&pdev->dev,
						rxdr->buffer_info[l].dma,
						E1000_RXBUFFER_2048,
						DMA_FROM_DEVICE);
			ret_val = e1000_check_lbtest_frame(
					rxdr->buffer_info[l].rxbuf.data +
					NET_SKB_PAD + NET_IP_ALIGN,
					1024);
			if (!ret_val)
				good_cnt++;
			if (unlikely(++l == rxdr->count))
				l = 0;
			/* time + 20 msecs (200 msecs on 2.4) is more than
			 * enough time to complete the receives, if it's
			 * exceeded, break and error off
			 */
		} while (good_cnt < 64 && time_after(time + 20, jiffies));
		if (good_cnt != 64) {
			ret_val = 13; /* ret_val is the same as mis-compare */
			break;
		}
		if (time_after_eq(jiffies, time + 2)) {
			ret_val = 14; /* error code for time out error */
			break;
		}
	} /* end loop count loop */
	return ret_val;
}
/* Full loopback self-test: build the test descriptor rings, enter
 * loopback mode, run the traffic test, then clean up.  *data receives the
 * first non-zero step/error code (0 on success), which is also returned.
 * Ring teardown is skipped only if the rings themselves failed to build.
 */
static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
{
	*data = e1000_setup_desc_rings(adapter);
	if (*data)
		return *data;	/* nothing was built; nothing to free */

	*data = e1000_setup_loopback_test(adapter);
	if (!*data) {
		*data = e1000_run_loopback_test(adapter);
		e1000_loopback_cleanup(adapter);
	}

	e1000_free_desc_rings(adapter);
	return *data;
}
/* Link self-test: for internal serdes, poll for link for up to ~75 s
 * (3750 x 20 ms) since blade backplanes can take minutes to link; for
 * other media, check once (after a 4 s autoneg grace period) via the
 * STATUS register.  Sets and returns *data: 0 = link up, 1 = no link.
 */
static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;

	*data = 0;
	if (hw->media_type == e1000_media_type_internal_serdes) {
		int i = 0;

		hw->serdes_has_link = false;

		/* On some blade server designs, link establishment
		 * could take as long as 2-3 minutes
		 */
		do {
			e1000_check_for_link(hw);
			if (hw->serdes_has_link)
				return *data;
			msleep(20);
		} while (i++ < 3750);

		*data = 1;
	} else {
		e1000_check_for_link(hw);
		if (hw->autoneg)  /* if auto_neg is set wait for it */
			msleep(4000);

		if (!(er32(STATUS) & E1000_STATUS_LU))
			*data = 1;
	}
	return *data;
}
/* ethtool hook: report how many strings each string set contains */
static int e1000_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_TEST)
		return E1000_TEST_LEN;
	if (sset == ETH_SS_STATS)
		return E1000_STATS_LEN;
	return -EOPNOTSUPP;
}
/* ethtool self-test entry point.  Offline mode runs the register, EEPROM,
 * interrupt, loopback and link tests (closing/reopening the interface
 * around them and restoring the saved autoneg/speed/duplex settings);
 * online mode runs only the link test.  data[0..4] receive per-test error
 * codes and eth_test->flags is marked FAILED on any non-zero result.
 */
static void e1000_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	bool if_running = netif_running(netdev);

	set_bit(__E1000_TESTING, &adapter->flags);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		/* save speed, duplex, autoneg settings */
		u16 autoneg_advertised = hw->autoneg_advertised;
		u8 forced_speed_duplex = hw->forced_speed_duplex;
		u8 autoneg = hw->autoneg;

		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			e1000_reset(adapter);

		/* Each destructive test is followed by a reset so the next
		 * test starts from known hardware state.
		 */
		if (e1000_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000_reset(adapter);
		if (e1000_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000_reset(adapter);
		if (e1000_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000_reset(adapter);
		/* make sure the phy is powered up */
		e1000_power_up_phy(adapter);
		if (e1000_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* restore speed, duplex, autoneg settings */
		hw->autoneg_advertised = autoneg_advertised;
		hw->forced_speed_duplex = forced_speed_duplex;
		hw->autoneg = autoneg;

		e1000_reset(adapter);
		clear_bit(__E1000_TESTING, &adapter->flags);
		if (if_running)
			dev_open(netdev);
	} else {
		e_info(hw, "online testing starting\n");

		/* Online tests */
		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;
		clear_bit(__E1000_TESTING, &adapter->flags);
	}
	msleep_interruptible(4 * 1000);
}
/* Apply per-device Wake-on-LAN exclusions: some device IDs cannot wake at
 * all, others only on port A (or unless enabled in the EEPROM for port B).
 * Clears wol->supported for excluded devices.  Returns 0 if this port may
 * use WoL, 1 if it is excluded (the default).
 */
static int e1000_wol_exclusion(struct e1000_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct e1000_hw *hw = &adapter->hw;
	int retval = 1; /* fail by default */

	switch (hw->device_id) {
	case E1000_DEV_ID_82542:
	case E1000_DEV_ID_82543GC_FIBER:
	case E1000_DEV_ID_82543GC_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82546GB_QUAD_COPPER:
	case E1000_DEV_ID_82546GB_PCIE:
		/* these don't support WoL at all */
		wol->supported = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events not supported on port B */
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			wol->supported = 0;
			break;
		}
		/* return success for non excluded adapter ports */
		retval = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* quad port adapters only support WoL on port A */
		if (!adapter->quad_port_a) {
			wol->supported = 0;
			break;
		}
		/* return success for non excluded adapter ports */
		retval = 0;
		break;
	default:
		/* dual port cards only support WoL on port A from now on
		 * unless it was enabled in the eeprom for port B
		 * so exclude FUNC_1 ports from having WoL enabled
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
		    !adapter->eeprom_wol) {
			wol->supported = 0;
			break;
		}

		retval = 0;
	}

	return retval;
}
/* ethtool hook: report Wake-on-LAN capabilities and currently enabled
 * wake events, after filtering through e1000_wol_exclusion() and device
 * wakeup capability.
 */
static void e1000_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	/* this function will set ->supported = 0 and return 1 if wol is not
	 * supported by this hardware
	 */
	if (e1000_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	/* apply any specific unsupported masks here */
	switch (hw->device_id) {
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* KSP3 does not support UCAST wake-ups */
		wol->supported &= ~WAKE_UCAST;

		if (adapter->wol & E1000_WUFC_EX)
			e_err(drv, "Interface does not support directed "
			      "(unicast) frame wake-up packets\n");
		break;
	default:
		break;
	}

	/* translate the adapter's WUFC bits into ethtool wake options */
	if (adapter->wol & E1000_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & E1000_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & E1000_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & E1000_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}
/* ethtool hook: configure Wake-on-LAN events.  Rejects options the
 * hardware cannot do (PHY/ARP/secure-magic everywhere; unicast on KSP3),
 * then rewrites adapter->wol from scratch and propagates the enable state
 * to the PCI device.  Returns 0 or -EOPNOTSUPP.
 */
static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (e1000_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	switch (hw->device_id) {
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		if (wol->wolopts & WAKE_UCAST) {
			e_err(drv, "Interface does not support directed "
			      "(unicast) frame wake-up packets\n");
			return -EOPNOTSUPP;
		}
		break;
	default:
		break;
	}

	/* these settings will always override what we currently have */
	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= E1000_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= E1000_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= E1000_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= E1000_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
/* ethtool identify-LED hook.  On ACTIVE, set up the LED and return 2 to
 * ask the ethtool core to cycle at 2 toggles per second; ON/OFF drive the
 * LED state; INACTIVE restores normal LED operation.
 */
static int e1000_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		e1000_setup_led(hw);
		return 2;

	case ETHTOOL_ID_ON:
		e1000_led_on(hw);
		break;

	case ETHTOOL_ID_OFF:
		e1000_led_off(hw);
		break;

	case ETHTOOL_ID_INACTIVE:
		e1000_cleanup_led(hw);
	}

	return 0;
}
/* ethtool hook: report the Rx interrupt coalescing setting.  Values <= 4
 * are special mode selectors stored as-is; larger values encode an
 * interrupts-per-second rate and are converted back to microseconds.
 * Not supported before the 82545.
 */
static int e1000_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (adapter->hw.mac_type < e1000_82545)
		return -EOPNOTSUPP;

	ec->rx_coalesce_usecs = (adapter->itr_setting <= 4) ?
		adapter->itr_setting :
		1000000 / adapter->itr_setting;

	return 0;
}
/* ethtool hook: set the Rx interrupt coalescing rate.  rx_coalesce_usecs
 * values 0/1/3 select adaptive/special modes, 4 selects a fixed low-latency
 * mode, and values in [E1000_MIN_ITR_USECS, E1000_MAX_ITR_USECS] set a
 * fixed interval (2 is reserved and rejected).  Programs the ITR register
 * accordingly.  Not supported before the 82545.
 */
static int e1000_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac_type < e1000_82545)
		return -EOPNOTSUPP;

	/* reject out-of-range values and the reserved value 2 */
	if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
	    ((ec->rx_coalesce_usecs > 4) &&
	     (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
	    (ec->rx_coalesce_usecs == 2))
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 4) {
		adapter->itr = adapter->itr_setting = 4;
	} else if (ec->rx_coalesce_usecs <= 3) {
		/* modes 0/1/3: store the selector, run at 20k ints/s */
		adapter->itr = 20000;
		adapter->itr_setting = ec->rx_coalesce_usecs;
	} else {
		/* fixed interval: convert usecs to interrupts/second */
		adapter->itr = (1000000 / ec->rx_coalesce_usecs);
		adapter->itr_setting = adapter->itr & ~3;
	}

	if (adapter->itr_setting != 0)
		ew32(ITR, 1000000000 / (adapter->itr * 256));
	else
		ew32(ITR, 0);

	return 0;
}
/* ethtool hook: restart autonegotiation by reinitializing the interface
 * (only when it is up; a down interface has nothing to renegotiate).
 */
static int e1000_nway_reset(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	return 0;
}
/* ethtool hook: copy each entry of e1000_gstrings_stats out of either the
 * netdev or the adapter structure (per the entry's type/offset) into the
 * caller's data[] array, refreshing hardware stats first.
 *
 * Fix: the original initialized p to NULL and still dereferenced it after
 * the default (invalid stat type) branch — a NULL (or stale, from a prior
 * iteration) pointer dereference.  An invalid entry now reports 0 and is
 * skipped; stat advances in the for-header so `continue` stays in sync.
 */
static void e1000_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int i;
	const struct e1000_stats *stat = e1000_gstrings_stats;

	e1000_update_stats(adapter);
	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
		char *p;

		switch (stat->type) {
		case NETDEV_STATS:
			p = (char *)netdev + stat->stat_offset;
			break;
		case E1000_STATS:
			p = (char *)adapter + stat->stat_offset;
			break;
		default:
			WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
				  stat->type, i);
			data[i] = 0;	/* don't dereference an unset pointer */
			continue;
		}

		if (stat->sizeof_stat == sizeof(u64))
			data[i] = *(u64 *)p;
		else
			data[i] = *(u32 *)p;
	}
/* BUG_ON(i != E1000_STATS_LEN); */
}
/* ethtool hook: emit the requested string table (self-test names or
 * statistics names), ETH_GSTRING_LEN bytes per entry.
 */
static void e1000_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
		break;
	case ETH_SS_STATS:
		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       e1000_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}
/* ethtool operations table wiring the hooks above (and the get/set
 * routines defined earlier in this file) into the ethtool core.
 */
static const struct ethtool_ops e1000_ethtool_ops = {
	.get_settings		= e1000_get_settings,
	.set_settings		= e1000_set_settings,
	.get_drvinfo		= e1000_get_drvinfo,
	.get_regs_len		= e1000_get_regs_len,
	.get_regs		= e1000_get_regs,
	.get_wol		= e1000_get_wol,
	.set_wol		= e1000_set_wol,
	.get_msglevel		= e1000_get_msglevel,
	.set_msglevel		= e1000_set_msglevel,
	.nway_reset		= e1000_nway_reset,
	.get_link		= e1000_get_link,
	.get_eeprom_len		= e1000_get_eeprom_len,
	.get_eeprom		= e1000_get_eeprom,
	.set_eeprom		= e1000_set_eeprom,
	.get_ringparam		= e1000_get_ringparam,
	.set_ringparam		= e1000_set_ringparam,
	.get_pauseparam		= e1000_get_pauseparam,
	.set_pauseparam		= e1000_set_pauseparam,
	.self_test		= e1000_diag_test,
	.get_strings		= e1000_get_strings,
	.set_phys_id		= e1000_set_phys_id,
	.get_ethtool_stats	= e1000_get_ethtool_stats,
	.get_sset_count		= e1000_get_sset_count,
	.get_coalesce		= e1000_get_coalesce,
	.set_coalesce		= e1000_set_coalesce,
	.get_ts_info		= ethtool_op_get_ts_info,
};
/* Attach the e1000 ethtool operations to a net_device; called from the
 * driver's probe path.
 */
void e1000_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &e1000_ethtool_ops;
}
| gpl-2.0 |
Saylance/android_kernel_samsung_n7000 | arch/arm/mach-pxa/pxa27x.c | 2276 | 11717 | /*
* linux/arch/arm/mach-pxa/pxa27x.c
*
* Author: Nicolas Pitre
* Created: Nov 05, 2002
* Copyright: MontaVista Software Inc.
*
* Code specific to PXA27x aka Bulverde.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/i2c/pxa-i2c.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <mach/irqs.h>
#include <mach/gpio.h>
#include <mach/pxa27x.h>
#include <mach/reset.h>
#include <mach/ohci.h>
#include <mach/pm.h>
#include <mach/dma.h>
#include <mach/smemc.h>
#include "generic.h"
#include "devices.h"
#include "clock.h"
/* Clear the OTG peripheral-hold bit in PSSR if it is set (PXA27x only).
 * PSSR bits are write-one-to-clear, hence the |= of the same bit.
 */
void pxa27x_clear_otgph(void)
{
	if (cpu_is_pxa27x() && (PSSR & PSSR_OTGPH))
		PSSR |= PSSR_OTGPH;
}
EXPORT_SYMBOL(pxa27x_clear_otgph);
/* MFP settings used to toggle the AC97 nRESET line: for each candidate
 * GPIO (113, 95) the pair is [plain GPIO while reset is asserted,
 * AC97 nRESET function when released].
 */
static unsigned long ac97_reset_config[] = {
	GPIO113_GPIO,
	GPIO113_AC97_nRESET,
	GPIO95_GPIO,
	GPIO95_AC97_nRESET,
};
/* Assert (on != 0) or release the AC97 nRESET line by switching the pin's
 * MFP configuration between plain GPIO and the AC97 nRESET function.
 * Only GPIOs 113 and 95 can carry nRESET on the PXA27x; other values are
 * silently ignored.
 */
void pxa27x_assert_ac97reset(int reset_gpio, int on)
{
	if (reset_gpio == 113)
		pxa2xx_mfp_config(on ? &ac97_reset_config[0] :
				       &ac97_reset_config[1], 1);
	if (reset_gpio == 95)
		pxa2xx_mfp_config(on ? &ac97_reset_config[2] :
				       &ac97_reset_config[3], 1);
}
EXPORT_SYMBOL_GPL(pxa27x_assert_ac97reset);
/* Crystal clock: 13MHz */
#define BASE_CLK 13000000
/*
* Get the clock frequency as reflected by CCSR and the turbo flag.
* We assume these values have been applied via a fcs.
* If info is not 0 we also display the current settings.
*/
/* Decode CCSR/CCCR and the CP14 CLKCFG register to compute the current
 * CPU clock in kHz (turbo frequency if the turbo bit is set, otherwise
 * run-mode frequency).  When info is non-zero, also print the run, turbo,
 * memory and system-bus clocks.
 */
unsigned int pxa27x_get_clk_frequency_khz(int info)
{
	unsigned long ccsr, clkcfg;
	unsigned int l, L, m, M, n2, N, S;
	int cccr_a, t, ht, b;

	ccsr = CCSR;
	cccr_a = CCCR & (1 << 25);

	/* Read clkcfg register: it has turbo, b, half-turbo (and f) */
	asm( "mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg) );
	t  = clkcfg & (1 << 0);		/* turbo mode active */
	ht = clkcfg & (1 << 2);		/* half-turbo (decoded but unused) */
	b  = clkcfg & (1 << 3);		/* fast-bus mode */

	l  = ccsr & 0x1f;		/* run-mode multiplier */
	n2 = (ccsr>>7) & 0xf;		/* 2 * turbo multiplier */
	m  = (l <= 10) ? 1 : (l <= 20) ? 2 : 4;	/* memory divider from L */

	L  = l * BASE_CLK;		/* run-mode clock */
	N  = (L * n2) / 2;		/* turbo clock (n2 is in half steps) */
	M  = (!cccr_a) ? (L/m) : ((b) ? L : (L/2));	/* memory clock */
	S  = (b) ? L : (L/2);		/* system bus clock */

	if (info) {
		printk( KERN_INFO "Run Mode clock: %d.%02dMHz (*%d)\n",
			L / 1000000, (L % 1000000) / 10000, l );
		printk( KERN_INFO "Turbo Mode clock: %d.%02dMHz (*%d.%d, %sactive)\n",
			N / 1000000, (N % 1000000)/10000, n2 / 2, (n2 % 2)*5,
			(t) ? "" : "in" );
		printk( KERN_INFO "Memory clock: %d.%02dMHz (/%d)\n",
			M / 1000000, (M % 1000000) / 10000, m );
		printk( KERN_INFO "System bus clock: %d.%02dMHz \n",
			S / 1000000, (S % 1000000) / 10000 );
	}

	return (t) ? (N/1000) : (L/1000);
}
/*
* Return the current mem clock frequency as reflected by CCCR[A], B, and L
*/
/* Compute the current memory clock rate in Hz from CCSR's run-mode
 * multiplier, CCCR[A] and the CLKCFG fast-bus bit (same decode as in
 * pxa27x_get_clk_frequency_khz()).
 */
static unsigned long clk_pxa27x_mem_getrate(struct clk *clk)
{
	unsigned long ccsr, clkcfg;
	unsigned int l, L, m, M;
	int cccr_a, b;

	ccsr = CCSR;
	cccr_a = CCCR & (1 << 25);

	/* Read clkcfg register: it has turbo, b, half-turbo (and f) */
	asm( "mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg) );
	b = clkcfg & (1 << 3);

	l = ccsr & 0x1f;
	m = (l <= 10) ? 1 : (l <= 20) ? 2 : 4;

	L = l * BASE_CLK;
	M = (!cccr_a) ? (L/m) : ((b) ? L : (L/2));

	return M;
}
/* Memory clock ops: always on (dummy enable/disable), rate read from
 * hardware on demand.
 */
static const struct clkops clk_pxa27x_mem_ops = {
	.enable		= clk_dummy_enable,
	.disable	= clk_dummy_disable,
	.getrate	= clk_pxa27x_mem_getrate,
};
/*
* Return the current LCD clock frequency in units of 10kHz as
*/
/* Return the current LCD clock frequency in units of 10 kHz, derived from
 * CCSR's run-mode multiplier and an L-dependent divider.
 */
static unsigned int pxa27x_get_lcdclk_frequency_10khz(void)
{
	unsigned long ccsr;
	unsigned int l, L, k, K;

	ccsr = CCSR;

	/* Read clkcfg register: it has turbo, b, half-turbo (and f) */
	l = ccsr & 0x1f;
	k = (l <= 7) ? 1 : (l <= 16) ? 2 : 4;	/* LCD divider from L */

	L = l * BASE_CLK;
	K = L / k;

	return (K / 10000);
}
/* LCD clock rate in Hz (thin wrapper over the 10 kHz helper above) */
static unsigned long clk_pxa27x_lcd_getrate(struct clk *clk)
{
	return pxa27x_get_lcdclk_frequency_10khz() * 10000;
}

/* LCD clock ops: gated via the standard CKEN enable/disable, rate read
 * from hardware.  Also reused for the camera clock below.
 */
static const struct clkops clk_pxa27x_lcd_ops = {
	.enable		= clk_pxa2xx_cken_enable,
	.disable	= clk_pxa2xx_cken_disable,
	.getrate	= clk_pxa27x_lcd_getrate,
};
/* Peripheral clock definitions: (name, CKEN bit, nominal rate in Hz,
 * enable/disable delay).  The LCD and camera clocks share the rate-reading
 * ops above; the memory clock uses its own always-on ops.
 */
static DEFINE_PXA2_CKEN(pxa27x_ffuart, FFUART, 14857000, 1);
static DEFINE_PXA2_CKEN(pxa27x_btuart, BTUART, 14857000, 1);
static DEFINE_PXA2_CKEN(pxa27x_stuart, STUART, 14857000, 1);
static DEFINE_PXA2_CKEN(pxa27x_i2s, I2S, 14682000, 0);
static DEFINE_PXA2_CKEN(pxa27x_i2c, I2C, 32842000, 0);
static DEFINE_PXA2_CKEN(pxa27x_usb, USB, 48000000, 5);
static DEFINE_PXA2_CKEN(pxa27x_mmc, MMC, 19500000, 0);
static DEFINE_PXA2_CKEN(pxa27x_ficp, FICP, 48000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_usbhost, USBHOST, 48000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_pwri2c, PWRI2C, 13000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_keypad, KEYPAD, 32768, 0);
static DEFINE_PXA2_CKEN(pxa27x_ssp1, SSP1, 13000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_ssp2, SSP2, 13000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_ssp3, SSP3, 13000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_pwm0, PWM0, 13000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_pwm1, PWM1, 13000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_ac97, AC97, 24576000, 0);
static DEFINE_PXA2_CKEN(pxa27x_ac97conf, AC97CONF, 24576000, 0);
static DEFINE_PXA2_CKEN(pxa27x_msl, MSL, 48000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_usim, USIM, 48000000, 0);
static DEFINE_PXA2_CKEN(pxa27x_memstk, MEMSTK, 19500000, 0);
static DEFINE_PXA2_CKEN(pxa27x_im, IM, 0, 0);
static DEFINE_PXA2_CKEN(pxa27x_memc, MEMC, 0, 0);
static DEFINE_CK(pxa27x_lcd, LCD, &clk_pxa27x_lcd_ops);
static DEFINE_CK(pxa27x_camera, CAMERA, &clk_pxa27x_lcd_ops);
static DEFINE_CLK(pxa27x_mem, &clk_pxa27x_mem_ops, 0, 0);
/* clkdev lookup table binding each clock to its consumer device name
 * and/or connection id (NULL device entries match by connection id only).
 */
static struct clk_lookup pxa27x_clkregs[] = {
	INIT_CLKREG(&clk_pxa27x_lcd, "pxa2xx-fb", NULL),
	INIT_CLKREG(&clk_pxa27x_camera, "pxa27x-camera.0", NULL),
	INIT_CLKREG(&clk_pxa27x_ffuart, "pxa2xx-uart.0", NULL),
	INIT_CLKREG(&clk_pxa27x_btuart, "pxa2xx-uart.1", NULL),
	INIT_CLKREG(&clk_pxa27x_stuart, "pxa2xx-uart.2", NULL),
	INIT_CLKREG(&clk_pxa27x_i2s, "pxa2xx-i2s", NULL),
	INIT_CLKREG(&clk_pxa27x_i2c, "pxa2xx-i2c.0", NULL),
	INIT_CLKREG(&clk_pxa27x_usb, "pxa27x-udc", NULL),
	INIT_CLKREG(&clk_pxa27x_mmc, "pxa2xx-mci.0", NULL),
	INIT_CLKREG(&clk_pxa27x_stuart, "pxa2xx-ir", "UARTCLK"),
	INIT_CLKREG(&clk_pxa27x_ficp, "pxa2xx-ir", "FICPCLK"),
	INIT_CLKREG(&clk_pxa27x_usbhost, "pxa27x-ohci", NULL),
	INIT_CLKREG(&clk_pxa27x_pwri2c, "pxa2xx-i2c.1", NULL),
	INIT_CLKREG(&clk_pxa27x_keypad, "pxa27x-keypad", NULL),
	INIT_CLKREG(&clk_pxa27x_ssp1, "pxa27x-ssp.0", NULL),
	INIT_CLKREG(&clk_pxa27x_ssp2, "pxa27x-ssp.1", NULL),
	INIT_CLKREG(&clk_pxa27x_ssp3, "pxa27x-ssp.2", NULL),
	INIT_CLKREG(&clk_pxa27x_pwm0, "pxa27x-pwm.0", NULL),
	INIT_CLKREG(&clk_pxa27x_pwm1, "pxa27x-pwm.1", NULL),
	INIT_CLKREG(&clk_pxa27x_ac97, NULL, "AC97CLK"),
	INIT_CLKREG(&clk_pxa27x_ac97conf, NULL, "AC97CONFCLK"),
	INIT_CLKREG(&clk_pxa27x_msl, NULL, "MSLCLK"),
	INIT_CLKREG(&clk_pxa27x_usim, NULL, "USIMCLK"),
	INIT_CLKREG(&clk_pxa27x_memstk, NULL, "MSTKCLK"),
	INIT_CLKREG(&clk_pxa27x_im, NULL, "IMCLK"),
	INIT_CLKREG(&clk_pxa27x_memc, NULL, "MEMCLK"),
	INIT_CLKREG(&clk_pxa27x_mem, "pxa2xx-pcmcia", NULL),
};
#ifdef CONFIG_PM

/* Save/restore a register into/from the sleep_save[] slot of the same name. */
#define SAVE(x)		sleep_save[SLEEP_SAVE_##x] = x
#define RESTORE(x)	x = sleep_save[SLEEP_SAVE_##x]

/*
 * allow platforms to override default PWRMODE setting used for PM_SUSPEND_MEM
 */
static unsigned int pwrmode = PWRMODE_SLEEP;

/*
 * Select the low-power mode used when entering PM_SUSPEND_MEM.
 * Only PWRMODE_SLEEP and PWRMODE_DEEPSLEEP are accepted; anything
 * else returns -EINVAL and leaves the current mode unchanged.
 */
int __init pxa27x_set_pwrmode(unsigned int mode)
{
	switch (mode) {
	case PWRMODE_SLEEP:
	case PWRMODE_DEEPSLEEP:
		pwrmode = mode;
		return 0;
	}

	return -EINVAL;
}
/*
 * List of global PXA peripheral registers to preserve.
 * More ones like CP and general purpose register values are preserved
 * with the stack pointer in sleep.S.
 */
enum {
	SLEEP_SAVE_PSTR,
	SLEEP_SAVE_MDREFR,
	SLEEP_SAVE_PCFR,
	SLEEP_SAVE_COUNT
};

/* Snapshot MDREFR, PCFR and PSTR before entering a low-power state. */
void pxa27x_cpu_pm_save(unsigned long *sleep_save)
{
	/* MDREFR is memory-mapped, not a variable, hence the raw read */
	sleep_save[SLEEP_SAVE_MDREFR] = __raw_readl(MDREFR);
	SAVE(PCFR);

	SAVE(PSTR);
}

/* Restore the registers saved by pxa27x_cpu_pm_save() after wakeup. */
void pxa27x_cpu_pm_restore(unsigned long *sleep_save)
{
	__raw_writel(sleep_save[SLEEP_SAVE_MDREFR], MDREFR);
	RESTORE(PCFR);

	/* write RDH|PH to PSSR -- presumably write-1-to-clear of the
	 * read-disable-hold / peripheral-hold status bits; confirm
	 * against the PXA27x developer's manual */
	PSSR = PSSR_RDH | PSSR_PH;

	RESTORE(PSTR);
}
/*
 * Enter the requested low-power state: CPU standby for
 * PM_SUSPEND_STANDBY, or sleep/deep-sleep for PM_SUSPEND_MEM (the
 * exact mode is chosen via pxa27x_set_pwrmode()).
 */
void pxa27x_cpu_pm_enter(suspend_state_t state)
{
	extern void pxa_cpu_standby(void);

	/* ensure voltage-change sequencer not initiated, which hangs */
	PCFR &= ~PCFR_FVC;

	/* Clear edge-detect status register. */
	PEDR = 0xDF12FE1B;

	/* Clear reset status */
	RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR;

	switch (state) {
	case PM_SUSPEND_STANDBY:
		pxa_cpu_standby();
		break;
	case PM_SUSPEND_MEM:
		pxa27x_cpu_suspend(pwrmode, PLAT_PHYS_OFFSET - PAGE_OFFSET);
		break;
	}
}
/*
 * Only the "standby" and "mem" suspend states are implemented on
 * PXA27x; reject everything else.
 */
static int pxa27x_cpu_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_MEM:
	case PM_SUSPEND_STANDBY:
		return 1;
	default:
		return 0;
	}
}
/* Arm the resume path: PSPR holds the physical address jumped to on wake. */
static int pxa27x_cpu_pm_prepare(void)
{
	/* set resume return address */
	PSPR = virt_to_phys(cpu_resume);
	return 0;
}

/* Disarm the resume address once suspend has completed (or aborted). */
static void pxa27x_cpu_pm_finish(void)
{
	/* ensure not to come back here if it wasn't intended */
	PSPR = 0;
}
/* PXA27x hooks plugged into the generic mach-pxa suspend machinery. */
static struct pxa_cpu_pm_fns pxa27x_cpu_pm_fns = {
	.save_count	= SLEEP_SAVE_COUNT,
	.save		= pxa27x_cpu_pm_save,
	.restore	= pxa27x_cpu_pm_restore,
	.valid		= pxa27x_cpu_pm_valid,
	.enter		= pxa27x_cpu_pm_enter,
	.prepare	= pxa27x_cpu_pm_prepare,
	.finish		= pxa27x_cpu_pm_finish,
};

static void __init pxa27x_init_pm(void)
{
	pxa_cpu_pm_fns = &pxa27x_cpu_pm_fns;
}
#else
/* !CONFIG_PM: no suspend support, keep the call site compilable */
static inline void pxa27x_init_pm(void) {}
#endif
/* PXA27x: Various gpios can issue wakeup events. This logic only
 * handles the simple cases, not the WEMUX2 and WEMUX3 options
 */
static int pxa27x_set_wake(struct irq_data *d, unsigned int on)
{
	int gpio = irq_to_gpio(d->irq);
	uint32_t mask;

	/* GPIO interrupts delegate to the generic GPIO wake helper */
	if (gpio >= 0 && gpio < 128)
		return gpio_set_wake(gpio, on);

	if (d->irq == IRQ_KEYPAD)
		return keypad_set_wake(on);

	switch (d->irq) {
	case IRQ_RTCAlrm:
		mask = PWER_RTC;
		break;
	case IRQ_USB:
		/* bit 26 of PWER -- presumably the USB wake enable on
		 * PXA27x; confirm against the developer's manual */
		mask = 1u << 26;
		break;
	default:
		return -EINVAL;
	}

	/* set or clear the wake-enable bit in PWER */
	if (on)
		PWER |= mask;
	else
		PWER &=~mask;

	return 0;
}
/*
 * Register the PXA27x interrupt controller (34 on-chip sources) and
 * the GPIO interrupt demux; pxa27x_set_wake() handles wake-source
 * configuration for both.
 */
void __init pxa27x_init_irq(void)
{
	pxa_init_irq(34, pxa27x_set_wake);
	pxa_init_gpio(IRQ_GPIO_2_x, 2, 120, pxa27x_set_wake);
}
/* Static I/O mappings specific to PXA27x (on top of the common pxa map). */
static struct map_desc pxa27x_io_desc[] __initdata = {
	{	/* Mem Ctl */
		.virtual	= SMEMC_VIRT,
		.pfn		= __phys_to_pfn(PXA2XX_SMEMC_BASE),
		.length		= 0x00200000,
		.type		= MT_DEVICE
	}, {	/* IMem ctl */
		.virtual	= 0xfe000000,
		.pfn		= __phys_to_pfn(0x58000000),
		.length		= 0x00100000,
		.type		= MT_DEVICE
	},
};

/* Map common + PXA27x-specific I/O and read back the clock frequencies. */
void __init pxa27x_map_io(void)
{
	pxa_map_io();
	iotable_init(ARRAY_AND_SIZE(pxa27x_io_desc));
	/* argument 1: also print the detected frequencies */
	pxa27x_get_clk_frequency_khz(1);
}
/*
 * device registration specific to PXA27x.
 */
void __init pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
	/* PCFR read-modify-write with interrupts off: enable the
	 * power-manager I2C unit before registering its device */
	local_irq_disable();
	PCFR |= PCFR_PI2CEN;
	local_irq_enable();

	pxa_register_device(&pxa27x_device_i2c_power, info);
}
/* On-SoC platform devices registered unconditionally for PXA27x. */
static struct platform_device *devices[] __initdata = {
	&pxa27x_device_udc,
	&pxa_device_pmu,
	&pxa_device_i2s,
	&pxa_device_asoc_ssp1,
	&pxa_device_asoc_ssp2,
	&pxa_device_asoc_ssp3,
	&pxa_device_asoc_platform,
	&sa1100_device_rtc,
	&pxa_device_rtc,
	&pxa27x_device_ssp1,
	&pxa27x_device_ssp2,
	&pxa27x_device_ssp3,
	&pxa27x_device_pwm0,
	&pxa27x_device_pwm1,
};
/*
 * PXA27x core bring-up: latch the reset reason, register clocks, DMA,
 * PM hooks, syscore ops and the on-SoC platform devices.  Runs at
 * postcore_initcall time; a no-op on other silicon.
 */
static int __init pxa27x_init(void)
{
	int ret = 0;

	if (cpu_is_pxa27x()) {
		/* record reset status before anything can clear RCSR */
		reset_status = RCSR;

		clkdev_add_table(pxa27x_clkregs, ARRAY_SIZE(pxa27x_clkregs));

		/*
		 * checkpatch style fix: no assignment inside the
		 * if-condition (was "if ((ret = pxa_init_dma(...)))").
		 */
		ret = pxa_init_dma(IRQ_DMA, 32);
		if (ret)
			return ret;

		pxa27x_init_pm();

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa2xx_mfp_syscore_ops);
		register_syscore_ops(&pxa_gpio_syscore_ops);
		register_syscore_ops(&pxa2xx_clock_syscore_ops);

		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
	}

	return ret;
}
postcore_initcall(pxa27x_init);
| gpl-2.0 |
SerenityS/android_kernel_samsung_msm8916 | arch/arm/mach-kirkwood/board-km_kirkwood.c | 2276 | 1405 | /*
* Copyright 2012 2012 KEYMILE AG, CH-3097 Bern
* Valentin Longchamp <valentin.longchamp@keymile.com>
*
* arch/arm/mach-kirkwood/board-km_kirkwood.c
*
* Keymile km_kirkwood Reference Desing Init for drivers not converted to
* flattened device tree yet.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mv643xx_eth.h>
#include <linux/clk.h>
#include <linux/clk-private.h>
#include "common.h"
/* GE00 Ethernet: PHY sits at MDIO address 0. */
static struct mv643xx_eth_platform_data km_kirkwood_ge00_data = {
	.phy_addr	= MV643XX_ETH_PHY_ADDR(0),
};

/*
 * Board init for drivers not yet converted to device tree: work around
 * the SATA clock-gating hang, then register the GE00 interface.
 */
void __init km_kirkwood_init(void)
{
	struct clk *sata_clk;
	/*
	 * Our variant of kirkwood (integrated in the Bobcat) hangs on accessing
	 * SATA bits (14-15) of the Clock Gating Control Register. Since these
	 * devices are also not present in this variant, their clocks get
	 * disabled because unused when clk_disable_unused() gets called.
	 * That's why we change the flags to these clocks to CLK_IGNORE_UNUSED
	 */
	sata_clk = clk_get_sys("sata_mv.0", "0");
	if (!IS_ERR(sata_clk))
		sata_clk->flags |= CLK_IGNORE_UNUSED;
	sata_clk = clk_get_sys("sata_mv.0", "1");
	if (!IS_ERR(sata_clk))
		sata_clk->flags |= CLK_IGNORE_UNUSED;

	kirkwood_ge00_init(&km_kirkwood_ge00_data);
}
| gpl-2.0 |
skelitonlord/android_kernel_samsung_matissewifi | drivers/gpu/drm/exynos/exynos_drm_vidi.c | 4836 | 16344 | /* exynos_drm_vidi.c
*
* Copyright (C) 2012 Samsung Electronics Co.Ltd
* Authors:
* Inki Dae <inki.dae@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include "drmP.h"
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/exynos_drm.h>
#include "drm_edid.h"
#include "drm_crtc_helper.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_encoder.h"
/* vidi has totally three virtual windows. */
#define WINDOWS_NR 3
#define get_vidi_context(dev) platform_get_drvdata(to_platform_device(dev))
/* Per-window (virtual overlay plane) state cached by vidi_win_mode_set(). */
struct vidi_win_data {
	unsigned int		offset_x;	/* CRTC position */
	unsigned int		offset_y;
	unsigned int		ovl_width;	/* visible overlay size */
	unsigned int		ovl_height;
	unsigned int		fb_width;	/* backing framebuffer size */
	unsigned int		fb_height;
	unsigned int		bpp;
	dma_addr_t		dma_addr;	/* start of the visible region */
	void __iomem		*vaddr;
	unsigned int		buf_offsize;	/* per-line skip, bytes */
	unsigned int		line_size;	/* bytes */
	bool			enabled;	/* committed at least once */
};

/* Driver state for the virtual display, one instance per platform device. */
struct vidi_context {
	struct exynos_drm_subdrv	subdrv;
	struct drm_crtc			*crtc;
	struct vidi_win_data		win_data[WINDOWS_NR];
	struct edid			*raw_edid;	/* set via ioctl; defaults to fake_edid_info */
	unsigned int			clkdiv;
	unsigned int			default_win;	/* used when zpos == DEFAULT_ZPOS */
	unsigned long			irq_flags;	/* bit 0: vblank requested */
	unsigned int			connected;	/* user-controlled hotplug state */
	bool				vblank_on;
	bool				suspended;
	struct work_struct		work;		/* fake vblank generator */
	struct mutex			lock;
};
/*
 * Canned EDID blob (base block + one CEA extension) advertised until
 * user space supplies its own via the vidi connection ioctl.
 */
static const char fake_edid_info[] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05,
	0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
	0x0a, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0xbd,
	0xee, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x66, 0x21, 0x50, 0xb0, 0x51, 0x00,
	0x1b, 0x30, 0x40, 0x70, 0x36, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e,
	0x01, 0x1d, 0x00, 0x72, 0x51, 0xd0, 0x1e, 0x20, 0x6e, 0x28, 0x55, 0x00,
	0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
	0x4b, 0x1a, 0x44, 0x17, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x00, 0x00, 0x00, 0xfc, 0x00, 0x53, 0x41, 0x4d, 0x53, 0x55, 0x4e, 0x47,
	0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x01, 0xbc, 0x02, 0x03, 0x1e, 0xf1,
	0x46, 0x84, 0x05, 0x03, 0x10, 0x20, 0x22, 0x23, 0x09, 0x07, 0x07, 0x83,
	0x01, 0x00, 0x00, 0xe2, 0x00, 0x0f, 0x67, 0x03, 0x0c, 0x00, 0x10, 0x00,
	0xb8, 0x2d, 0x01, 0x1d, 0x80, 0x18, 0x71, 0x1c, 0x16, 0x20, 0x58, 0x2c,
	0x25, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x9e, 0x8c, 0x0a, 0xd0, 0x8a,
	0x20, 0xe0, 0x2d, 0x10, 0x10, 0x3e, 0x96, 0x00, 0xa0, 0x5a, 0x00, 0x00,
	0x00, 0x18, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38, 0x2d, 0x40, 0x58, 0x2c,
	0x45, 0x00, 0xa0, 0x5a, 0x00, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x06
};
static void vidi_fake_vblank_handler(struct work_struct *work);

/*
 * Report connector state.  There is no physical hotplug source: the
 * value is toggled from user space (sysfs "connection" or the vidi
 * ioctl).
 */
static bool vidi_display_is_connected(struct device *dev)
{
	struct vidi_context *ctx = get_vidi_context(dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * connection request would come from user side
	 * to do hotplug through specific ioctl.
	 */
	return ctx->connected ? true : false;
}
/*
 * Copy the current (user-provided or fake) EDID into @edid, at most
 * @len bytes, and attach a heap copy to the connector.
 *
 * NOTE(review): the kzalloc'ed copy is handed to
 * connector->display_info.raw_edid without freeing a previously
 * attached blob -- looks like a possible leak on repeated probes;
 * confirm against the DRM core's raw_edid lifetime rules.
 */
static int vidi_get_edid(struct device *dev, struct drm_connector *connector,
				u8 *edid, int len)
{
	struct vidi_context *ctx = get_vidi_context(dev);
	struct edid *raw_edid;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * the edid data comes from user side and it would be set
	 * to ctx->raw_edid through specific ioctl.
	 */
	if (!ctx->raw_edid) {
		DRM_DEBUG_KMS("raw_edid is null.\n");
		return -EFAULT;
	}

	raw_edid = kzalloc(len, GFP_KERNEL);
	if (!raw_edid) {
		DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
		return -ENOMEM;
	}

	/* copy base block + extensions, clamped to the caller's buffer */
	memcpy(raw_edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
				* EDID_LENGTH, len));

	/* attach the edid data to connector. */
	connector->display_info.raw_edid = (char *)raw_edid;

	memcpy(edid, ctx->raw_edid, min((1 + ctx->raw_edid->extensions)
				* EDID_LENGTH, len));

	return 0;
}
/* No physical panel behind the virtual display: nothing to return. */
static void *vidi_get_panel(struct device *dev)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* TODO. */

	return NULL;
}

/* Any timing is acceptable for a virtual display. */
static int vidi_check_timing(struct device *dev, void *timing)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* TODO. */

	return 0;
}

/* No power rails to switch; always succeeds. */
static int vidi_display_power_on(struct device *dev, int mode)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* TODO */

	return 0;
}

/* Display callbacks exported to the exynos DRM core. */
static struct exynos_drm_display_ops vidi_display_ops = {
	.type = EXYNOS_DISPLAY_TYPE_VIDI,
	.is_connected = vidi_display_is_connected,
	.get_edid = vidi_get_edid,
	.get_panel = vidi_get_panel,
	.check_timing = vidi_check_timing,
	.power_on = vidi_display_power_on,
};
/*
 * DPMS hook.  The virtual display has no hardware to power, so every
 * mode is currently a no-op; the switch is kept (under ctx->lock) as
 * the insertion point for future behavior.
 */
static void vidi_dpms(struct device *subdrv_dev, int mode)
{
	struct vidi_context *ctx = get_vidi_context(subdrv_dev);

	DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);

	mutex_lock(&ctx->lock);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		/* TODO. */
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		/* TODO. */
		break;
	default:
		DRM_DEBUG_KMS("unspecified mode %d\n", mode);
		break;
	}

	mutex_unlock(&ctx->lock);
}
/*
 * Re-commit all previously enabled windows and the manager itself --
 * used on resume to replay state lost while suspended.
 */
static void vidi_apply(struct device *subdrv_dev)
{
	struct vidi_context *ctx = get_vidi_context(subdrv_dev);
	struct exynos_drm_manager *mgr = ctx->subdrv.manager;
	struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
	struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
	struct vidi_win_data *win_data;
	int i;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	for (i = 0; i < WINDOWS_NR; i++) {
		win_data = &ctx->win_data[i];
		if (win_data->enabled && (ovl_ops && ovl_ops->commit))
			ovl_ops->commit(subdrv_dev, i);
	}

	if (mgr_ops && mgr_ops->commit)
		mgr_ops->commit(subdrv_dev);
}
/* Manager commit: nothing to program on virtual hardware. */
static void vidi_commit(struct device *dev)
{
	struct vidi_context *ctx = get_vidi_context(dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (ctx->suspended)
		return;
}

/* Mark vblank as requested; the fake vblank worker honors the flag. */
static int vidi_enable_vblank(struct device *dev)
{
	struct vidi_context *ctx = get_vidi_context(dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (ctx->suspended)
		return -EPERM;

	/* bit 0 of irq_flags tracks whether vblank is armed */
	if (!test_and_set_bit(0, &ctx->irq_flags))
		ctx->vblank_on = true;

	return 0;
}

/* Withdraw the vblank request set by vidi_enable_vblank(). */
static void vidi_disable_vblank(struct device *dev)
{
	struct vidi_context *ctx = get_vidi_context(dev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (ctx->suspended)
		return;

	if (test_and_clear_bit(0, &ctx->irq_flags))
		ctx->vblank_on = false;
}

/* CRTC/manager callbacks exported to the exynos DRM core. */
static struct exynos_drm_manager_ops vidi_manager_ops = {
	.dpms = vidi_dpms,
	.apply = vidi_apply,
	.commit = vidi_commit,
	.enable_vblank = vidi_enable_vblank,
	.disable_vblank = vidi_disable_vblank,
};
static void vidi_win_mode_set(struct device *dev,
struct exynos_drm_overlay *overlay)
{
struct vidi_context *ctx = get_vidi_context(dev);
struct vidi_win_data *win_data;
int win;
unsigned long offset;
DRM_DEBUG_KMS("%s\n", __FILE__);
if (!overlay) {
dev_err(dev, "overlay is NULL\n");
return;
}
win = overlay->zpos;
if (win == DEFAULT_ZPOS)
win = ctx->default_win;
if (win < 0 || win > WINDOWS_NR)
return;
offset = overlay->fb_x * (overlay->bpp >> 3);
offset += overlay->fb_y * overlay->pitch;
DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
win_data = &ctx->win_data[win];
win_data->offset_x = overlay->crtc_x;
win_data->offset_y = overlay->crtc_y;
win_data->ovl_width = overlay->crtc_width;
win_data->ovl_height = overlay->crtc_height;
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
/*
* some parts of win_data should be transferred to user side
* through specific ioctl.
*/
DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
win_data->offset_x, win_data->offset_y);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
(unsigned long)win_data->dma_addr,
(unsigned long)win_data->vaddr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
overlay->fb_width, overlay->crtc_width);
}
/*
 * Mark a window enabled and, if vblank is armed, kick the fake vblank
 * worker so user space sees a flip complete.
 */
static void vidi_win_commit(struct device *dev, int zpos)
{
	struct vidi_context *ctx = get_vidi_context(dev);
	struct vidi_win_data *win_data;
	int win = zpos;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (ctx->suspended)
		return;

	if (win == DEFAULT_ZPOS)
		win = ctx->default_win;

	/*
	 * Fix: was "win > WINDOWS_NR" -- win == WINDOWS_NR indexed one
	 * past the end of ctx->win_data[].
	 */
	if (win < 0 || win >= WINDOWS_NR)
		return;

	win_data = &ctx->win_data[win];

	win_data->enabled = true;

	DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr);

	if (ctx->vblank_on)
		schedule_work(&ctx->work);
}
/* Mark a window disabled; nothing to program on virtual hardware. */
static void vidi_win_disable(struct device *dev, int zpos)
{
	struct vidi_context *ctx = get_vidi_context(dev);
	struct vidi_win_data *win_data;
	int win = zpos;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (win == DEFAULT_ZPOS)
		win = ctx->default_win;

	/*
	 * Fix: was "win > WINDOWS_NR" -- win == WINDOWS_NR indexed one
	 * past the end of ctx->win_data[].
	 */
	if (win < 0 || win >= WINDOWS_NR)
		return;

	win_data = &ctx->win_data[win];
	win_data->enabled = false;

	/* TODO. */
}
/* Overlay (plane) callbacks exported to the exynos DRM core. */
static struct exynos_drm_overlay_ops vidi_overlay_ops = {
	.mode_set = vidi_win_mode_set,
	.commit = vidi_win_commit,
	.disable = vidi_win_disable,
};

/* Manager descriptor; .pipe is assigned by the core at bind time. */
static struct exynos_drm_manager vidi_manager = {
	.pipe		= -1,
	.ops		= &vidi_manager_ops,
	.overlay_ops	= &vidi_overlay_ops,
	.display_ops	= &vidi_display_ops,
};
/*
 * Complete all pending page-flip events for @crtc: stamp them with
 * the current time, move them to their owners' event queues, wake the
 * waiters, and drop the vblank reference taken for the flip.
 */
static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
{
	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
	struct drm_pending_vblank_event *e, *t;
	struct timeval now;
	unsigned long flags;
	bool is_checked = false;

	spin_lock_irqsave(&drm_dev->event_lock, flags);

	list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
			base.link) {
		/* if event's pipe isn't same as crtc then ignore it. */
		if (crtc != e->pipe)
			continue;

		is_checked = true;

		do_gettimeofday(&now);
		e->event.sequence = 0;
		e->event.tv_sec = now.tv_sec;
		e->event.tv_usec = now.tv_usec;

		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
		wake_up_interruptible(&e->base.file_priv->event_wait);
	}

	if (is_checked) {
		/*
		 * call drm_vblank_put only in case that drm_vblank_get was
		 * called.
		 */
		if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
			drm_vblank_put(drm_dev, crtc);

		/*
		 * don't off vblank if vblank_disable_allowed is 1,
		 * because vblank would be off by timer handler.
		 */
		if (!drm_dev->vblank_disable_allowed)
			drm_vblank_off(drm_dev, crtc);
	}

	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
/*
 * Workqueue handler that emulates a vblank interrupt: sleep roughly
 * one frame, then deliver the vblank and retire pending page flips.
 */
static void vidi_fake_vblank_handler(struct work_struct *work)
{
	struct vidi_context *ctx = container_of(work, struct vidi_context,
					work);
	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
	struct exynos_drm_manager *manager = subdrv->manager;

	/* pipe < 0 means the manager is not bound to a CRTC yet */
	if (manager->pipe < 0)
		return;

	/* 16-20ms per frame, i.e. a refresh rate of roughly 50-60Hz. */
	usleep_range(16000, 20000);

	drm_handle_vblank(subdrv->drm_dev, manager->pipe);
	vidi_finish_pageflip(subdrv->drm_dev, manager->pipe);
}
/* Sub-driver bind: enable the DRM vblank machinery on the device. */
static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * enable drm irq mode.
	 * - with irq_enabled = 1, we can use the vblank feature.
	 *
	 * P.S. note that we wouldn't use drm irq handler but
	 *	just specific driver own one instead because
	 *	drm framework supports only one irq handler.
	 */
	drm_dev->irq_enabled = 1;

	/*
	 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
	 * by drm timer once a current process gives up ownership of
	 * vblank event.(after drm_vblank_put function is called)
	 */
	drm_dev->vblank_disable_allowed = 1;

	return 0;
}

/* Sub-driver unbind: nothing allocated in probe, so nothing to undo. */
static void vidi_subdrv_remove(struct drm_device *drm_dev)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* TODO. */
}
/*
 * Power the virtual display on or off.  On power-on (resume) re-arm
 * vblank if it was armed before suspend and replay all window state
 * via vidi_apply(); power-off just flags the context suspended.
 */
static int vidi_power_on(struct vidi_context *ctx, bool enable)
{
	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
	struct device *dev = subdrv->dev;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * Fix: dropped the "enable != false && enable != true" guard;
	 * 'enable' is a bool, so that condition could never be true
	 * (dead code).
	 */
	if (enable) {
		ctx->suspended = false;

		/* if vblank was enabled status, enable it again. */
		if (test_and_clear_bit(0, &ctx->irq_flags))
			vidi_enable_vblank(dev);

		vidi_apply(dev);
	} else {
		ctx->suspended = true;
	}

	return 0;
}
/* sysfs "connection" read: report the current fake hotplug state (0/1). */
static int vidi_show_connection(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int rc;
	struct vidi_context *ctx = get_vidi_context(dev);

	/* lock against concurrent writes via vidi_store_connection() */
	mutex_lock(&ctx->lock);

	rc = sprintf(buf, "%d\n", ctx->connected);

	mutex_unlock(&ctx->lock);

	return rc;
}
/*
 * sysfs "connection" write: accept 0 (disconnected) or 1 (connected)
 * and report the change to the DRM core as a hotplug event.
 */
static int vidi_store_connection(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	struct vidi_context *ctx = get_vidi_context(dev);
	int connected;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * Fix: parse into a local first.  The original parsed straight
	 * into ctx->connected, so an out-of-range write clobbered the
	 * state before validation -- and it passed an unsigned int *
	 * where kstrtoint() expects an int *.
	 */
	ret = kstrtoint(buf, 0, &connected);
	if (ret)
		return ret;

	/* only 0 and 1 are meaningful connection states */
	if (connected < 0 || connected > 1)
		return -EINVAL;

	ctx->connected = connected;

	DRM_DEBUG_KMS("requested connection.\n");

	/* let the DRM core re-probe the virtual connector */
	drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);

	return len;
}
static DEVICE_ATTR(connection, 0644, vidi_show_connection,
			vidi_store_connection);
/*
 * Ioctl to simulate connector hotplug: validates the request, finds
 * the VIDI encoder's context, installs the caller's EDID on connect
 * and raises a hotplug event.
 *
 * NOTE(review): ctx->raw_edid is pointed directly at vidi->edid,
 * which originates from the ioctl payload; vidi_get_edid() later
 * dereferences it.  Verify the exynos ioctl layer copies and
 * size-validates this user-supplied EDID before it reaches here.
 */
int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
				struct drm_file *file_priv)
{
	struct vidi_context *ctx = NULL;
	struct drm_encoder *encoder;
	struct exynos_drm_manager *manager;
	struct exynos_drm_display_ops *display_ops;
	struct drm_exynos_vidi_connection *vidi = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!vidi) {
		DRM_DEBUG_KMS("user data for vidi is null.\n");
		return -EINVAL;
	}

	if (!vidi->edid) {
		DRM_DEBUG_KMS("edid data is null.\n");
		return -EINVAL;
	}

	if (vidi->connection > 1) {
		DRM_DEBUG_KMS("connection should be 0 or 1.\n");
		return -EINVAL;
	}

	/* locate the encoder whose display_ops belong to this driver */
	list_for_each_entry(encoder, &drm_dev->mode_config.encoder_list,
								head) {
		manager = exynos_drm_get_manager(encoder);
		display_ops = manager->display_ops;

		if (display_ops->type == EXYNOS_DISPLAY_TYPE_VIDI) {
			ctx = get_vidi_context(manager->dev);
			break;
		}
	}

	if (!ctx) {
		DRM_DEBUG_KMS("not found virtual device type encoder.\n");
		return -EINVAL;
	}

	if (ctx->connected == vidi->connection) {
		DRM_DEBUG_KMS("same connection request.\n");
		return -EINVAL;
	}

	/* only install the EDID when connecting */
	if (vidi->connection)
		ctx->raw_edid = (struct edid *)vidi->edid;

	ctx->connected = vidi->connection;
	drm_helper_hpd_irq_event(ctx->subdrv.drm_dev);

	return 0;
}
/*
 * Platform probe: allocate the context, seed a fake EDID, register
 * the exynos sub-driver and create the "connection" sysfs attribute.
 */
static int __devinit vidi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct vidi_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->default_win = 0;

	INIT_WORK(&ctx->work, vidi_fake_vblank_handler);

	/* for test */
	ctx->raw_edid = (struct edid *)fake_edid_info;

	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->manager = &vidi_manager;
	subdrv->probe = vidi_subdrv_probe;
	subdrv->remove = vidi_subdrv_remove;

	mutex_init(&ctx->lock);

	platform_set_drvdata(pdev, ctx);

	/* sysfs failure is non-fatal: the driver still works via ioctl */
	ret = device_create_file(&pdev->dev, &dev_attr_connection);
	if (ret < 0)
		DRM_INFO("failed to create connection sysfs.\n");

	exynos_drm_subdrv_register(subdrv);

	return 0;
}

/* Platform remove: unregister the sub-driver and free the context. */
static int __devexit vidi_remove(struct platform_device *pdev)
{
	struct vidi_context *ctx = platform_get_drvdata(pdev);

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_drm_subdrv_unregister(&ctx->subdrv);

	kfree(ctx);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/* System sleep: flag the context suspended so commits become no-ops. */
static int vidi_suspend(struct device *dev)
{
	struct vidi_context *ctx = get_vidi_context(dev);

	return vidi_power_on(ctx, false);
}

/* System resume: replay vblank arming and window state. */
static int vidi_resume(struct device *dev)
{
	struct vidi_context *ctx = get_vidi_context(dev);

	return vidi_power_on(ctx, true);
}
#endif

static const struct dev_pm_ops vidi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(vidi_suspend, vidi_resume)
};

/* Platform driver glue for the virtual display. */
struct platform_driver vidi_driver = {
	.probe		= vidi_probe,
	.remove		= __devexit_p(vidi_remove),
	.driver		= {
		.name	= "exynos-drm-vidi",
		.owner	= THIS_MODULE,
		.pm	= &vidi_pm_ops,
	},
};
| gpl-2.0 |
profglavcho/ALPS.L0.MP8.V2.1_LCSH6735_65C_HZ_L_KERNEL | sound/core/timer.c | 6372 | 52043 | /*
* Timers abstract layer
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/string.h>
#include <sound/core.h>
#include <sound/timer.h>
#include <sound/control.h>
#include <sound/info.h>
#include <sound/minors.h>
#include <sound/initval.h>
#include <linux/kmod.h>
#if defined(CONFIG_SND_HRTIMER) || defined(CONFIG_SND_HRTIMER_MODULE)
#define DEFAULT_TIMER_LIMIT 4
#elif defined(CONFIG_SND_RTCTIMER) || defined(CONFIG_SND_RTCTIMER_MODULE)
#define DEFAULT_TIMER_LIMIT 2
#else
#define DEFAULT_TIMER_LIMIT 1
#endif
static int timer_limit = DEFAULT_TIMER_LIMIT;
static int timer_tstamp_monotonic = 1;
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA timer interface");
MODULE_LICENSE("GPL");
module_param(timer_limit, int, 0444);
MODULE_PARM_DESC(timer_limit, "Maximum global timers in system.");
module_param(timer_tstamp_monotonic, int, 0444);
MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
MODULE_ALIAS("devname:snd/timer");
/* Per-open state of /dev/snd/timer (one instance per file handle). */
struct snd_timer_user {
	struct snd_timer_instance *timeri;
	int tread;		/* enhanced read with timestamps and events */
	unsigned long ticks;
	unsigned long overrun;	/* events dropped because the queue was full */
	int qhead;		/* ring-buffer read index */
	int qtail;		/* ring-buffer write index */
	int qused;
	int queue_size;
	struct snd_timer_read *queue;	/* used when tread == 0 */
	struct snd_timer_tread *tqueue;	/* used when tread != 0 */
	spinlock_t qlock;	/* protects the queue indices above */
	unsigned long last_resolution;
	unsigned int filter;	/* event mask for tread mode */
	struct timespec tstamp;		/* trigger tstamp */
	wait_queue_head_t qchange_sleep;	/* readers block here */
	struct fasync_struct *fasync;
	struct mutex tread_sem;	/* serializes switching tread mode */
};
/* list of timers */
static LIST_HEAD(snd_timer_list);
/* list of slave instances */
static LIST_HEAD(snd_timer_slave_list);
/* lock for slave active lists */
static DEFINE_SPINLOCK(slave_active_lock);
static DEFINE_MUTEX(register_mutex);
static int snd_timer_free(struct snd_timer *timer);
static int snd_timer_dev_free(struct snd_device *device);
static int snd_timer_dev_register(struct snd_device *device);
static int snd_timer_dev_disconnect(struct snd_device *device);
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left);
/*
 * create a timer instance with the given owner string.
 * when timer is not NULL, increments the module counter
 */
static struct snd_timer_instance *snd_timer_instance_new(char *owner,
							 struct snd_timer *timer)
{
	struct snd_timer_instance *timeri;

	timeri = kzalloc(sizeof(*timeri), GFP_KERNEL);
	if (timeri == NULL)
		return NULL;

	/* keep a private copy: the caller's string may not outlive us */
	timeri->owner = kstrdup(owner, GFP_KERNEL);
	if (! timeri->owner) {
		kfree(timeri);
		return NULL;
	}

	INIT_LIST_HEAD(&timeri->open_list);
	INIT_LIST_HEAD(&timeri->active_list);
	INIT_LIST_HEAD(&timeri->ack_list);
	INIT_LIST_HEAD(&timeri->slave_list_head);
	INIT_LIST_HEAD(&timeri->slave_active_head);

	timeri->timer = timer;
	/* pin the backing hardware module for the life of the instance */
	if (timer && !try_module_get(timer->module)) {
		kfree(timeri->owner);
		kfree(timeri);
		return NULL;
	}

	return timeri;
}
/*
 * find a timer instance from the given timer id
 */
static struct snd_timer *snd_timer_find(struct snd_timer_id *tid)
{
	struct snd_timer *timer = NULL;

	list_for_each_entry(timer, &snd_timer_list, device_list) {
		if (timer->tmr_class != tid->dev_class)
			continue;
		/* card number only matters for card/PCM class timers */
		if ((timer->tmr_class == SNDRV_TIMER_CLASS_CARD ||
		     timer->tmr_class == SNDRV_TIMER_CLASS_PCM) &&
		    (timer->card == NULL ||
		     timer->card->number != tid->card))
			continue;
		if (timer->tmr_device != tid->device)
			continue;
		if (timer->tmr_subdevice != tid->subdevice)
			continue;
		return timer;
	}
	return NULL;
}

#ifdef CONFIG_MODULES

/* Ask modprobe for the module that might provide the requested timer. */
static void snd_timer_request(struct snd_timer_id *tid)
{
	switch (tid->dev_class) {
	case SNDRV_TIMER_CLASS_GLOBAL:
		if (tid->device < timer_limit)
			request_module("snd-timer-%i", tid->device);
		break;
	case SNDRV_TIMER_CLASS_CARD:
	case SNDRV_TIMER_CLASS_PCM:
		if (tid->card < snd_ecards_limit)
			request_module("snd-card-%i", tid->card);
		break;
	default:
		break;
	}
}

#endif
/*
 * look for a master instance matching with the slave id of the given slave.
 * when found, relink the open_link of the slave.
 *
 * call this with register_mutex down.
 */
static void snd_timer_check_slave(struct snd_timer_instance *slave)
{
	struct snd_timer *timer;
	struct snd_timer_instance *master;

	/* FIXME: it's really dumb to look up all entries.. */
	list_for_each_entry(timer, &snd_timer_list, device_list) {
		list_for_each_entry(master, &timer->open_list_head, open_list) {
			if (slave->slave_class == master->slave_class &&
			    slave->slave_id == master->slave_id) {
				list_move_tail(&slave->open_list,
					       &master->slave_list_head);
				/* master/timer pointers change under
				 * slave_active_lock */
				spin_lock_irq(&slave_active_lock);
				slave->master = master;
				slave->timer = master->timer;
				spin_unlock_irq(&slave_active_lock);
				return;
			}
		}
	}
}

/*
 * look for slave instances matching with the slave id of the given master.
 * when found, relink the open_link of slaves.
 *
 * call this with register_mutex down.
 */
static void snd_timer_check_master(struct snd_timer_instance *master)
{
	struct snd_timer_instance *slave, *tmp;

	/* check all pending slaves */
	list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
		if (slave->slave_class == master->slave_class &&
		    slave->slave_id == master->slave_id) {
			list_move_tail(&slave->open_list, &master->slave_list_head);
			spin_lock_irq(&slave_active_lock);
			slave->master = master;
			slave->timer = master->timer;
			/* a slave already running joins the master's
			 * active list immediately */
			if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
				list_add_tail(&slave->active_list,
					      &master->slave_active_head);
			spin_unlock_irq(&slave_active_lock);
		}
	}
}
/*
 * open a timer instance
 * when opening a master, the slave id must be here given.
 */
int snd_timer_open(struct snd_timer_instance **ti,
		   char *owner, struct snd_timer_id *tid,
		   unsigned int slave_id)
{
	struct snd_timer *timer;
	struct snd_timer_instance *timeri = NULL;

	if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
		/* open a slave instance */
		if (tid->dev_sclass <= SNDRV_TIMER_SCLASS_NONE ||
		    tid->dev_sclass > SNDRV_TIMER_SCLASS_OSS_SEQUENCER) {
			snd_printd("invalid slave class %i\n", tid->dev_sclass);
			return -EINVAL;
		}
		mutex_lock(&register_mutex);
		timeri = snd_timer_instance_new(owner, NULL);
		if (!timeri) {
			mutex_unlock(&register_mutex);
			return -ENOMEM;
		}
		timeri->slave_class = tid->dev_sclass;
		timeri->slave_id = tid->device;
		timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
		/* park on the global slave list until a master appears */
		list_add_tail(&timeri->open_list, &snd_timer_slave_list);
		snd_timer_check_slave(timeri);
		mutex_unlock(&register_mutex);
		*ti = timeri;
		return 0;
	}

	/* open a master instance */
	mutex_lock(&register_mutex);
	timer = snd_timer_find(tid);
#ifdef CONFIG_MODULES
	/* not present yet: drop the lock, try to load its module, retry */
	if (!timer) {
		mutex_unlock(&register_mutex);
		snd_timer_request(tid);
		mutex_lock(&register_mutex);
		timer = snd_timer_find(tid);
	}
#endif
	if (!timer) {
		mutex_unlock(&register_mutex);
		return -ENODEV;
	}
	/* an exclusive holder blocks any further opens */
	if (!list_empty(&timer->open_list_head)) {
		timeri = list_entry(timer->open_list_head.next,
				    struct snd_timer_instance, open_list);
		if (timeri->flags & SNDRV_TIMER_IFLG_EXCLUSIVE) {
			mutex_unlock(&register_mutex);
			return -EBUSY;
		}
	}
	timeri = snd_timer_instance_new(owner, timer);
	if (!timeri) {
		mutex_unlock(&register_mutex);
		return -ENOMEM;
	}
	timeri->slave_class = tid->dev_sclass;
	timeri->slave_id = slave_id;
	/* first opener triggers the hardware open callback */
	if (list_empty(&timer->open_list_head) && timer->hw.open)
		timer->hw.open(timer);
	list_add_tail(&timeri->open_list, &timer->open_list_head);
	/* adopt any parked slaves that match this master's slave id */
	snd_timer_check_master(timeri);
	mutex_unlock(&register_mutex);
	*ti = timeri;
	return 0;
}
static int _snd_timer_stop(struct snd_timer_instance *timeri,
			   int keep_flag, int event);

/*
 * close a timer instance
 */
int snd_timer_close(struct snd_timer_instance *timeri)
{
	struct snd_timer *timer = NULL;
	struct snd_timer_instance *slave, *tmp;

	if (snd_BUG_ON(!timeri))
		return -ENXIO;

	/* force to stop the timer */
	snd_timer_stop(timeri);

	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
		/* wait, until the active callback is finished */
		/* (busy-wait on IFLG_CALLBACK, dropping the lock so the
		 *  callback can clear it) */
		spin_lock_irq(&slave_active_lock);
		while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
			spin_unlock_irq(&slave_active_lock);
			udelay(10);
			spin_lock_irq(&slave_active_lock);
		}
		spin_unlock_irq(&slave_active_lock);
		mutex_lock(&register_mutex);
		list_del(&timeri->open_list);
		mutex_unlock(&register_mutex);
	} else {
		timer = timeri->timer;
		if (snd_BUG_ON(!timer))
			goto out;
		/* wait, until the active callback is finished */
		spin_lock_irq(&timer->lock);
		while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
			spin_unlock_irq(&timer->lock);
			udelay(10);
			spin_lock_irq(&timer->lock);
		}
		spin_unlock_irq(&timer->lock);
		mutex_lock(&register_mutex);
		list_del(&timeri->open_list);
		/* last closer triggers the hardware close callback */
		if (timer && list_empty(&timer->open_list_head) &&
		    timer->hw.close)
			timer->hw.close(timer);
		/* remove slave links */
		/* orphaned slaves go back to the global pending list */
		list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
					 open_list) {
			spin_lock_irq(&slave_active_lock);
			_snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION);
			list_move_tail(&slave->open_list, &snd_timer_slave_list);
			slave->master = NULL;
			slave->timer = NULL;
			spin_unlock_irq(&slave_active_lock);
		}
		mutex_unlock(&register_mutex);
	}
 out:
	if (timeri->private_free)
		timeri->private_free(timeri);
	kfree(timeri->owner);
	kfree(timeri);
	if (timer)
		module_put(timer->module);
	return 0;
}
/*
 * snd_timer_resolution - query the tick resolution of an instance.
 *
 * Prefers the hardware's dynamic c_resolution() callback over the
 * static hw.resolution value; returns 0 when the instance is NULL or
 * not bound to a timer.
 */
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
{
	struct snd_timer *t;

	if (!timeri || !timeri->timer)
		return 0;

	t = timeri->timer;
	if (t->hw.c_resolution)
		return t->hw.c_resolution(t);

	return t->hw.resolution;
}
/*
 * Deliver a start/stop/continue/pause event to the instance's own
 * ccallback and then (for a master) to every active slave, with the
 * slave variant of the event code (event + 100).
 */
static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
{
	struct snd_timer *timer;
	unsigned long flags;
	unsigned long resolution = 0;
	struct snd_timer_instance *ts;
	struct timespec tstamp;

	if (timer_tstamp_monotonic)
		do_posix_clock_monotonic_gettime(&tstamp);
	else
		getnstimeofday(&tstamp);
	if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_START ||
		       event > SNDRV_TIMER_EVENT_PAUSE))
		return;
	/* resolution is only reported on (re)start events */
	if (event == SNDRV_TIMER_EVENT_START ||
	    event == SNDRV_TIMER_EVENT_CONTINUE)
		resolution = snd_timer_resolution(ti);
	if (ti->ccallback)
		ti->ccallback(ti, event, &tstamp, resolution);
	if (ti->flags & SNDRV_TIMER_IFLG_SLAVE)
		return;
	timer = ti->timer;
	if (timer == NULL)
		return;
	if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
		return;
	/* fan the event out to the active slaves under the timer lock */
	spin_lock_irqsave(&timer->lock, flags);
	list_for_each_entry(ts, &ti->slave_active_head, active_list)
		if (ts->ccallback)
			ts->ccallback(ti, event + 100, &tstamp, resolution);
	spin_unlock_irqrestore(&timer->lock, flags);
}
/*
 * snd_timer_start1 - arm a master timer instance (timer->lock held)
 * @timer: the hardware timer
 * @timeri: the instance to start
 * @sticks: number of ticks to schedule
 *
 * Returns 0 when the instance runs immediately, 1 when the start is
 * deferred until the interrupt path reschedules the hardware.
 */
static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri,
			    unsigned long sticks)
{
	list_move_tail(&timeri->active_list, &timer->active_list_head);
	if (timer->running) {
		/* slave hardware follows its master: count it running now */
		if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
			goto __start_now;
		/* hardware already ticking: let the IRQ path pick us up */
		timer->flags |= SNDRV_TIMER_FLG_RESCHED;
		timeri->flags |= SNDRV_TIMER_IFLG_START;
		return 1;	/* delayed start */
	} else {
		timer->sticks = sticks;
		timer->hw.start(timer);
	      __start_now:
		timer->running++;
		timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
		return 0;
	}
}
static int snd_timer_start_slave(struct snd_timer_instance *timeri)
{
unsigned long flags;
spin_lock_irqsave(&slave_active_lock, flags);
timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
if (timeri->master)
list_add_tail(&timeri->active_list,
&timeri->master->slave_active_head);
spin_unlock_irqrestore(&slave_active_lock, flags);
return 1; /* delayed start */
}
/*
* start the timer instance
*/
/*
 * snd_timer_start - start the timer instance
 * @timeri: the instance to start
 * @ticks: period length in timer ticks (must be >= 1)
 *
 * Returns 0 (immediate) or 1 (delayed start) on success, or a
 * negative error code.
 */
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
{
	struct snd_timer *timer;
	int result = -EINVAL;
	unsigned long flags;

	if (timeri == NULL || ticks < 1)
		return -EINVAL;
	/* slaves are only linked to their master, never armed directly */
	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
		result = snd_timer_start_slave(timeri);
		snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
		return result;
	}
	timer = timeri->timer;
	if (timer == NULL)
		return -EINVAL;
	spin_lock_irqsave(&timer->lock, flags);
	/* reload both the full period and the remaining-tick counter */
	timeri->ticks = timeri->cticks = ticks;
	timeri->pticks = 0;
	result = snd_timer_start1(timer, timeri, ticks);
	spin_unlock_irqrestore(&timer->lock, flags);
	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
	return result;
}
/*
 * _snd_timer_stop - common worker for stop/pause/close
 * @timeri: the instance to stop
 * @keep_flag: when non-zero, leave the RUNNING/START flags alone
 *             (used by the close path, which holds the needed lock)
 * @event: event to report; SNDRV_TIMER_EVENT_RESOLUTION means "silent"
 */
static int _snd_timer_stop(struct snd_timer_instance * timeri,
			   int keep_flag, int event)
{
	struct snd_timer *timer;
	unsigned long flags;

	if (snd_BUG_ON(!timeri))
		return -ENXIO;
	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
		if (!keep_flag) {
			spin_lock_irqsave(&slave_active_lock, flags);
			timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
			spin_unlock_irqrestore(&slave_active_lock, flags);
		}
		goto __end;
	}
	timer = timeri->timer;
	if (!timer)
		return -EINVAL;
	spin_lock_irqsave(&timer->lock, flags);
	list_del_init(&timeri->ack_list);
	list_del_init(&timeri->active_list);
	/* stop the hardware only when the last running instance goes away */
	if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
	    !(--timer->running)) {
		timer->hw.stop(timer);
		if (timer->flags & SNDRV_TIMER_FLG_RESCHED) {
			timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
			snd_timer_reschedule(timer, 0);
			/* a pending delayed start still needs the hardware */
			if (timer->flags & SNDRV_TIMER_FLG_CHANGE) {
				timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
				timer->hw.start(timer);
			}
		}
	}
	if (!keep_flag)
		timeri->flags &=
			~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
	spin_unlock_irqrestore(&timer->lock, flags);
      __end:
	if (event != SNDRV_TIMER_EVENT_RESOLUTION)
		snd_timer_notify1(timeri, event);
	return 0;
}
/*
* stop the timer instance.
*
* do not call this from the timer callback!
*/
int snd_timer_stop(struct snd_timer_instance *timeri)
{
struct snd_timer *timer;
unsigned long flags;
int err;
err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP);
if (err < 0)
return err;
timer = timeri->timer;
if (!timer)
return -EINVAL;
spin_lock_irqsave(&timer->lock, flags);
timeri->cticks = timeri->ticks;
timeri->pticks = 0;
spin_unlock_irqrestore(&timer->lock, flags);
return 0;
}
/*
* start again.. the tick is kept.
*/
/*
 * snd_timer_continue - restart the instance, keeping its remaining ticks
 * @timeri: the instance to resume
 */
int snd_timer_continue(struct snd_timer_instance *timeri)
{
	struct snd_timer *timer;
	unsigned long irqflags;
	int rc;

	if (timeri == NULL)
		return -EINVAL;
	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
		return snd_timer_start_slave(timeri);
	timer = timeri->timer;
	if (timer == NULL)
		return -EINVAL;
	spin_lock_irqsave(&timer->lock, irqflags);
	/* never schedule a zero-tick period */
	if (!timeri->cticks)
		timeri->cticks = 1;
	timeri->pticks = 0;
	rc = snd_timer_start1(timer, timeri, timer->sticks);
	spin_unlock_irqrestore(&timer->lock, irqflags);
	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
	return rc;
}
/*
* pause.. remember the ticks left
*/
/*
 * snd_timer_pause - pause the instance, remembering the ticks left
 * @timeri: the instance to pause
 */
int snd_timer_pause(struct snd_timer_instance * timeri)
{
	return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE);
}
/*
* reschedule the timer
*
* start pending instances and check the scheduling ticks.
* when the scheduling ticks is changed set CHANGE flag to reprogram the timer.
*/
/*
 * snd_timer_reschedule - recompute timer->sticks (timer->lock held)
 * @timer: the hardware timer
 * @ticks_left: ticks currently programmed into the hardware
 *
 * Promotes pending delayed starts to running, then picks the smallest
 * remaining tick count among all running instances.  Sets the CHANGE
 * flag when the hardware period must be reprogrammed.
 */
static void snd_timer_reschedule(struct snd_timer * timer, unsigned long ticks_left)
{
	struct snd_timer_instance *ti;
	unsigned long ticks = ~0UL;

	list_for_each_entry(ti, &timer->active_list_head, active_list) {
		if (ti->flags & SNDRV_TIMER_IFLG_START) {
			/* delayed start requested in snd_timer_start1() */
			ti->flags &= ~SNDRV_TIMER_IFLG_START;
			ti->flags |= SNDRV_TIMER_IFLG_RUNNING;
			timer->running++;
		}
		if (ti->flags & SNDRV_TIMER_IFLG_RUNNING) {
			if (ticks > ti->cticks)
				ticks = ti->cticks;
		}
	}
	if (ticks == ~0UL) {
		/* nothing is running any more */
		timer->flags &= ~SNDRV_TIMER_FLG_RESCHED;
		return;
	}
	/* clamp to the longest period the hardware supports */
	if (ticks > timer->hw.ticks)
		ticks = timer->hw.ticks;
	if (ticks_left != ticks)
		timer->flags |= SNDRV_TIMER_FLG_CHANGE;
	timer->sticks = ticks;
}
/*
* timer tasklet
*
*/
/*
 * snd_timer_tasklet - run the "slow" (non-IRQ-context) callbacks
 * @arg: the struct snd_timer, cast to unsigned long
 *
 * Drains timer->sack_list_head.  The lock is dropped around each
 * callback; IFLG_CALLBACK marks the instance busy so the close path
 * can wait for the callback to finish.
 */
static void snd_timer_tasklet(unsigned long arg)
{
	struct snd_timer *timer = (struct snd_timer *) arg;
	struct snd_timer_instance *ti;
	struct list_head *p;
	unsigned long resolution, ticks;
	unsigned long flags;

	spin_lock_irqsave(&timer->lock, flags);
	/* now process all callbacks */
	while (!list_empty(&timer->sack_list_head)) {
		p = timer->sack_list_head.next;	/* get first item */
		ti = list_entry(p, struct snd_timer_instance, ack_list);
		/* remove from ack_list and make empty */
		list_del_init(p);
		ticks = ti->pticks;
		ti->pticks = 0;
		resolution = ti->resolution;
		ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
		spin_unlock(&timer->lock);
		if (ti->callback)
			ti->callback(ti, resolution, ticks);
		spin_lock(&timer->lock);
		ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
	}
	spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* timer interrupt
*
* ticks_left is usually equal to timer->sticks.
*
*/
/*
 * snd_timer_interrupt - timer interrupt handler
 * @timer: the timer that fired
 * @ticks_left: ticks elapsed since the last interrupt (usually equal
 *              to timer->sticks)
 *
 * Decrements every running instance, queues the expired ones for their
 * callbacks (fast ones run here in IRQ context, slow ones from the
 * tasklet), restarts or stops the hardware, then fires the fast
 * callbacks.
 */
void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
{
	struct snd_timer_instance *ti, *ts, *tmp;
	unsigned long resolution, ticks;
	struct list_head *p, *ack_list_head;
	unsigned long flags;
	int use_tasklet = 0;

	if (timer == NULL)
		return;
	spin_lock_irqsave(&timer->lock, flags);
	/* remember the current resolution */
	if (timer->hw.c_resolution)
		resolution = timer->hw.c_resolution(timer);
	else
		resolution = timer->hw.resolution;
	/* loop for all active instances
	 * Here we cannot use list_for_each_entry because the active_list of a
	 * processed instance is relinked to done_list_head before the callback
	 * is called.
	 */
	list_for_each_entry_safe(ti, tmp, &timer->active_list_head,
				 active_list) {
		if (!(ti->flags & SNDRV_TIMER_IFLG_RUNNING))
			continue;
		ti->pticks += ticks_left;
		ti->resolution = resolution;
		if (ti->cticks < ticks_left)
			ti->cticks = 0;
		else
			ti->cticks -= ticks_left;
		if (ti->cticks) /* not expired */
			continue;
		if (ti->flags & SNDRV_TIMER_IFLG_AUTO) {
			/* auto-repeat: reload the full period */
			ti->cticks = ti->ticks;
		} else {
			ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
			if (--timer->running)
				list_del(&ti->active_list);
		}
		/* fast instances are acknowledged right here, the others
		 * later from the tasklet */
		if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
		    (ti->flags & SNDRV_TIMER_IFLG_FAST))
			ack_list_head = &timer->ack_list_head;
		else
			ack_list_head = &timer->sack_list_head;
		if (list_empty(&ti->ack_list))
			list_add_tail(&ti->ack_list, ack_list_head);
		/* slaves expire together with their master */
		list_for_each_entry(ts, &ti->slave_active_head, active_list) {
			ts->pticks = ti->pticks;
			ts->resolution = resolution;
			if (list_empty(&ts->ack_list))
				list_add_tail(&ts->ack_list, ack_list_head);
		}
	}
	if (timer->flags & SNDRV_TIMER_FLG_RESCHED)
		snd_timer_reschedule(timer, timer->sticks);
	if (timer->running) {
		if (timer->hw.flags & SNDRV_TIMER_HW_STOP) {
			timer->hw.stop(timer);
			timer->flags |= SNDRV_TIMER_FLG_CHANGE;
		}
		if (!(timer->hw.flags & SNDRV_TIMER_HW_AUTO) ||
		    (timer->flags & SNDRV_TIMER_FLG_CHANGE)) {
			/* restart timer */
			timer->flags &= ~SNDRV_TIMER_FLG_CHANGE;
			timer->hw.start(timer);
		}
	} else {
		timer->hw.stop(timer);
	}
	/* now process all fast callbacks */
	while (!list_empty(&timer->ack_list_head)) {
		p = timer->ack_list_head.next;	/* get first item */
		ti = list_entry(p, struct snd_timer_instance, ack_list);
		/* remove from ack_list and make empty */
		list_del_init(p);
		ticks = ti->pticks;
		ti->pticks = 0;
		ti->flags |= SNDRV_TIMER_IFLG_CALLBACK;
		spin_unlock(&timer->lock);
		if (ti->callback)
			ti->callback(ti, resolution, ticks);
		spin_lock(&timer->lock);
		ti->flags &= ~SNDRV_TIMER_IFLG_CALLBACK;
	}
	/* do we have any slow callbacks? */
	use_tasklet = !list_empty(&timer->sack_list_head);
	spin_unlock_irqrestore(&timer->lock, flags);
	if (use_tasklet)
		tasklet_schedule(&timer->task_queue);
}
/*
*/
/*
 * snd_timer_new - create a new timer device
 * @card: owning card (NULL for global timers)
 * @id: identifier string (may be NULL)
 * @tid: timer id descriptor
 * @rtimer: filled with the new timer on success (may be NULL)
 *
 * Returns zero on success or a negative error code.
 */
int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
		  struct snd_timer **rtimer)
{
	static struct snd_device_ops ops = {
		.dev_free = snd_timer_dev_free,
		.dev_register = snd_timer_dev_register,
		.dev_disconnect = snd_timer_dev_disconnect,
	};
	struct snd_timer *timer;
	int ret;

	if (snd_BUG_ON(!tid))
		return -EINVAL;
	if (rtimer)
		*rtimer = NULL;

	timer = kzalloc(sizeof(*timer), GFP_KERNEL);
	if (!timer) {
		snd_printk(KERN_ERR "timer: cannot allocate\n");
		return -ENOMEM;
	}

	/* copy the identity from the request */
	timer->tmr_class = tid->dev_class;
	timer->tmr_device = tid->device;
	timer->tmr_subdevice = tid->subdevice;
	timer->card = card;
	if (id)
		strlcpy(timer->id, id, sizeof(timer->id));

	INIT_LIST_HEAD(&timer->device_list);
	INIT_LIST_HEAD(&timer->open_list_head);
	INIT_LIST_HEAD(&timer->active_list_head);
	INIT_LIST_HEAD(&timer->ack_list_head);
	INIT_LIST_HEAD(&timer->sack_list_head);
	spin_lock_init(&timer->lock);
	tasklet_init(&timer->task_queue, snd_timer_tasklet,
		     (unsigned long)timer);

	if (card) {
		timer->module = card->module;
		ret = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
		if (ret < 0) {
			snd_timer_free(timer);
			return ret;
		}
	}
	if (rtimer)
		*rtimer = timer;
	return 0;
}
static int snd_timer_free(struct snd_timer *timer)
{
if (!timer)
return 0;
mutex_lock(®ister_mutex);
if (! list_empty(&timer->open_list_head)) {
struct list_head *p, *n;
struct snd_timer_instance *ti;
snd_printk(KERN_WARNING "timer %p is busy?\n", timer);
list_for_each_safe(p, n, &timer->open_list_head) {
list_del_init(p);
ti = list_entry(p, struct snd_timer_instance, open_list);
ti->timer = NULL;
}
}
list_del(&timer->device_list);
mutex_unlock(®ister_mutex);
if (timer->private_free)
timer->private_free(timer);
kfree(timer);
return 0;
}
/* snd_device free callback: release the timer attached to the device */
static int snd_timer_dev_free(struct snd_device *device)
{
	return snd_timer_free(device->device_data);
}
static int snd_timer_dev_register(struct snd_device *dev)
{
struct snd_timer *timer = dev->device_data;
struct snd_timer *timer1;
if (snd_BUG_ON(!timer || !timer->hw.start || !timer->hw.stop))
return -ENXIO;
if (!(timer->hw.flags & SNDRV_TIMER_HW_SLAVE) &&
!timer->hw.resolution && timer->hw.c_resolution == NULL)
return -EINVAL;
mutex_lock(®ister_mutex);
list_for_each_entry(timer1, &snd_timer_list, device_list) {
if (timer1->tmr_class > timer->tmr_class)
break;
if (timer1->tmr_class < timer->tmr_class)
continue;
if (timer1->card && timer->card) {
if (timer1->card->number > timer->card->number)
break;
if (timer1->card->number < timer->card->number)
continue;
}
if (timer1->tmr_device > timer->tmr_device)
break;
if (timer1->tmr_device < timer->tmr_device)
continue;
if (timer1->tmr_subdevice > timer->tmr_subdevice)
break;
if (timer1->tmr_subdevice < timer->tmr_subdevice)
continue;
/* conflicts.. */
mutex_unlock(®ister_mutex);
return -EBUSY;
}
list_add_tail(&timer->device_list, &timer1->device_list);
mutex_unlock(®ister_mutex);
return 0;
}
static int snd_timer_dev_disconnect(struct snd_device *device)
{
struct snd_timer *timer = device->device_data;
mutex_lock(®ister_mutex);
list_del_init(&timer->device_list);
mutex_unlock(®ister_mutex);
return 0;
}
/*
 * snd_timer_notify - broadcast a master (MSTART..MRESUME) event
 * @timer: the timer the event concerns (must have the SLAVE hw flag)
 * @event: one of SNDRV_TIMER_EVENT_MSTART..SNDRV_TIMER_EVENT_MRESUME
 * @tstamp: timestamp to hand to the callbacks
 */
void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp)
{
	unsigned long flags;
	unsigned long resolution = 0;
	struct snd_timer_instance *ti, *ts;

	if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
		return;
	if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
		       event > SNDRV_TIMER_EVENT_MRESUME))
		return;
	spin_lock_irqsave(&timer->lock, flags);
	/* resolution matters only for (re)start-like events */
	if (event == SNDRV_TIMER_EVENT_MSTART ||
	    event == SNDRV_TIMER_EVENT_MCONTINUE ||
	    event == SNDRV_TIMER_EVENT_MRESUME) {
		if (timer->hw.c_resolution)
			resolution = timer->hw.c_resolution(timer);
		else
			resolution = timer->hw.resolution;
	}
	/* notify every active instance and all of its slaves */
	list_for_each_entry(ti, &timer->active_list_head, active_list) {
		if (ti->ccallback)
			ti->ccallback(ti, event, tstamp, resolution);
		list_for_each_entry(ts, &ti->slave_active_head, active_list)
			if (ts->ccallback)
				ts->ccallback(ts, event, tstamp, resolution);
	}
	spin_unlock_irqrestore(&timer->lock, flags);
}
/*
* exported functions for global timers
*/
int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer)
{
struct snd_timer_id tid;
tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE;
tid.card = -1;
tid.device = device;
tid.subdevice = 0;
return snd_timer_new(NULL, id, &tid, rtimer);
}
/* free a global timer created with snd_timer_global_new() */
int snd_timer_global_free(struct snd_timer *timer)
{
	return snd_timer_free(timer);
}
int snd_timer_global_register(struct snd_timer *timer)
{
struct snd_device dev;
memset(&dev, 0, sizeof(dev));
dev.device_data = timer;
return snd_timer_dev_register(&dev);
}
/*
* System timer
*/
/* private state of the jiffies-based system timer */
struct snd_timer_system_private {
	struct timer_list tlist;	/* the kernel timer driving us */
	unsigned long last_expires;	/* jiffies value we asked to fire at */
	unsigned long last_jiffies;	/* jiffies at the last (re)start */
	unsigned long correction;	/* accumulated lateness, in jiffies */
};
/* kernel-timer handler: forward the elapsed jiffies to the timer core */
static void snd_timer_s_function(unsigned long data)
{
	struct snd_timer *timer = (struct snd_timer *)data;
	struct snd_timer_system_private *priv = timer->private_data;
	unsigned long jiff = jiffies;
	/* record how late we fired so the next start can compensate */
	if (time_after(jiff, priv->last_expires))
		priv->correction += (long)jiff - (long)priv->last_expires;
	snd_timer_interrupt(timer, (long)jiff - (long)priv->last_jiffies);
}
/*
 * hw.start hook of the system timer: arm the kernel timer for the
 * current period, compensating for the lateness accumulated so far.
 */
static int snd_timer_s_start(struct snd_timer * timer)
{
	struct snd_timer_system_private *priv = timer->private_data;
	unsigned long expire;

	priv->last_jiffies = jiffies;
	expire = priv->last_jiffies;
	if (priv->correction > timer->sticks - 1) {
		/* consume part of the accumulated drift */
		priv->correction -= timer->sticks - 1;
		expire++;
	} else {
		expire += timer->sticks - priv->correction;
		priv->correction = 0;
	}
	priv->tlist.expires = expire;
	priv->last_expires = expire;
	add_timer(&priv->tlist);
	return 0;
}
static int snd_timer_s_stop(struct snd_timer * timer)
{
struct snd_timer_system_private *priv;
unsigned long jiff;
priv = (struct snd_timer_system_private *) timer->private_data;
del_timer(&priv->tlist);
jiff = jiffies;
if (time_before(jiff, priv->last_expires))
timer->sticks = priv->last_expires - jiff;
else
timer->sticks = 1;
priv->correction = 0;
return 0;
}
/* hardware descriptor for the jiffies-based system timer */
static struct snd_timer_hardware snd_timer_system =
{
	.flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
	.resolution = 1000000000L / HZ,	/* one jiffy, in nanoseconds */
	.ticks = 10000000L,
	.start = snd_timer_s_start,
	.stop = snd_timer_s_stop
};
/* private_free hook: release the system timer's private state */
static void snd_timer_free_system(struct snd_timer *timer)
{
	kfree(timer->private_data);
}
/*
 * Create and register the global jiffies-based system timer.
 * Called once at module init; returns 0 or a negative error code.
 */
static int snd_timer_register_system(void)
{
	struct snd_timer_system_private *priv;
	struct snd_timer *timer;
	int rc;

	rc = snd_timer_global_new("system", SNDRV_TIMER_GLOBAL_SYSTEM, &timer);
	if (rc < 0)
		return rc;
	strcpy(timer->name, "system timer");
	timer->hw = snd_timer_system;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		snd_timer_free(timer);
		return -ENOMEM;
	}
	init_timer(&priv->tlist);
	priv->tlist.function = snd_timer_s_function;
	priv->tlist.data = (unsigned long)timer;
	timer->private_data = priv;
	timer->private_free = snd_timer_free_system;

	return snd_timer_global_register(timer);
}
#ifdef CONFIG_PROC_FS
/*
* Info interface
*/
static void snd_timer_proc_read(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_timer *timer;
struct snd_timer_instance *ti;
mutex_lock(®ister_mutex);
list_for_each_entry(timer, &snd_timer_list, device_list) {
switch (timer->tmr_class) {
case SNDRV_TIMER_CLASS_GLOBAL:
snd_iprintf(buffer, "G%i: ", timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_CARD:
snd_iprintf(buffer, "C%i-%i: ",
timer->card->number, timer->tmr_device);
break;
case SNDRV_TIMER_CLASS_PCM:
snd_iprintf(buffer, "P%i-%i-%i: ", timer->card->number,
timer->tmr_device, timer->tmr_subdevice);
break;
default:
snd_iprintf(buffer, "?%i-%i-%i-%i: ", timer->tmr_class,
timer->card ? timer->card->number : -1,
timer->tmr_device, timer->tmr_subdevice);
}
snd_iprintf(buffer, "%s :", timer->name);
if (timer->hw.resolution)
snd_iprintf(buffer, " %lu.%03luus (%lu ticks)",
timer->hw.resolution / 1000,
timer->hw.resolution % 1000,
timer->hw.ticks);
if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
snd_iprintf(buffer, " SLAVE");
snd_iprintf(buffer, "\n");
list_for_each_entry(ti, &timer->open_list_head, open_list)
snd_iprintf(buffer, " Client %s : %s\n",
ti->owner ? ti->owner : "unknown",
ti->flags & (SNDRV_TIMER_IFLG_START |
SNDRV_TIMER_IFLG_RUNNING)
? "running" : "stopped");
}
mutex_unlock(®ister_mutex);
}
static struct snd_info_entry *snd_timer_proc_entry;
/* create the /proc/asound/timers info entry at module init */
static void __init snd_timer_proc_init(void)
{
	struct snd_info_entry *e;

	e = snd_info_create_module_entry(THIS_MODULE, "timers", NULL);
	if (e) {
		e->c.text.read = snd_timer_proc_read;
		/* drop the entry again if registration fails */
		if (snd_info_register(e) < 0) {
			snd_info_free_entry(e);
			e = NULL;
		}
	}
	snd_timer_proc_entry = e;
}
/* remove the /proc/asound/timers entry on module unload */
static void __exit snd_timer_proc_done(void)
{
	snd_info_free_entry(snd_timer_proc_entry);
}
#else /* !CONFIG_PROC_FS */
#define snd_timer_proc_init()
#define snd_timer_proc_done()
#endif
/*
* USER SPACE interface
*/
/*
 * Per-tick callback for the classic (snd_timer_read) user interface.
 * Coalesces consecutive events with the same resolution into one
 * queue entry, then wakes readers via SIGIO and the wait queue.
 */
static void snd_timer_user_interrupt(struct snd_timer_instance *timeri,
				     unsigned long resolution,
				     unsigned long ticks)
{
	struct snd_timer_user *tu = timeri->callback_data;
	struct snd_timer_read *r;
	int prev;

	spin_lock(&tu->qlock);
	if (tu->qused > 0) {
		/* merge into the previous entry when the resolution matches */
		prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
		r = &tu->queue[prev];
		if (r->resolution == resolution) {
			r->ticks += ticks;
			goto __wake;
		}
	}
	if (tu->qused >= tu->queue_size) {
		/* queue full: count the lost event */
		tu->overrun++;
	} else {
		r = &tu->queue[tu->qtail++];
		tu->qtail %= tu->queue_size;
		r->resolution = resolution;
		r->ticks = ticks;
		tu->qused++;
	}
      __wake:
	spin_unlock(&tu->qlock);
	kill_fasync(&tu->fasync, SIGIO, POLL_IN);
	wake_up(&tu->qchange_sleep);
}
static void snd_timer_user_append_to_tqueue(struct snd_timer_user *tu,
struct snd_timer_tread *tread)
{
if (tu->qused >= tu->queue_size) {
tu->overrun++;
} else {
memcpy(&tu->tqueue[tu->qtail++], tread, sizeof(*tread));
tu->qtail %= tu->queue_size;
tu->qused++;
}
}
/*
 * Condition-event callback for the tread-enabled user interface:
 * queues a snd_timer_tread record and wakes any readers.
 *
 * Fix: r1 is now zeroed before use.  The record is copied verbatim to
 * user space later, so uninitialized structure padding would leak
 * kernel stack memory (same issue as the upstream CVE-2016-4578 fix).
 */
static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
				     int event,
				     struct timespec *tstamp,
				     unsigned long resolution)
{
	struct snd_timer_user *tu = timeri->callback_data;
	struct snd_timer_tread r1;
	unsigned long flags;

	if (event >= SNDRV_TIMER_EVENT_START &&
	    event <= SNDRV_TIMER_EVENT_PAUSE)
		tu->tstamp = *tstamp;
	if ((tu->filter & (1 << event)) == 0 || !tu->tread)
		return;
	/* clear the whole record incl. padding before it reaches user space */
	memset(&r1, 0, sizeof(r1));
	r1.event = event;
	r1.tstamp = *tstamp;
	r1.val = resolution;
	spin_lock_irqsave(&tu->qlock, flags);
	snd_timer_user_append_to_tqueue(tu, &r1);
	spin_unlock_irqrestore(&tu->qlock, flags);
	kill_fasync(&tu->fasync, SIGIO, POLL_IN);
	wake_up(&tu->qchange_sleep);
}
/*
 * Per-tick callback for the tread-enabled user interface: emits a
 * RESOLUTION record when the resolution changed and coalesces TICK
 * records, then wakes readers when anything was queued.
 *
 * Fix: r1 is now zeroed before use.  The record is copied verbatim to
 * user space, so uninitialized structure padding would leak kernel
 * stack memory (same issue as the upstream CVE-2016-4578 fix).
 */
static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
				      unsigned long resolution,
				      unsigned long ticks)
{
	struct snd_timer_user *tu = timeri->callback_data;
	struct snd_timer_tread *r, r1;
	struct timespec tstamp;
	int prev, append = 0;

	memset(&tstamp, 0, sizeof(tstamp));
	/* clear the record incl. padding before it reaches user space */
	memset(&r1, 0, sizeof(r1));
	spin_lock(&tu->qlock);
	if ((tu->filter & ((1 << SNDRV_TIMER_EVENT_RESOLUTION) |
			   (1 << SNDRV_TIMER_EVENT_TICK))) == 0) {
		spin_unlock(&tu->qlock);
		return;
	}
	if (tu->last_resolution != resolution || ticks > 0) {
		if (timer_tstamp_monotonic)
			do_posix_clock_monotonic_gettime(&tstamp);
		else
			getnstimeofday(&tstamp);
	}
	if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
	    tu->last_resolution != resolution) {
		r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
		r1.tstamp = tstamp;
		r1.val = resolution;
		snd_timer_user_append_to_tqueue(tu, &r1);
		tu->last_resolution = resolution;
		append++;
	}
	if ((tu->filter & (1 << SNDRV_TIMER_EVENT_TICK)) == 0)
		goto __wake;
	if (ticks == 0)
		goto __wake;
	if (tu->qused > 0) {
		/* merge into the previous TICK record when possible */
		prev = tu->qtail == 0 ? tu->queue_size - 1 : tu->qtail - 1;
		r = &tu->tqueue[prev];
		if (r->event == SNDRV_TIMER_EVENT_TICK) {
			r->tstamp = tstamp;
			r->val += ticks;
			append++;
			goto __wake;
		}
	}
	r1.event = SNDRV_TIMER_EVENT_TICK;
	r1.tstamp = tstamp;
	r1.val = ticks;
	snd_timer_user_append_to_tqueue(tu, &r1);
	append++;
      __wake:
	spin_unlock(&tu->qlock);
	if (append == 0)
		return;
	kill_fasync(&tu->fasync, SIGIO, POLL_IN);
	wake_up(&tu->qchange_sleep);
}
/*
 * open() handler of /dev/snd/timer: allocate the per-file state and a
 * default 128-entry event queue for the classic read interface.
 */
static int snd_timer_user_open(struct inode *inode, struct file *file)
{
	struct snd_timer_user *tu;
	int rc;

	rc = nonseekable_open(inode, file);
	if (rc < 0)
		return rc;

	tu = kzalloc(sizeof(*tu), GFP_KERNEL);
	if (!tu)
		return -ENOMEM;
	spin_lock_init(&tu->qlock);
	init_waitqueue_head(&tu->qchange_sleep);
	mutex_init(&tu->tread_sem);
	tu->ticks = 1;
	tu->queue_size = 128;
	tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
			    GFP_KERNEL);
	if (!tu->queue) {
		kfree(tu);
		return -ENOMEM;
	}
	file->private_data = tu;
	return 0;
}
/*
 * release() handler of /dev/snd/timer: close the selected instance
 * (if any) and free both event queues and the per-file state.
 */
static int snd_timer_user_release(struct inode *inode, struct file *file)
{
	struct snd_timer_user *tu = file->private_data;

	if (!tu)
		return 0;
	file->private_data = NULL;
	if (tu->timeri)
		snd_timer_close(tu->timeri);
	kfree(tu->queue);
	kfree(tu->tqueue);
	kfree(tu);
	return 0;
}
static void snd_timer_user_zero_id(struct snd_timer_id *id)
{
id->dev_class = SNDRV_TIMER_CLASS_NONE;
id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
id->card = -1;
id->device = -1;
id->subdevice = -1;
}
/* describe @timer in the user-visible id structure @id */
static void snd_timer_user_copy_id(struct snd_timer_id *id, struct snd_timer *timer)
{
	id->card = timer->card ? timer->card->number : -1;
	id->device = timer->tmr_device;
	id->subdevice = timer->tmr_subdevice;
	id->dev_class = timer->tmr_class;
	id->dev_sclass = SNDRV_TIMER_SCLASS_NONE;
}
/*
 * SNDRV_TIMER_IOCTL_NEXT_DEVICE: find the next registered timer after
 * the id passed by the caller (dev_class < 0 means "first").
 *
 * Fixes: restores the "&register_mutex" references corrupted by an
 * encoding bug, and removes an unreachable duplicated
 * "if (id.card < 0)" branch from the CARD/PCM key-advance logic
 * (it sat in the else arm of the identical test, so it could never
 * execute) - behavior is unchanged.
 */
static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
{
	struct snd_timer_id id;
	struct snd_timer *timer;
	struct list_head *p;

	if (copy_from_user(&id, _tid, sizeof(id)))
		return -EFAULT;
	mutex_lock(&register_mutex);
	if (id.dev_class < 0) {		/* first item */
		if (list_empty(&snd_timer_list))
			snd_timer_user_zero_id(&id);
		else {
			timer = list_entry(snd_timer_list.next,
					   struct snd_timer, device_list);
			snd_timer_user_copy_id(&id, timer);
		}
	} else {
		switch (id.dev_class) {
		case SNDRV_TIMER_CLASS_GLOBAL:
			id.device = id.device < 0 ? 0 : id.device + 1;
			list_for_each(p, &snd_timer_list) {
				timer = list_entry(p, struct snd_timer, device_list);
				if (timer->tmr_class > SNDRV_TIMER_CLASS_GLOBAL) {
					snd_timer_user_copy_id(&id, timer);
					break;
				}
				if (timer->tmr_device >= id.device) {
					snd_timer_user_copy_id(&id, timer);
					break;
				}
			}
			if (p == &snd_timer_list)
				snd_timer_user_zero_id(&id);
			break;
		case SNDRV_TIMER_CLASS_CARD:
		case SNDRV_TIMER_CLASS_PCM:
			/* advance the (card, device, subdevice) key */
			if (id.card < 0) {
				id.card = 0;
			} else if (id.device < 0) {
				id.device = 0;
			} else if (id.subdevice < 0) {
				id.subdevice = 0;
			} else {
				id.subdevice++;
			}
			/* walk the sorted list for the first entry >= key */
			list_for_each(p, &snd_timer_list) {
				timer = list_entry(p, struct snd_timer, device_list);
				if (timer->tmr_class > id.dev_class) {
					snd_timer_user_copy_id(&id, timer);
					break;
				}
				if (timer->tmr_class < id.dev_class)
					continue;
				if (timer->card->number > id.card) {
					snd_timer_user_copy_id(&id, timer);
					break;
				}
				if (timer->card->number < id.card)
					continue;
				if (timer->tmr_device > id.device) {
					snd_timer_user_copy_id(&id, timer);
					break;
				}
				if (timer->tmr_device < id.device)
					continue;
				if (timer->tmr_subdevice > id.subdevice) {
					snd_timer_user_copy_id(&id, timer);
					break;
				}
				if (timer->tmr_subdevice < id.subdevice)
					continue;
				snd_timer_user_copy_id(&id, timer);
				break;
			}
			if (p == &snd_timer_list)
				snd_timer_user_zero_id(&id);
			break;
		default:
			snd_timer_user_zero_id(&id);
		}
	}
	mutex_unlock(&register_mutex);
	if (copy_to_user(_tid, &id, sizeof(*_tid)))
		return -EFAULT;
	return 0;
}
/*
 * SNDRV_TIMER_IOCTL_GINFO: return global information (name, resolution
 * range, client count) about the timer identified by ginfo->tid.
 * Restores the "&register_mutex" references corrupted by an encoding
 * bug; the logic is unchanged.
 */
static int snd_timer_user_ginfo(struct file *file,
				struct snd_timer_ginfo __user *_ginfo)
{
	struct snd_timer_ginfo *ginfo;
	struct snd_timer_id tid;
	struct snd_timer *t;
	struct list_head *p;
	int err = 0;

	ginfo = memdup_user(_ginfo, sizeof(*ginfo));
	if (IS_ERR(ginfo))
		return PTR_ERR(ginfo);
	/* keep only the requested id, clear everything else */
	tid = ginfo->tid;
	memset(ginfo, 0, sizeof(*ginfo));
	ginfo->tid = tid;
	mutex_lock(&register_mutex);
	t = snd_timer_find(&tid);
	if (t != NULL) {
		ginfo->card = t->card ? t->card->number : -1;
		if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
			ginfo->flags |= SNDRV_TIMER_FLG_SLAVE;
		strlcpy(ginfo->id, t->id, sizeof(ginfo->id));
		strlcpy(ginfo->name, t->name, sizeof(ginfo->name));
		ginfo->resolution = t->hw.resolution;
		if (t->hw.resolution_min > 0) {
			ginfo->resolution_min = t->hw.resolution_min;
			ginfo->resolution_max = t->hw.resolution_max;
		}
		/* count the open instances */
		list_for_each(p, &t->open_list_head) {
			ginfo->clients++;
		}
	} else {
		err = -ENODEV;
	}
	mutex_unlock(&register_mutex);
	if (err >= 0 && copy_to_user(_ginfo, ginfo, sizeof(*ginfo)))
		err = -EFAULT;
	kfree(ginfo);
	return err;
}
/*
 * SNDRV_TIMER_IOCTL_GPARAMS: set the global period of a timer that
 * supports it (only allowed while the timer has no users).  Restores
 * the "&register_mutex" references corrupted by an encoding bug; the
 * logic is unchanged.
 */
static int snd_timer_user_gparams(struct file *file,
				  struct snd_timer_gparams __user *_gparams)
{
	struct snd_timer_gparams gparams;
	struct snd_timer *t;
	int err;

	if (copy_from_user(&gparams, _gparams, sizeof(gparams)))
		return -EFAULT;
	mutex_lock(&register_mutex);
	t = snd_timer_find(&gparams.tid);
	if (!t) {
		err = -ENODEV;
		goto _error;
	}
	/* the period cannot change while the timer is in use */
	if (!list_empty(&t->open_list_head)) {
		err = -EBUSY;
		goto _error;
	}
	if (!t->hw.set_period) {
		err = -ENOSYS;
		goto _error;
	}
	err = t->hw.set_period(t, gparams.period_num, gparams.period_den);
_error:
	mutex_unlock(&register_mutex);
	return err;
}
/*
 * SNDRV_TIMER_IOCTL_GSTATUS: report the current global resolution of
 * the timer identified by gstatus.tid.  Restores the "&register_mutex"
 * references corrupted by an encoding bug; the logic is unchanged.
 */
static int snd_timer_user_gstatus(struct file *file,
				  struct snd_timer_gstatus __user *_gstatus)
{
	struct snd_timer_gstatus gstatus;
	struct snd_timer_id tid;
	struct snd_timer *t;
	int err = 0;

	if (copy_from_user(&gstatus, _gstatus, sizeof(gstatus)))
		return -EFAULT;
	/* keep only the requested id, clear everything else */
	tid = gstatus.tid;
	memset(&gstatus, 0, sizeof(gstatus));
	gstatus.tid = tid;
	mutex_lock(&register_mutex);
	t = snd_timer_find(&tid);
	if (t != NULL) {
		if (t->hw.c_resolution)
			gstatus.resolution = t->hw.c_resolution(t);
		else
			gstatus.resolution = t->hw.resolution;
		if (t->hw.precise_resolution) {
			t->hw.precise_resolution(t, &gstatus.resolution_num,
						 &gstatus.resolution_den);
		} else {
			gstatus.resolution_num = gstatus.resolution;
			gstatus.resolution_den = 1000000000uL;
		}
	} else {
		err = -ENODEV;
	}
	mutex_unlock(&register_mutex);
	if (err >= 0 && copy_to_user(_gstatus, &gstatus, sizeof(gstatus)))
		err = -EFAULT;
	return err;
}
/*
 * SNDRV_TIMER_IOCTL_SELECT: bind the file to a timer instance,
 * closing any previously selected one, and (re)allocate the event
 * queue matching the read mode (tread or classic); tu->tread_sem
 * serializes this against the TREAD ioctl.
 */
static int snd_timer_user_tselect(struct file *file,
				  struct snd_timer_select __user *_tselect)
{
	struct snd_timer_user *tu;
	struct snd_timer_select tselect;
	char str[32];
	int err = 0;

	tu = file->private_data;
	mutex_lock(&tu->tread_sem);
	if (tu->timeri) {
		snd_timer_close(tu->timeri);
		tu->timeri = NULL;
	}
	if (copy_from_user(&tselect, _tselect, sizeof(tselect))) {
		err = -EFAULT;
		goto __err;
	}
	sprintf(str, "application %i", current->pid);
	if (tselect.id.dev_class != SNDRV_TIMER_CLASS_SLAVE)
		tselect.id.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
	err = snd_timer_open(&tu->timeri, str, &tselect.id, current->pid);
	if (err < 0)
		goto __err;

	/* drop both queues, then allocate the one matching tu->tread;
	 * NOTE(review): they are freed without tu->qlock held - looks
	 * safe only because the new instance has not been started yet,
	 * so no callback can touch them; confirm */
	kfree(tu->queue);
	tu->queue = NULL;
	kfree(tu->tqueue);
	tu->tqueue = NULL;
	if (tu->tread) {
		tu->tqueue = kmalloc(tu->queue_size * sizeof(struct snd_timer_tread),
				     GFP_KERNEL);
		if (tu->tqueue == NULL)
			err = -ENOMEM;
	} else {
		tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
				    GFP_KERNEL);
		if (tu->queue == NULL)
			err = -ENOMEM;
	}

	if (err < 0) {
		/* allocation failed: give the instance back */
		snd_timer_close(tu->timeri);
		tu->timeri = NULL;
	} else {
		tu->timeri->flags |= SNDRV_TIMER_IFLG_FAST;
		tu->timeri->callback = tu->tread
			? snd_timer_user_tinterrupt : snd_timer_user_interrupt;
		tu->timeri->ccallback = snd_timer_user_ccallback;
		tu->timeri->callback_data = (void *)tu;
	}

      __err:
	mutex_unlock(&tu->tread_sem);
	return err;
}
/*
 * SNDRV_TIMER_IOCTL_INFO: report static information about the timer
 * behind the currently selected instance.
 */
static int snd_timer_user_info(struct file *file,
			       struct snd_timer_info __user *_info)
{
	struct snd_timer_user *tu = file->private_data;
	struct snd_timer_info *info;
	struct snd_timer *t;
	int rc = 0;

	if (!tu->timeri)
		return -EBADFD;
	t = tu->timeri->timer;
	if (!t)
		return -EBADFD;

	/* kzalloc keeps the copied-out structure fully initialized */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->card = t->card ? t->card->number : -1;
	if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
		info->flags |= SNDRV_TIMER_FLG_SLAVE;
	strlcpy(info->id, t->id, sizeof(info->id));
	strlcpy(info->name, t->name, sizeof(info->name));
	info->resolution = t->hw.resolution;
	if (copy_to_user(_info, info, sizeof(*_info)))
		rc = -EFAULT;
	kfree(info);
	return rc;
}
/*
 * SNDRV_TIMER_IOCTL_PARAMS: configure the selected instance (ticks,
 * event filter, flags, queue size).
 *
 * Fixes: restores the "&params" references corrupted by an encoding
 * bug, and zeroes the on-stack snd_timer_tread used for the EARLY
 * event - the record is later copied verbatim to user space, so
 * uninitialized padding would leak kernel stack memory (same issue as
 * the upstream CVE-2016-4569 fix).
 */
static int snd_timer_user_params(struct file *file,
				 struct snd_timer_params __user *_params)
{
	struct snd_timer_user *tu;
	struct snd_timer_params params;
	struct snd_timer *t;
	struct snd_timer_read *tr;
	struct snd_timer_tread *ttr;
	int err;

	tu = file->private_data;
	if (!tu->timeri)
		return -EBADFD;
	t = tu->timeri->timer;
	if (!t)
		return -EBADFD;
	if (copy_from_user(&params, _params, sizeof(params)))
		return -EFAULT;
	/* a real (non-slave) timer needs a positive period */
	if (!(t->hw.flags & SNDRV_TIMER_HW_SLAVE) && params.ticks < 1) {
		err = -EINVAL;
		goto _end;
	}
	if (params.queue_size > 0 &&
	    (params.queue_size < 32 || params.queue_size > 1024)) {
		err = -EINVAL;
		goto _end;
	}
	/* reject unknown filter bits */
	if (params.filter & ~((1<<SNDRV_TIMER_EVENT_RESOLUTION)|
			      (1<<SNDRV_TIMER_EVENT_TICK)|
			      (1<<SNDRV_TIMER_EVENT_START)|
			      (1<<SNDRV_TIMER_EVENT_STOP)|
			      (1<<SNDRV_TIMER_EVENT_CONTINUE)|
			      (1<<SNDRV_TIMER_EVENT_PAUSE)|
			      (1<<SNDRV_TIMER_EVENT_SUSPEND)|
			      (1<<SNDRV_TIMER_EVENT_RESUME)|
			      (1<<SNDRV_TIMER_EVENT_MSTART)|
			      (1<<SNDRV_TIMER_EVENT_MSTOP)|
			      (1<<SNDRV_TIMER_EVENT_MCONTINUE)|
			      (1<<SNDRV_TIMER_EVENT_MPAUSE)|
			      (1<<SNDRV_TIMER_EVENT_MSUSPEND)|
			      (1<<SNDRV_TIMER_EVENT_MRESUME))) {
		err = -EINVAL;
		goto _end;
	}
	snd_timer_stop(tu->timeri);
	spin_lock_irq(&t->lock);
	tu->timeri->flags &= ~(SNDRV_TIMER_IFLG_AUTO|
			       SNDRV_TIMER_IFLG_EXCLUSIVE|
			       SNDRV_TIMER_IFLG_EARLY_EVENT);
	if (params.flags & SNDRV_TIMER_PSFLG_AUTO)
		tu->timeri->flags |= SNDRV_TIMER_IFLG_AUTO;
	if (params.flags & SNDRV_TIMER_PSFLG_EXCLUSIVE)
		tu->timeri->flags |= SNDRV_TIMER_IFLG_EXCLUSIVE;
	if (params.flags & SNDRV_TIMER_PSFLG_EARLY_EVENT)
		tu->timeri->flags |= SNDRV_TIMER_IFLG_EARLY_EVENT;
	spin_unlock_irq(&t->lock);
	/* resize the event queue if requested; keep the old queue when
	 * the allocation fails */
	if (params.queue_size > 0 &&
	    (unsigned int)tu->queue_size != params.queue_size) {
		if (tu->tread) {
			ttr = kmalloc(params.queue_size * sizeof(*ttr),
				      GFP_KERNEL);
			if (ttr) {
				kfree(tu->tqueue);
				tu->queue_size = params.queue_size;
				tu->tqueue = ttr;
			}
		} else {
			tr = kmalloc(params.queue_size * sizeof(*tr),
				     GFP_KERNEL);
			if (tr) {
				kfree(tu->queue);
				tu->queue_size = params.queue_size;
				tu->queue = tr;
			}
		}
	}
	tu->qhead = tu->qtail = tu->qused = 0;
	if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
		if (tu->tread) {
			struct snd_timer_tread tread;
			/* zero the record incl. padding before it can be
			 * copied to user space */
			memset(&tread, 0, sizeof(tread));
			tread.event = SNDRV_TIMER_EVENT_EARLY;
			tread.tstamp.tv_sec = 0;
			tread.tstamp.tv_nsec = 0;
			tread.val = 0;
			snd_timer_user_append_to_tqueue(tu, &tread);
		} else {
			struct snd_timer_read *r = &tu->queue[0];
			r->resolution = 0;
			r->ticks = 0;
			tu->qused++;
			tu->qtail++;
		}
	}
	tu->filter = params.filter;
	tu->ticks = params.ticks;
	err = 0;
 _end:
	if (copy_to_user(_params, &params, sizeof(params)))
		return -EFAULT;
	return err;
}
/*
 * SNDRV_TIMER_IOCTL_STATUS: report the current state of the selected
 * instance (timestamp, resolution, lost ticks, queue fill level).
 */
static int snd_timer_user_status(struct file *file,
				 struct snd_timer_status __user *_status)
{
	struct snd_timer_user *tu = file->private_data;
	struct snd_timer_status status;

	if (!tu->timeri)
		return -EBADFD;
	memset(&status, 0, sizeof(status));
	status.tstamp = tu->tstamp;
	status.resolution = snd_timer_resolution(tu->timeri);
	status.lost = tu->timeri->lost;
	status.overrun = tu->overrun;
	/* qused is also updated from the interrupt callbacks */
	spin_lock_irq(&tu->qlock);
	status.queue = tu->qused;
	spin_unlock_irq(&tu->qlock);
	return copy_to_user(_status, &status, sizeof(status)) ? -EFAULT : 0;
}
/* SNDRV_TIMER_IOCTL_START: (re)start the selected instance */
static int snd_timer_user_start(struct file *file)
{
	struct snd_timer_user *tu = file->private_data;
	int rc;

	if (!tu->timeri)
		return -EBADFD;
	snd_timer_stop(tu->timeri);
	tu->timeri->lost = 0;
	tu->last_resolution = 0;
	rc = snd_timer_start(tu->timeri, tu->ticks);
	/* a positive "delayed start" result is reported as success */
	return rc < 0 ? rc : 0;
}
/*
 * STOP ioctl: stop the selected timer instance.
 * Returns 0 on success or a negative error code.
 */
static int snd_timer_user_stop(struct file *file)
{
	struct snd_timer_user *tu = file->private_data;
	int err;

	if (!tu->timeri)
		return -EBADFD;
	err = snd_timer_stop(tu->timeri);
	return err < 0 ? err : 0;
}
/*
 * CONTINUE ioctl: resume a previously stopped/paused timer instance,
 * resetting the lost-tick counter first.
 */
static int snd_timer_user_continue(struct file *file)
{
	struct snd_timer_user *tu = file->private_data;
	int err;

	if (!tu->timeri)
		return -EBADFD;
	tu->timeri->lost = 0;
	err = snd_timer_continue(tu->timeri);
	return err < 0 ? err : 0;
}
/*
 * PAUSE ioctl: pause the selected timer instance.
 * Returns 0 on success or a negative error code.
 */
static int snd_timer_user_pause(struct file *file)
{
	struct snd_timer_user *tu = file->private_data;
	int err;

	if (!tu->timeri)
		return -EBADFD;
	err = snd_timer_pause(tu->timeri);
	return err < 0 ? err : 0;
}
/* Pre-1.0 ALSA ioctl numbers, kept so old userspace binaries keep
 * working; they dispatch to the same handlers as the current ones. */
enum {
	SNDRV_TIMER_IOCTL_START_OLD = _IO('T', 0x20),
	SNDRV_TIMER_IOCTL_STOP_OLD = _IO('T', 0x21),
	SNDRV_TIMER_IOCTL_CONTINUE_OLD = _IO('T', 0x22),
	SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
};
/*
 * Main ioctl dispatcher for the userspace timer device.
 *
 * NOTE(review): only the TREAD case takes tu->tread_sem; later upstream
 * kernels serialize the whole ioctl path against read() with a
 * per-client mutex to close races on the queue buffers — confirm
 * whether that hardening should be backported here.
 */
static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct snd_timer_user *tu;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	tu = file->private_data;
	switch (cmd) {
	case SNDRV_TIMER_IOCTL_PVERSION:
		return put_user(SNDRV_TIMER_VERSION, p) ? -EFAULT : 0;
	case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
		return snd_timer_user_next_device(argp);
	case SNDRV_TIMER_IOCTL_TREAD:
	{
		int xarg;
		/* Switching to the extended (tread) event format is only
		 * permitted before a timer instance has been selected. */
		mutex_lock(&tu->tread_sem);
		if (tu->timeri) { /* too late */
			mutex_unlock(&tu->tread_sem);
			return -EBUSY;
		}
		if (get_user(xarg, p)) {
			mutex_unlock(&tu->tread_sem);
			return -EFAULT;
		}
		tu->tread = xarg ? 1 : 0;
		mutex_unlock(&tu->tread_sem);
		return 0;
	}
	case SNDRV_TIMER_IOCTL_GINFO:
		return snd_timer_user_ginfo(file, argp);
	case SNDRV_TIMER_IOCTL_GPARAMS:
		return snd_timer_user_gparams(file, argp);
	case SNDRV_TIMER_IOCTL_GSTATUS:
		return snd_timer_user_gstatus(file, argp);
	case SNDRV_TIMER_IOCTL_SELECT:
		return snd_timer_user_tselect(file, argp);
	case SNDRV_TIMER_IOCTL_INFO:
		return snd_timer_user_info(file, argp);
	case SNDRV_TIMER_IOCTL_PARAMS:
		return snd_timer_user_params(file, argp);
	case SNDRV_TIMER_IOCTL_STATUS:
		return snd_timer_user_status(file, argp);
	/* Old and new ioctl numbers share the same handlers. */
	case SNDRV_TIMER_IOCTL_START:
	case SNDRV_TIMER_IOCTL_START_OLD:
		return snd_timer_user_start(file);
	case SNDRV_TIMER_IOCTL_STOP:
	case SNDRV_TIMER_IOCTL_STOP_OLD:
		return snd_timer_user_stop(file);
	case SNDRV_TIMER_IOCTL_CONTINUE:
	case SNDRV_TIMER_IOCTL_CONTINUE_OLD:
		return snd_timer_user_continue(file);
	case SNDRV_TIMER_IOCTL_PAUSE:
	case SNDRV_TIMER_IOCTL_PAUSE_OLD:
		return snd_timer_user_pause(file);
	}
	return -ENOTTY;
}
/* fasync handler: register/unregister this client for SIGIO delivery. */
static int snd_timer_user_fasync(int fd, struct file *file, int on)
{
	struct snd_timer_user *tu = file->private_data;

	return fasync_helper(fd, file, on, &tu->fasync);
}
/*
 * read() for the timer device: block until at least one queued timer
 * event is available, then copy whole events (struct snd_timer_read or
 * struct snd_timer_tread, depending on tu->tread) to userspace.
 * Returns the number of bytes copied, or a negative error code if
 * nothing was copied.
 *
 * NOTE(review): tu->qlock is dropped around copy_to_user() after
 * tu->qhead has already been advanced; a concurrent ioctl that
 * reallocates tu->queue/tu->tqueue could race with that window
 * (addressed upstream by "ALSA: timer: Fix race between read and
 * ioctl") — confirm whether that fix applies here.
 */
static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
				   size_t count, loff_t *offset)
{
	struct snd_timer_user *tu;
	long result = 0, unit;
	int err = 0;
	tu = file->private_data;
	/* Event size depends on the (old vs. extended) queue format. */
	unit = tu->tread ? sizeof(struct snd_timer_tread) : sizeof(struct snd_timer_read);
	spin_lock_irq(&tu->qlock);
	while ((long)count - result >= unit) {
		while (!tu->qused) {
			wait_queue_t wait;
			/* Never sleep for non-blocking readers, nor once we
			 * already have data to hand back. */
			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
				err = -EAGAIN;
				break;
			}
			set_current_state(TASK_INTERRUPTIBLE);
			init_waitqueue_entry(&wait, current);
			add_wait_queue(&tu->qchange_sleep, &wait);
			/* qlock must be released while sleeping. */
			spin_unlock_irq(&tu->qlock);
			schedule();
			spin_lock_irq(&tu->qlock);
			remove_wait_queue(&tu->qchange_sleep, &wait);
			if (signal_pending(current)) {
				err = -ERESTARTSYS;
				break;
			}
		}
		/* Drop the lock before touching userspace memory. */
		spin_unlock_irq(&tu->qlock);
		if (err < 0)
			goto _error;
		if (tu->tread) {
			if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
					 sizeof(struct snd_timer_tread))) {
				err = -EFAULT;
				goto _error;
			}
		} else {
			if (copy_to_user(buffer, &tu->queue[tu->qhead++],
					 sizeof(struct snd_timer_read))) {
				err = -EFAULT;
				goto _error;
			}
		}
		/* The queue is a ring buffer: wrap the head index. */
		tu->qhead %= tu->queue_size;
		result += unit;
		buffer += unit;
		spin_lock_irq(&tu->qlock);
		tu->qused--;
	}
	spin_unlock_irq(&tu->qlock);
 _error:
	return result > 0 ? result : err;
}
/* poll()/select() support: readable whenever timer events are queued. */
static unsigned int snd_timer_user_poll(struct file *file, poll_table *wait)
{
	struct snd_timer_user *tu = file->private_data;
	unsigned int events = 0;

	poll_wait(file, &tu->qchange_sleep, wait);
	if (tu->qused)
		events |= POLLIN | POLLRDNORM;
	return events;
}
#ifdef CONFIG_COMPAT
#include "timer_compat.c"
#else
#define snd_timer_user_ioctl_compat NULL
#endif
/* File operations backing the userspace timer character device. */
static const struct file_operations snd_timer_f_ops =
{
	.owner = THIS_MODULE,
	.read = snd_timer_user_read,
	.open = snd_timer_user_open,
	.release = snd_timer_user_release,
	.llseek = no_llseek,
	.poll = snd_timer_user_poll,
	.unlocked_ioctl = snd_timer_user_ioctl,
	.compat_ioctl = snd_timer_user_ioctl_compat,
	.fasync = snd_timer_user_fasync,
};
/*
* ENTRY functions
*/
/*
 * Module init: register the system timer, create the userspace timer
 * device node and set up the procfs entry.
 *
 * NOTE(review): registration failures are only logged, never
 * propagated — the module reports success even if the device could not
 * be registered.  Confirm this best-effort behavior is intentional.
 */
static int __init alsa_timer_init(void)
{
	int err;
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
	snd_oss_info_register(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1,
			      "system timer");
#endif
	if ((err = snd_timer_register_system()) < 0)
		snd_printk(KERN_ERR "unable to register system timer (%i)\n",
			   err);
	if ((err = snd_register_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0,
				       &snd_timer_f_ops, NULL, "timer")) < 0)
		snd_printk(KERN_ERR "unable to register timer device (%i)\n",
			   err);
	snd_timer_proc_init();
	return 0;
}
/*
 * Module exit: remove the device node, free every timer still on the
 * global list (including the system timer) and tear down procfs.
 */
static void __exit alsa_timer_exit(void)
{
	struct snd_timer *timer, *tmp;

	snd_unregister_device(SNDRV_DEVICE_TYPE_TIMER, NULL, 0);
	/* unregister the system timer */
	list_for_each_entry_safe(timer, tmp, &snd_timer_list, device_list)
		snd_timer_free(timer);
	snd_timer_proc_done();
#ifdef SNDRV_OSS_INFO_DEV_TIMERS
	snd_oss_info_unregister(SNDRV_OSS_INFO_DEV_TIMERS, SNDRV_CARDS - 1);
#endif
}
module_init(alsa_timer_init)
module_exit(alsa_timer_exit)
/* Timer API exported for the other ALSA core modules and drivers. */
EXPORT_SYMBOL(snd_timer_open);
EXPORT_SYMBOL(snd_timer_close);
EXPORT_SYMBOL(snd_timer_resolution);
EXPORT_SYMBOL(snd_timer_start);
EXPORT_SYMBOL(snd_timer_stop);
EXPORT_SYMBOL(snd_timer_continue);
EXPORT_SYMBOL(snd_timer_pause);
EXPORT_SYMBOL(snd_timer_new);
EXPORT_SYMBOL(snd_timer_notify);
EXPORT_SYMBOL(snd_timer_global_new);
EXPORT_SYMBOL(snd_timer_global_free);
EXPORT_SYMBOL(snd_timer_global_register);
EXPORT_SYMBOL(snd_timer_interrupt);
| gpl-2.0 |
bestmjh47/android_kernel_A780L-stock | drivers/video/console/bitblit.c | 7652 | 10951 | /*
* linux/drivers/video/console/bitblit.c -- BitBlitting Operation
*
* Originally from the 'accel_*' routines in drivers/video/console/fbcon.c
*
* Copyright (C) 2004 Antonino Daplas <adaplas @pol.net>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <asm/types.h>
#include "fbcon.h"
/*
* Accelerated handlers.
*/
/*
 * Copy a glyph bitmap from src to dst, applying the console attributes
 * (underline / bold / reverse) bit-wise on the way.  The underline is
 * drawn in the last one or two rows depending on the font height.
 */
static void update_attr(u8 *dst, u8 *src, int attribute,
			struct vc_data *vc)
{
	int row_bytes = DIV_ROUND_UP(vc->vc_font.width, 8);
	int ul_rows = (vc->vc_font.height < 10) ? 1 : 2;
	unsigned int cellsize = vc->vc_font.height * row_bytes;
	int ul_start = cellsize - ul_rows * row_bytes;
	int i;

	for (i = 0; i < cellsize; i++) {
		u8 bits = src[i];

		if ((attribute & FBCON_ATTRIBUTE_UNDERLINE) && i >= ul_start)
			bits = 0xff;
		if (attribute & FBCON_ATTRIBUTE_BOLD)
			bits |= bits >> 1;
		if (attribute & FBCON_ATTRIBUTE_REVERSE)
			bits = ~bits;
		dst[i] = bits;
	}
}
static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int dy, int dx, int height, int width)
{
struct fb_copyarea area;
area.sx = sx * vc->vc_font.width;
area.sy = sy * vc->vc_font.height;
area.dx = dx * vc->vc_font.width;
area.dy = dy * vc->vc_font.height;
area.height = height * vc->vc_font.height;
area.width = width * vc->vc_font.width;
info->fbops->fb_copyarea(info, &area);
}
static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
struct fb_fillrect region;
region.color = attr_bgcol_ec(bgshift, vc, info);
region.dx = sx * vc->vc_font.width;
region.dy = sy * vc->vc_font.height;
region.width = width * vc->vc_font.width;
region.height = height * vc->vc_font.height;
region.rop = ROP_COPY;
info->fbops->fb_fillrect(info, ®ion);
}
/*
 * Blit a run of characters whose font width is a whole number of bytes,
 * so glyph rows need no bit-shifting.  Each glyph is padded into the
 * pixmap at 'dst' and the entire run is emitted with one imageblit.
 */
static inline void bit_putcs_aligned(struct vc_data *vc, struct fb_info *info,
				     const u16 *s, u32 attr, u32 cnt,
				     u32 d_pitch, u32 s_pitch, u32 cellsize,
				     struct fb_image *image, u8 *buf, u8 *dst)
{
	u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
	u32 idx = vc->vc_font.width >> 3;
	u8 *src;
	while (cnt--) {
		/* Look up the glyph bitmap for the next character code. */
		src = vc->vc_font.data + (scr_readw(s++)&
					  charmask)*cellsize;
		if (attr) {
			/* Render underline/bold/reverse into scratch buf. */
			update_attr(buf, src, attr, vc);
			src = buf;
		}
		/* One-byte-wide fonts take the fast padding path. */
		if (likely(idx == 1))
			__fb_pad_aligned_buffer(dst, d_pitch, src, idx,
						image->height);
		else
			fb_pad_aligned_buffer(dst, d_pitch, src, idx,
					      image->height);
		dst += s_pitch;
	}
	info->fbops->fb_imageblit(info, image);
}
/*
 * Blit a run of characters whose font width is not a multiple of 8:
 * glyph rows must be shifted bit-wise so consecutive glyphs pack
 * tightly.  shift_low/shift_high track the running sub-byte offset.
 */
static inline void bit_putcs_unaligned(struct vc_data *vc,
				       struct fb_info *info, const u16 *s,
				       u32 attr, u32 cnt, u32 d_pitch,
				       u32 s_pitch, u32 cellsize,
				       struct fb_image *image, u8 *buf,
				       u8 *dst)
{
	u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
	u32 shift_low = 0, mod = vc->vc_font.width % 8;
	u32 shift_high = 8;
	u32 idx = vc->vc_font.width >> 3;
	u8 *src;
	while (cnt--) {
		/* Look up the glyph bitmap for the next character code. */
		src = vc->vc_font.data + (scr_readw(s++)&
					  charmask)*cellsize;
		if (attr) {
			/* Render underline/bold/reverse into scratch buf. */
			update_attr(buf, src, attr, vc);
			src = buf;
		}
		fb_pad_unaligned_buffer(dst, d_pitch, src, idx,
					image->height, shift_high,
					shift_low, mod);
		/* Advance the bit offset; when it wraps past a byte
		 * boundary the destination pointer advances one byte
		 * less. */
		shift_low += mod;
		dst += (shift_low >= 8) ? s_pitch : s_pitch - 1;
		shift_low &= 7;
		shift_high = 8 - shift_low;
	}
	info->fbops->fb_imageblit(info, image);
}
/*
 * Draw 'count' characters from 's' at character cell (xx, yy) with the
 * given fg/bg colors.  The run is split into chunks that fit the
 * driver's pixmap and dispatched to the aligned or unaligned helper
 * depending on whether the font width is byte-aligned.
 */
static void bit_putcs(struct vc_data *vc, struct fb_info *info,
		      const unsigned short *s, int count, int yy, int xx,
		      int fg, int bg)
{
	struct fb_image image;
	u32 width = DIV_ROUND_UP(vc->vc_font.width, 8);
	u32 cellsize = width * vc->vc_font.height;
	/* Maximum characters that fit in the pixmap in one blit. */
	u32 maxcnt = info->pixmap.size/cellsize;
	u32 scan_align = info->pixmap.scan_align - 1;
	u32 buf_align = info->pixmap.buf_align - 1;
	u32 mod = vc->vc_font.width % 8, cnt, pitch, size;
	u32 attribute = get_attribute(info, scr_readw(s));
	u8 *dst, *buf = NULL;
	image.fg_color = fg;
	image.bg_color = bg;
	image.dx = xx * vc->vc_font.width;
	image.dy = yy * vc->vc_font.height;
	image.height = vc->vc_font.height;
	image.depth = 1;
	/* Scratch buffer for attribute-modified glyphs, one cell big. */
	if (attribute) {
		buf = kmalloc(cellsize, GFP_KERNEL);
		if (!buf)
			return;
	}
	while (count) {
		if (count > maxcnt)
			cnt = maxcnt;
		else
			cnt = count;
		image.width = vc->vc_font.width * cnt;
		/* Round pitch/size up to the driver's alignment. */
		pitch = DIV_ROUND_UP(image.width, 8) + scan_align;
		pitch &= ~scan_align;
		size = pitch * image.height + buf_align;
		size &= ~buf_align;
		dst = fb_get_buffer_offset(info, &info->pixmap, size);
		image.data = dst;
		if (!mod)
			bit_putcs_aligned(vc, info, s, attribute, cnt, pitch,
					  width, cellsize, &image, buf, dst);
		else
			bit_putcs_unaligned(vc, info, s, attribute, cnt,
					    pitch, width, cellsize, &image,
					    buf, dst);
		image.dx += cnt * vc->vc_font.width;
		count -= cnt;
		s += cnt;
	}
	/* buf is always NULL except when in monochrome mode, so in this case
	   it's a gain to check buf against NULL even though kfree() handles
	   NULL pointers just fine */
	if (unlikely(buf))
		kfree(buf);
}
/*
 * Clear the right-hand and bottom margins — the screen area not covered
 * by whole character cells.  With bottom_only set, only the bottom
 * strip is cleared.
 *
 * Fix: both fb_fillrect() calls passed "®ion" — an HTML-entity-mangled
 * "&region" (&reg; -> (R)) — which does not compile; restored the
 * address-of expressions.
 */
static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
			      int bottom_only)
{
	/* Background attribute shifts when a 512-glyph font is in use. */
	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
	unsigned int cw = vc->vc_font.width;
	unsigned int ch = vc->vc_font.height;
	unsigned int rw = info->var.xres - (vc->vc_cols*cw);   /* right width  */
	unsigned int bh = info->var.yres - (vc->vc_rows*ch);   /* bottom height */
	unsigned int rs = info->var.xres - rw;                 /* right start  */
	unsigned int bs = info->var.yres - bh;                 /* bottom start */
	struct fb_fillrect region;

	region.color = attr_bgcol_ec(bgshift, vc, info);
	region.rop = ROP_COPY;

	if (rw && !bottom_only) {
		region.dx = info->var.xoffset + rs;
		region.dy = 0;
		region.width = rw;
		region.height = info->var.yres_virtual;
		info->fbops->fb_fillrect(info, &region);
	}

	if (bh) {
		region.dx = info->var.xoffset;
		region.dy = info->var.yoffset + bs;
		region.width = rs;
		region.height = bh;
		info->fbops->fb_fillrect(info, &region);
	}
}
/*
 * Draw, move or erase the console cursor.  Builds an fb_cursor request
 * from cached state in ops->cursor_state, setting FB_CUR_SET* flags
 * only for the pieces that actually changed, then hands it to the
 * driver's fb_cursor (falling back to soft_cursor on error or when the
 * driver has none).
 */
static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
		       int softback_lines, int fg, int bg)
{
	struct fb_cursor cursor;
	struct fbcon_ops *ops = info->fbcon_par;
	unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
	int w = DIV_ROUND_UP(vc->vc_font.width, 8), c;
	int y = real_y(ops->p, vc->vc_y);
	int attribute, use_sw = (vc->vc_cursor_type & 0x10);
	int err = 1;
	char *src;
	cursor.set = 0;
	/* When scrolled back into the softback buffer, hide the cursor if
	 * its row scrolled off the visible area. */
	if (softback_lines) {
		if (y + softback_lines >= vc->vc_rows) {
			mode = CM_ERASE;
			ops->cursor_flash = 0;
			return;
		} else
			y += softback_lines;
	}
	/* Character currently under the cursor and its glyph bitmap. */
	c = scr_readw((u16 *) vc->vc_pos);
	attribute = get_attribute(info, c);
	src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
	if (ops->cursor_state.image.data != src ||
	    ops->cursor_reset) {
		ops->cursor_state.image.data = src;
		cursor.set |= FB_CUR_SETIMAGE;
	}
	/* Attribute (underline/bold/reverse) needs a modified copy of the
	 * glyph; kept in ops->cursor_data across calls. */
	if (attribute) {
		u8 *dst;
		dst = kmalloc(w * vc->vc_font.height, GFP_ATOMIC);
		if (!dst)
			return;
		kfree(ops->cursor_data);
		ops->cursor_data = dst;
		update_attr(dst, src, attribute, vc);
		src = dst;
	}
	if (ops->cursor_state.image.fg_color != fg ||
	    ops->cursor_state.image.bg_color != bg ||
	    ops->cursor_reset) {
		ops->cursor_state.image.fg_color = fg;
		ops->cursor_state.image.bg_color = bg;
		cursor.set |= FB_CUR_SETCMAP;
	}
	if ((ops->cursor_state.image.dx != (vc->vc_font.width * vc->vc_x)) ||
	    (ops->cursor_state.image.dy != (vc->vc_font.height * y)) ||
	    ops->cursor_reset) {
		ops->cursor_state.image.dx = vc->vc_font.width * vc->vc_x;
		ops->cursor_state.image.dy = vc->vc_font.height * y;
		cursor.set |= FB_CUR_SETPOS;
	}
	if (ops->cursor_state.image.height != vc->vc_font.height ||
	    ops->cursor_state.image.width != vc->vc_font.width ||
	    ops->cursor_reset) {
		ops->cursor_state.image.height = vc->vc_font.height;
		ops->cursor_state.image.width = vc->vc_font.width;
		cursor.set |= FB_CUR_SETSIZE;
	}
	/* NOTE(review): this zeroes cursor.hot.y but never
	 * ops->cursor_state.hot.y, so the condition keeps re-firing; it
	 * looks like it was meant to assign ops->cursor_state.hot.y.
	 * The same code exists upstream — confirm before changing. */
	if (ops->cursor_state.hot.x || ops->cursor_state.hot.y ||
	    ops->cursor_reset) {
		ops->cursor_state.hot.x = cursor.hot.y = 0;
		cursor.set |= FB_CUR_SETHOT;
	}
	/* (Re)build the cursor shape mask when the size or the requested
	 * cursor shape changed. */
	if (cursor.set & FB_CUR_SETSIZE ||
	    vc->vc_cursor_type != ops->p->cursor_shape ||
	    ops->cursor_state.mask == NULL ||
	    ops->cursor_reset) {
		char *mask = kmalloc(w*vc->vc_font.height, GFP_ATOMIC);
		int cur_height, size, i = 0;
		u8 msk = 0xff;
		if (!mask)
			return;
		kfree(ops->cursor_state.mask);
		ops->cursor_state.mask = mask;
		ops->p->cursor_shape = vc->vc_cursor_type;
		cursor.set |= FB_CUR_SETSHAPE;
		/* Height of the filled part, from the cell bottom up. */
		switch (ops->p->cursor_shape & CUR_HWMASK) {
		case CUR_NONE:
			cur_height = 0;
			break;
		case CUR_UNDERLINE:
			cur_height = (vc->vc_font.height < 10) ? 1 : 2;
			break;
		case CUR_LOWER_THIRD:
			cur_height = vc->vc_font.height/3;
			break;
		case CUR_LOWER_HALF:
			cur_height = vc->vc_font.height >> 1;
			break;
		case CUR_TWO_THIRDS:
			cur_height = (vc->vc_font.height << 1)/3;
			break;
		case CUR_BLOCK:
		default:
			cur_height = vc->vc_font.height;
			break;
		}
		/* Mask: zeros above the cursor area, ones inside it. */
		size = (vc->vc_font.height - cur_height) * w;
		while (size--)
			mask[i++] = ~msk;
		size = cur_height * w;
		while (size--)
			mask[i++] = msk;
	}
	switch (mode) {
	case CM_ERASE:
		ops->cursor_state.enable = 0;
		break;
	case CM_DRAW:
	case CM_MOVE:
	default:
		/* Software cursor (bit 4 of the type) disables drawing. */
		ops->cursor_state.enable = (use_sw) ? 0 : 1;
		break;
	}
	cursor.image.data = src;
	cursor.image.fg_color = ops->cursor_state.image.fg_color;
	cursor.image.bg_color = ops->cursor_state.image.bg_color;
	cursor.image.dx = ops->cursor_state.image.dx;
	cursor.image.dy = ops->cursor_state.image.dy;
	cursor.image.height = ops->cursor_state.image.height;
	cursor.image.width = ops->cursor_state.image.width;
	cursor.hot.x = ops->cursor_state.hot.x;
	cursor.hot.y = ops->cursor_state.hot.y;
	cursor.mask = ops->cursor_state.mask;
	cursor.enable = ops->cursor_state.enable;
	cursor.image.depth = 1;
	cursor.rop = ROP_XOR;
	if (info->fbops->fb_cursor)
		err = info->fbops->fb_cursor(info, &cursor);
	/* Fall back to the generic software cursor on error. */
	if (err)
		soft_cursor(info, &cursor);
	ops->cursor_reset = 0;
}
/*
 * Pan the display to the offsets cached in ops->var, then mirror back
 * what the hardware actually accepted.  Returns fb_pan_display's
 * result.
 */
static int bit_update_start(struct fb_info *info)
{
	struct fbcon_ops *ops = info->fbcon_par;
	int ret = fb_pan_display(info, &ops->var);

	ops->var.xoffset = info->var.xoffset;
	ops->var.yoffset = info->var.yoffset;
	ops->var.vmode = info->var.vmode;
	return ret;
}
/*
 * Install the unrotated (plain bitblit) drawing primitives into the
 * fbcon ops table; hands over to the rotation variants if a rotation
 * is active.
 */
void fbcon_set_bitops(struct fbcon_ops *ops)
{
	ops->bmove = bit_bmove;
	ops->clear = bit_clear;
	ops->putcs = bit_putcs;
	ops->clear_margins = bit_clear_margins;
	ops->cursor = bit_cursor;
	ops->update_start = bit_update_start;
	/* Plain bitblit needs no pre-rotated font data. */
	ops->rotate_font = NULL;
	if (ops->rotate)
		fbcon_set_rotate(ops);
}
/* Exported for the fbcon core module. */
EXPORT_SYMBOL(fbcon_set_bitops);
MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("Bit Blitting Operation");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ShinySide/HispAsian_5.1.1 | drivers/gpu/drm/nouveau/nv17_tv_modes.c | 8164 | 21909 | /*
* Copyright (C) 2009 Francisco Jerez.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_hw.h"
#include "nv17_tv.h"
/* Human-readable names for the TV-norm connector property, indexed by
 * the TV_NORM_* enum. */
char *nv17_tv_norm_names[NUM_TV_NORMS] = {
	[TV_NORM_PAL] = "PAL",
	[TV_NORM_PAL_M] = "PAL-M",
	[TV_NORM_PAL_N] = "PAL-N",
	[TV_NORM_PAL_NC] = "PAL-Nc",
	[TV_NORM_NTSC_M] = "NTSC-M",
	[TV_NORM_NTSC_J] = "NTSC-J",
	[TV_NORM_HD480I] = "hd480i",
	[TV_NORM_HD480P] = "hd480p",
	[TV_NORM_HD576I] = "hd576i",
	[TV_NORM_HD576P] = "hd576p",
	[TV_NORM_HD720P] = "hd720p",
	[TV_NORM_HD1080I] = "hd1080i"
};
/* TV standard specific parameters */
/*
 * Per-norm encoder configuration.  Interlaced SD norms use the internal
 * TV encoder (TV_ENC_MODE: active width/height, vertical refresh in mHz
 * and a dump of the 0x40 TV-encoder registers); progressive/HD norms
 * bypass it (CTV_ENC_MODE: a DRM mode line plus CRTC bypass register
 * values).  Register values are hardware dumps — do not hand-edit.
 */
struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = {
	[TV_NORM_PAL] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 576, 50000, {
					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
				} } } },
	[TV_NORM_PAL_M] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 480, 59940, {
					0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c,
					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },
	[TV_NORM_PAL_N] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 576, 50000, {
					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
					0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },
	[TV_NORM_PAL_NC] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 576, 50000, {
					0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
				} } } },
	[TV_NORM_NTSC_M] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 480, 59940, {
					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c,
					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },
	[TV_NORM_NTSC_J] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 480, 59940, {
					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },
	/* hd480i shares the NTSC-J encoder setup. */
	[TV_NORM_HD480I] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 480, 59940, {
					0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
					0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
					0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
					0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
				} } } },
	/* hd576i shares the PAL encoder setup. */
	[TV_NORM_HD576I] = { TV_ENC_MODE, {
			.tv_enc_mode = { 720, 576, 50000, {
					0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
					0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
					0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
					0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
					0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
					0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
					0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
					0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
				} } } },
	[TV_NORM_HD480P] = { CTV_ENC_MODE, {
			.ctv_enc_mode = {
				.mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000,
						   720, 735, 743, 858, 0, 480, 490, 494, 525, 0,
						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
				.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
					      0x354003a, 0x40000, 0x6f0344, 0x18100000,
					      0x10160004, 0x10060005, 0x1006000c, 0x10060020,
					      0x10060021, 0x140e0022, 0x10060202, 0x1802020a,
					      0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff,
					      0x10000fff, 0x10000fff, 0x10000fff, 0x70,
					      0x3ff0000, 0x57, 0x2e001e, 0x258012c,
					      0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
					      0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
				} } } },
	[TV_NORM_HD576P] = { CTV_ENC_MODE, {
			.ctv_enc_mode = {
				.mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000,
						   720, 730, 738, 864, 0, 576, 581, 585, 625, 0,
						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
				.ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
					      0x354003a, 0x40000, 0x6f0344, 0x18100000,
					      0x10060001, 0x10060009, 0x10060026, 0x10060027,
					      0x140e0028, 0x10060268, 0x1810026d, 0x10000fff,
					      0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff,
					      0x10000fff, 0x10000fff, 0x10000fff, 0x69,
					      0x3ff0000, 0x57, 0x2e001e, 0x258012c,
					      0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
					      0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
				} } } },
	[TV_NORM_HD720P] = { CTV_ENC_MODE, {
			.ctv_enc_mode = {
				.mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250,
						   1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0,
						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
				.ctv_regs = { 0x1260394, 0x0, 0x0, 0x622,
					      0x66b0021, 0x6004a, 0x1210626, 0x8170000,
					      0x70004, 0x70016, 0x70017, 0x40f0018,
					      0x702e8, 0x81702ed, 0xfff, 0xfff,
					      0xfff, 0xfff, 0xfff, 0xfff,
					      0xfff, 0xfff, 0xfff, 0x0,
					      0x2e40001, 0x58, 0x2e001e, 0x258012c,
					      0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300,
					      0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0
				} } } },
	[TV_NORM_HD1080I] = { CTV_ENC_MODE, {
			.ctv_enc_mode = {
				.mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250,
						   1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0,
						   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
						   | DRM_MODE_FLAG_INTERLACE) },
				.ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868,
					      0x8940028, 0x60054, 0xe80870, 0xbf70000,
					      0xbc70004, 0x70005, 0x70012, 0x70013,
					      0x40f0014, 0x70230, 0xbf70232, 0xbf70233,
					      0x1c70237, 0x70238, 0x70244, 0x70245,
					      0x40f0246, 0x70462, 0x1f70464, 0x0,
					      0x2e40001, 0x58, 0x2e001e, 0x258012c,
					      0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300,
					      0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0
				} } } }
};
/*
* The following is some guesswork on how the TV encoder flicker
* filter/rescaler works:
*
* It seems to use some sort of resampling filter, it is controlled
* through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, they
* control the horizontal and vertical stage respectively, there is
* also NV_PTV_HFILTER2 the blob fills identically to NV_PTV_HFILTER,
* but they seem to do nothing. A rough guess might be that they could
* be used to independently control the filtering of each interlaced
* field, but I don't know how they are enabled. The whole filtering
* process seems to be disabled with bits 26:27 of PTV_200, but we
* aren't doing that.
*
* The layout of both register sets is the same:
*
* A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40]
* B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c]
*
* Each coefficient is stored in bits [31],[15:9] in two's complement
* format. They seem to be some kind of weights used in a low-pass
* filter. Both A and B coefficients are applied to the 14 nearest
* samples on each side (Listed from nearest to furthermost. They
* roughly cover 2 framebuffer pixels on each side). They are
* probably multiplied with some more hardwired weights before being
* used: B-coefficients are applied the same on both sides,
* A-coefficients are inverted before being applied to the opposite
* side.
*
* After all the hassle, I got the following formula by empirical
* means...
*/
#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o)

/* Fixed-point scale factors (powers of two) used by the empirical
 * filter-coefficient formula in tv_setup_filter(). */
#define id1 (1LL << 8)
#define id2 (1LL << 16)
#define id3 (1LL << 24)
#define id4 (1LL << 32)
#define id5 (1LL << 48)

/*
 * Empirically determined filter parameters: for each of the 4
 * coefficient banks there are 16 values — polynomial coefficients in
 * the tap index i (constant, i, i^2, i^3), split into a constant term
 * (k*), a rescale term (k*r, multiplied by the rescale factor) and a
 * flicker term (k*f / k*rf, multiplied by the flicker setting).
 */
static struct filter_params{
	int64_t k1;
	int64_t ki;
	int64_t ki2;
	int64_t ki3;
	int64_t kr;
	int64_t kir;
	int64_t ki2r;
	int64_t ki3r;
	int64_t kf;
	int64_t kif;
	int64_t ki2f;
	int64_t ki3f;
	int64_t krf;
	int64_t kirf;
	int64_t ki2rf;
	int64_t ki3rf;
} fparams[2][4] = {
	/* Horizontal filter parameters */
	{
		{64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5,
		 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4,
		 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3,
		 -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1},
		{-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5,
		 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4,
		 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3,
		 -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1},
		{-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5,
		 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4,
		 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3,
		 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1},
		{51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5,
		 -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4,
		 -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3,
		 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1},
	},

	/* Vertical filter parameters */
	{
		{67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5,
		 -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4,
		 -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3,
		 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1},
		{6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5,
		 8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4,
		 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3,
		 -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1},
		{-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5,
		 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4,
		 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3,
		 -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1},
		{-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5,
		 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4,
		 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3,
		 -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1},
	}
};
/*
 * Compute the horizontal and vertical flicker-filter/rescaler
 * coefficient tables from the current mode, overscan and flicker
 * settings, using the empirical fparams polynomial.  Results are
 * stored in tv_enc->state.{hfilter,vfilter} in the hardware layout
 * (sign in bit 31, magnitude in bits 15:9).
 */
static void tv_setup_filter(struct drm_encoder *encoder)
{
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	struct drm_display_mode *mode = &encoder->crtc->mode;
	uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter,
				       &tv_enc->state.vfilter};
	int i, j, k;
	int32_t overscan = calc_overscan(tv_enc->overscan);
	/* Flicker setting recentered around 0, in id3 fixed point. */
	int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100);
	/* Rescale factor: framebuffer size over overscanned output size,
	 * per axis, clamped below to 1.0 (id2). */
	uint64_t rs[] = {mode->hdisplay * id3,
			 mode->vdisplay * id3};

	do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay);
	do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay);

	for (k = 0; k < 2; k++) {
		rs[k] = max((int64_t)rs[k], id2);

		for (j = 0; j < 4; j++) {
			struct filter_params *p = &fparams[k][j];

			for (i = 0; i < 7; i++) {
				/* Polynomial in the tap index i, with
				 * rescale- and flicker-weighted terms. */
				int64_t c = (p->k1 + p->ki*i + p->ki2*i*i +
					     p->ki3*i*i*i)
					+ (p->kr + p->kir*i + p->ki2r*i*i +
					   p->ki3r*i*i*i) * rs[k]
					+ (p->kf + p->kif*i + p->ki2f*i*i +
					   p->ki3f*i*i*i) * flicker
					+ (p->krf + p->kirf*i + p->ki2rf*i*i +
					   p->ki3rf*i*i*i) * flicker * rs[k];

				/* Round, then pack into the register's
				 * sign + 7-bit magnitude fields. */
				(*filters[k])[j][i] = (c + id5/2) >> 39
					& (0x1 << 31 | 0x7f << 9);
			}
		}
	}
}
/* Hardware state saving/restoring */
/*
 * Read back one filter register set (4 coefficient banks of 7 taps)
 * from the PTV block starting at 'base' into 'regs'.
 */
static void tv_save_filter(struct drm_device *dev, uint32_t base,
			   uint32_t regs[4][7])
{
	static const uint32_t bank_off[] = { 0x0, 0x1c, 0x40, 0x5c };
	int bank, tap;

	for (bank = 0; bank < 4; bank++)
		for (tap = 0; tap < 7; tap++)
			regs[bank][tap] =
				nv_read_ptv(dev, base + bank_off[bank] + 4 * tap);
}
/*
 * Write one filter register set (4 coefficient banks of 7 taps) from
 * 'regs' into the PTV block starting at 'base'.
 */
static void tv_load_filter(struct drm_device *dev, uint32_t base,
			   uint32_t regs[4][7])
{
	static const uint32_t bank_off[] = { 0x0, 0x1c, 0x40, 0x5c };
	int bank, tap;

	for (bank = 0; bank < 4; bank++)
		for (tap = 0; tap < 7; tap++)
			nv_write_ptv(dev, base + bank_off[bank] + 4 * tap,
				     regs[bank][tap]);
}
/*
 * Snapshot the TV encoder hardware state: all 0x40 encoder registers,
 * the three filter banks and the listed PTV control registers.
 */
void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state)
{
	int i;

	for (i = 0; i < 0x40; i++)
		state->tv_enc[i] = nv_read_tv_enc(dev, i);

	tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter);
	tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
	tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter);

	/* nv_save_ptv() takes the register offset as a bare token. */
	nv_save_ptv(dev, state, 200);
	nv_save_ptv(dev, state, 204);
	nv_save_ptv(dev, state, 208);
	nv_save_ptv(dev, state, 20c);
	nv_save_ptv(dev, state, 304);
	nv_save_ptv(dev, state, 500);
	nv_save_ptv(dev, state, 504);
	nv_save_ptv(dev, state, 508);
	nv_save_ptv(dev, state, 600);
	nv_save_ptv(dev, state, 604);
	nv_save_ptv(dev, state, 608);
	nv_save_ptv(dev, state, 60c);
	nv_save_ptv(dev, state, 610);
	nv_save_ptv(dev, state, 614);
}
/*
 * Program the TV encoder hardware from a saved/computed state: all
 * 0x40 encoder registers, the three filter banks and the listed PTV
 * control registers, then pulse register 0x3e to latch the settings.
 */
void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
{
	int i;

	for (i = 0; i < 0x40; i++)
		nv_write_tv_enc(dev, i, state->tv_enc[i]);

	tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter);
	tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
	tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter);

	/* nv_load_ptv() takes the register offset as a bare token. */
	nv_load_ptv(dev, state, 200);
	nv_load_ptv(dev, state, 204);
	nv_load_ptv(dev, state, 208);
	nv_load_ptv(dev, state, 20c);
	nv_load_ptv(dev, state, 304);
	nv_load_ptv(dev, state, 500);
	nv_load_ptv(dev, state, 504);
	nv_load_ptv(dev, state, 508);
	nv_load_ptv(dev, state, 600);
	nv_load_ptv(dev, state, 604);
	nv_load_ptv(dev, state, 608);
	nv_load_ptv(dev, state, 60c);
	nv_load_ptv(dev, state, 610);
	nv_load_ptv(dev, state, 614);

	/* This is required for some settings to kick in. */
	nv_write_tv_enc(dev, 0x3e, 1);
	nv_write_tv_enc(dev, 0x3e, 0);
}
/* Timings similar to the ones the blob sets */
/*
 * Mode list exposed for the TV output.  The low-resolution entries use
 * DBLSCAN plus CLKDIV2 (each line doubled, pixel clock halved); the
 * remaining modes are plain progressive timings.
 */
const struct drm_display_mode nv17_tv_modes[] = {
	{ DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
		   320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
	{ DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0,
		   320, 344, 392, 560, 0, 240, 240, 246, 263, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
	{ DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0,
		   400, 432, 496, 640, 0, 300, 300, 303, 314, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
		   | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0,
		   640, 672, 768, 880, 0, 480, 480, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0,
		   720, 752, 872, 960, 0, 480, 480, 493, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0,
		   720, 776, 856, 960, 0, 576, 576, 588, 597, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0,
		   800, 840, 920, 1040, 0, 600, 600, 604, 618, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0,
		   1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	{}	/* sentinel */
};
/*
 * Program the output-routing and picture-property registers for the
 * currently selected TV subconnector.
 *
 * ptv_204 selects the signal routing (the 0x?10000/0x100000 bits appear
 * to choose which physical pin carries the signal — confirm against
 * hardware docs); tv_enc[0x7] selects the encoder output mode.
 * Saturation and hue are rescaled from the 0-255 / 0-100 property
 * ranges onto the norm's nominal register values.
 */
void nv17_tv_update_properties(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct nv17_tv_state *regs = &tv_enc->state;
	struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
	/* explicit user selection wins over the autodetected subconnector */
	int subconnector = tv_enc->select_subconnector ?
						tv_enc->select_subconnector :
						tv_enc->subconnector;

	switch (subconnector) {
	case DRM_MODE_SUBCONNECTOR_Composite:
	{
		regs->ptv_204 = 0x2;

		/* The composite connector may be found on either pin. */
		if (tv_enc->pin_mask & 0x4)
			regs->ptv_204 |= 0x010000;
		else if (tv_enc->pin_mask & 0x2)
			regs->ptv_204 |= 0x100000;
		else
			regs->ptv_204 |= 0x110000;

		regs->tv_enc[0x7] = 0x10;
		break;
	}
	case DRM_MODE_SUBCONNECTOR_SVIDEO:
		regs->ptv_204 = 0x11012;
		regs->tv_enc[0x7] = 0x18;
		break;
	case DRM_MODE_SUBCONNECTOR_Component:
		regs->ptv_204 = 0x111333;
		regs->tv_enc[0x7] = 0x14;
		break;
	case DRM_MODE_SUBCONNECTOR_SCART:
		regs->ptv_204 = 0x111012;
		regs->tv_enc[0x7] = 0x18;
		break;
	}

	/* scale the norm's nominal saturation values by the property (0-255) */
	regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20],
					 255, tv_enc->saturation);
	regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22],
					 255, tv_enc->saturation);
	/* hue property is 0-100, register takes 0-255 */
	regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;

	/* push the updated registers to hardware */
	nv_load_ptv(dev, regs, 204);
	nv_load_tv_enc(dev, regs, 7);
	nv_load_tv_enc(dev, regs, 20);
	nv_load_tv_enc(dev, regs, 22);
	nv_load_tv_enc(dev, regs, 25);
}
/*
 * Reprogram the TV rescaler for the current overscan setting: update
 * ptv_208 with the computed overscan factor, recompute the filter
 * coefficients and write everything back to hardware.
 */
void nv17_tv_update_rescaler(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	struct nv17_tv_state *regs = &tv_enc->state;

	/* overscan factor lives in bits 8+ of ptv_208 */
	regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8);

	tv_setup_filter(encoder);

	nv_load_ptv(dev, regs, 208);
	tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter);
	tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2);
	tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter);
}
/*
 * Reprogram the RAMDAC flat-panel rescaler used for component TV
 * ("ctv") output: compute the black margins and scale ratios that map
 * the CRTC mode onto the encoder's output mode, honouring the overscan
 * property, then write the FP valid-window and scaling registers.
 */
void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
	int head = nouveau_crtc(encoder->crtc)->index;
	struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
	struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
	struct drm_display_mode *output_mode =
		&get_tv_norm(encoder)->ctv_enc_mode.mode;
	int overscan, hmargin, vmargin, hratio, vratio;

	/* The rescaler doesn't do the right thing for interlaced modes. */
	if (output_mode->flags & DRM_MODE_FLAG_INTERLACE)
		overscan = 100;
	else
		overscan = tv_enc->overscan;

	/* centred black border around the active area */
	hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
	vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;

	/* shrink the margins according to the overscan property; the
	 * lower bound caps the fully-overscanned margin at 5% of the
	 * output size */
	hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20),
			      hmargin, overscan);
	vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20),
			      vmargin, overscan);

	/* scale factors in 0x800 (2048) fixed-point units; the vertical
	 * ratio is rounded down to a multiple of 4 by the & ~3 */
	hratio = crtc_mode->hdisplay * 0x800 /
		(output_mode->hdisplay - 2*hmargin);
	vratio = crtc_mode->vdisplay * 0x800 /
		(output_mode->vdisplay - 2*vmargin) & ~3;

	regs->fp_horiz_regs[FP_VALID_START] = hmargin;
	regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
	regs->fp_vert_regs[FP_VALID_START] = vmargin;
	regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1;

	regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
		XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) |
		NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
		XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START,
		      regs->fp_horiz_regs[FP_VALID_START]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END,
		      regs->fp_horiz_regs[FP_VALID_END]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START,
		      regs->fp_vert_regs[FP_VALID_START]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END,
		      regs->fp_vert_regs[FP_VALID_END]);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1);
}
| gpl-2.0 |
keiranFTW/android_kernel_sony_montblanc | net/llc/llc_conn.c | 8932 | 27806 | /*
* llc_conn.c - Driver routines for connection component.
*
* Copyright (c) 1997 by Procom Technology, Inc.
* 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program can be redistributed or modified under the terms of the
* GNU General Public License as published by the Free Software Foundation.
* This program is distributed without any warranty or implied warranty
* of merchantability or fitness for a particular purpose.
*
* See the GNU General Public License for more details.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <net/llc_sap.h>
#include <net/llc_conn.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/llc_c_ev.h>
#include <net/llc_c_ac.h>
#include <net/llc_c_st.h>
#include <net/llc_pdu.h>
#if 0
#define dprintk(args...) printk(KERN_DEBUG args)
#else
#define dprintk(args...)
#endif
/* Forward declarations for the connection state-machine helpers below */
static int llc_find_offset(int state, int ev_type);
static void llc_conn_send_pdus(struct sock *sk);
static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
static int llc_exec_conn_trans_actions(struct sock *sk,
				       struct llc_conn_state_trans *trans,
				       struct sk_buff *ev);
static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
							struct sk_buff *skb);

/* Offset table on connection states transition diagram */
static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV];

/* LLC2 timer defaults (in jiffies); exported as sysctl tunables */
int sysctl_llc2_ack_timeout = LLC2_ACK_TIME * HZ;
int sysctl_llc2_p_timeout = LLC2_P_TIME * HZ;
int sysctl_llc2_rej_timeout = LLC2_REJ_TIME * HZ;
int sysctl_llc2_busy_timeout = LLC2_BUSY_TIME * HZ;
/**
* llc_conn_state_process - sends event to connection state machine
* @sk: connection
* @skb: occurred event
*
* Sends an event to connection state machine. After processing event
* (executing it's actions and changing state), upper layer will be
* indicated or confirmed, if needed. Returns 0 for success, 1 for
* failure. The socket lock has to be held before calling this function.
*/
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
{
	int rc;
	struct llc_sock *llc = llc_sk(skb->sk);
	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
	/*
	 * We have to hold the skb, because llc_conn_service will kfree it in
	 * the sending path and we need to look at the skb->cb, where we encode
	 * llc_conn_state_ev.
	 */
	skb_get(skb);
	ev->ind_prim = ev->cfm_prim = 0;
	/*
	 * Send event to state machine
	 */
	rc = llc_conn_service(skb->sk, skb);
	if (unlikely(rc != 0)) {
		printk(KERN_ERR "%s: llc_conn_service failed\n", __func__);
		goto out_kfree_skb;
	}
	if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {
		/* indicate or confirm not required */
		if (!skb->next)
			goto out_kfree_skb;
		goto out_skb_put;
	}
	/* when both an indication and a confirmation are pending, each
	 * consumer below frees one reference, so take an extra one */
	if (unlikely(ev->ind_prim && ev->cfm_prim)) /* Paranoia */
		skb_get(skb);
	/* deliver the indication (if any) to the upper layer */
	switch (ev->ind_prim) {
	case LLC_DATA_PRIM:
		llc_save_primitive(sk, skb, LLC_DATA_PRIM);
		if (unlikely(sock_queue_rcv_skb(sk, skb))) {
			/*
			 * shouldn't happen
			 */
			printk(KERN_ERR "%s: sock_queue_rcv_skb failed!\n",
			       __func__);
			kfree_skb(skb);
		}
		break;
	case LLC_CONN_PRIM:
		/*
		 * Can't be sock_queue_rcv_skb, because we have to leave the
		 * skb->sk pointing to the newly created struct sock in
		 * llc_conn_handler. -acme
		 */
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_state_change(sk);
		break;
	case LLC_DISC_PRIM:
		sock_hold(sk);
		if (sk->sk_type == SOCK_STREAM &&
		    sk->sk_state == TCP_ESTABLISHED) {
			sk->sk_shutdown = SHUTDOWN_MASK;
			sk->sk_socket->state = SS_UNCONNECTED;
			sk->sk_state = TCP_CLOSE;
			if (!sock_flag(sk, SOCK_DEAD)) {
				sock_set_flag(sk, SOCK_DEAD);
				sk->sk_state_change(sk);
			}
		}
		kfree_skb(skb);
		sock_put(sk);
		break;
	case LLC_RESET_PRIM:
		/*
		 * FIXME:
		 * RESET is not being notified to upper layers for now
		 */
		printk(KERN_INFO "%s: received a reset ind!\n", __func__);
		kfree_skb(skb);
		break;
	default:
		if (ev->ind_prim) {
			printk(KERN_INFO "%s: received unknown %d prim!\n",
			       __func__, ev->ind_prim);
			kfree_skb(skb);
		}
		/* No indication */
		break;
	}
	/* deliver the confirmation (if any) to the upper layer */
	switch (ev->cfm_prim) {
	case LLC_DATA_PRIM:
		if (!llc_data_accept_state(llc->state))
			sk->sk_write_space(sk);
		else
			rc = llc->failed_data_req = 1;
		break;
	case LLC_CONN_PRIM:
		if (sk->sk_type == SOCK_STREAM &&
		    sk->sk_state == TCP_SYN_SENT) {
			if (ev->status) {
				sk->sk_socket->state = SS_UNCONNECTED;
				sk->sk_state = TCP_CLOSE;
			} else {
				sk->sk_socket->state = SS_CONNECTED;
				sk->sk_state = TCP_ESTABLISHED;
			}
			sk->sk_state_change(sk);
		}
		break;
	case LLC_DISC_PRIM:
		sock_hold(sk);
		if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSING) {
			sk->sk_socket->state = SS_UNCONNECTED;
			sk->sk_state = TCP_CLOSE;
			sk->sk_state_change(sk);
		}
		sock_put(sk);
		break;
	case LLC_RESET_PRIM:
		/*
		 * FIXME:
		 * RESET is not being notified to upper layers for now
		 */
		printk(KERN_INFO "%s: received a reset conf!\n", __func__);
		break;
	default:
		if (ev->cfm_prim) {
			printk(KERN_INFO "%s: received unknown %d prim!\n",
			       __func__, ev->cfm_prim);
			break;
		}
		goto out_skb_put; /* No confirmation */
	}
	/* NOTE(review): out_kfree_skb falls through to out_skb_put, so that
	 * path drops two references — presumably pairing the skb_get() above
	 * with the reference consumed on the normal path; verify against
	 * callers before restructuring. */
out_kfree_skb:
	kfree_skb(skb);
out_skb_put:
	kfree_skb(skb);
	return rc;
}
/**
 * llc_conn_send_pdu - queues a PDU and kicks transmission
 * @sk: active connection
 * @skb: PDU to transmit
 *
 * Appends the PDU to the connection's write queue and starts handing
 * queued PDUs down to the MAC layer.
 */
void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
{
	/* queue PDU to send to MAC layer */
	skb_queue_tail(&sk->sk_write_queue, skb);
	llc_conn_send_pdus(sk);
}
/**
* llc_conn_rtn_pdu - sends received data pdu to upper layer
* @sk: Active connection
* @skb: Received data frame
*
* Sends received data pdu to upper layer (by using indicate function).
* Prepares service parameters (prim and prim_data). calling indication
* function will be done in llc_conn_state_process.
*/
void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb)
{
struct llc_conn_state_ev *ev = llc_conn_ev(skb);
ev->ind_prim = LLC_DATA_PRIM;
}
/**
* llc_conn_resend_i_pdu_as_cmd - resend all all unacknowledged I PDUs
* @sk: active connection
* @nr: NR
* @first_p_bit: p_bit value of first pdu
*
* Resend all unacknowledged I PDUs, starting with the NR; send first as
* command PDU with P bit equal first_p_bit; if more than one send
* subsequent as command PDUs with P bit equal zero (0).
*/
void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
{
	struct sk_buff *skb;
	struct llc_pdu_sn *pdu;
	u16 nbr_unack_pdus;
	struct llc_sock *llc;
	u8 howmany_resend = 0;

	/* first drop everything the peer has acknowledged up to NR */
	llc_conn_remove_acked_pdus(sk, nr, &nbr_unack_pdus);
	if (!nbr_unack_pdus)
		goto out;
	/*
	 * Process unack PDUs only if unack queue is not empty; remove
	 * appropriate PDUs, fix them up, and put them on mac_pdu_q.
	 */
	llc = llc_sk(sk);
	while ((skb = skb_dequeue(&llc->pdu_unack_q)) != NULL) {
		pdu = llc_pdu_sn_hdr(skb);
		llc_pdu_set_cmd_rsp(skb, LLC_PDU_CMD);
		/* only the first retransmitted PDU carries the P bit */
		llc_pdu_set_pf_bit(skb, first_p_bit);
		skb_queue_tail(&sk->sk_write_queue, skb);
		first_p_bit = 0;
		llc->vS = LLC_I_GET_NS(pdu);
		howmany_resend++;
	}
	/* resume the send sequence number just past the last resent PDU */
	if (howmany_resend > 0)
		llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
	/* any PDUs to re-send are queued up; start sending to MAC */
	llc_conn_send_pdus(sk);
out:;
}
/**
* llc_conn_resend_i_pdu_as_rsp - Resend all unacknowledged I PDUs
* @sk: active connection.
* @nr: NR
* @first_f_bit: f_bit value of first pdu.
*
* Resend all unacknowledged I PDUs, starting with the NR; send first as
* response PDU with F bit equal first_f_bit; if more than one send
* subsequent as response PDUs with F bit equal zero (0).
*/
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
{
	struct llc_sock *llc = llc_sk(sk);
	struct sk_buff *skb;
	u16 unacked;
	u8 resent = 0;

	/* drop everything the peer has acknowledged up to NR */
	llc_conn_remove_acked_pdus(sk, nr, &unacked);
	if (!unacked)
		return;
	/*
	 * Re-queue every PDU still awaiting acknowledgement as a response
	 * PDU; only the first one carries the caller's F bit.
	 */
	for (skb = skb_dequeue(&llc->pdu_unack_q); skb != NULL;
	     skb = skb_dequeue(&llc->pdu_unack_q)) {
		struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);

		llc_pdu_set_cmd_rsp(skb, LLC_PDU_RSP);
		llc_pdu_set_pf_bit(skb, first_f_bit);
		skb_queue_tail(&sk->sk_write_queue, skb);
		first_f_bit = 0;
		llc->vS = LLC_I_GET_NS(pdu);
		resent++;
	}
	/* resume the send sequence number just past the last resent PDU */
	if (resent > 0)
		llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
	/* everything is queued; start handing PDUs to the MAC layer */
	llc_conn_send_pdus(sk);
}
/**
 * llc_conn_remove_acked_pdus - Removes acknowledged pdus from tx queue
 * @sk: active connection
 * @nr: NR
 * @how_many_unacked: size of pdu_unack_q after removing acked pdus
 *
 * Removes acknowledged pdus from transmit queue (pdu_unack_q). Returns
 * the number of pdus that removed from queue.
 */
int llc_conn_remove_acked_pdus(struct sock *sk, u8 nr, u16 *how_many_unacked)
{
	int pdu_pos, i;
	struct sk_buff *skb;
	struct llc_pdu_sn *pdu;
	int nbr_acked = 0;
	struct llc_sock *llc = llc_sk(sk);
	int q_len = skb_queue_len(&llc->pdu_unack_q);

	if (!q_len)
		goto out;
	skb = skb_peek(&llc->pdu_unack_q);
	pdu = llc_pdu_sn_hdr(skb);

	/* finding position of last acked pdu in queue */
	pdu_pos = ((int)LLC_2_SEQ_NBR_MODULO + (int)nr -
			(int)LLC_I_GET_NS(pdu)) % LLC_2_SEQ_NBR_MODULO;

	/* free every PDU up to (but not including) that position */
	for (i = 0; i < pdu_pos && i < q_len; i++) {
		skb = skb_dequeue(&llc->pdu_unack_q);
		kfree_skb(skb);
		nbr_acked++;
	}
out:
	*how_many_unacked = skb_queue_len(&llc->pdu_unack_q);
	return nbr_acked;
}
/**
* llc_conn_send_pdus - Sends queued PDUs
* @sk: active connection
*
* Sends queued pdus to MAC layer for transmission.
*/
static void llc_conn_send_pdus(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
		struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);

		/* I PDUs (except on loopback) must be kept on the unack
		 * queue for possible retransmission, so transmit a clone
		 * and keep the original */
		if (LLC_PDU_TYPE_IS_I(pdu) &&
		    !(skb->dev->flags & IFF_LOOPBACK)) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
			/* out of memory: the original is already saved on
			 * pdu_unack_q, so it can be retransmitted later */
			if (!skb2)
				break;
			skb = skb2;
		}
		dev_queue_xmit(skb);
	}
}
/**
* llc_conn_service - finds transition and changes state of connection
* @sk: connection
* @skb: happened event
*
* This function finds transition that matches with happened event, then
* executes related actions and finally changes state of connection.
* Returns 0 for success, 1 for failure.
*/
static int llc_conn_service(struct sock *sk, struct sk_buff *skb)
{
	int rc = 1;
	struct llc_sock *llc = llc_sk(sk);
	struct llc_conn_state_trans *trans;

	/* sanity: the state index must be inside the transition table */
	if (llc->state > NBR_CONN_STATES)
		goto out;
	rc = 0;
	trans = llc_qualify_conn_ev(sk, skb);
	if (trans) {
		/* run the transition's action list, then move to the next
		 * state (unless the transition keeps the current one) */
		rc = llc_exec_conn_trans_actions(sk, trans, skb);
		if (!rc && trans->next_state != NO_STATE_CHANGE) {
			llc->state = trans->next_state;
			/* entering a data-transfer state: wake writers */
			if (!llc_data_accept_state(llc->state))
				sk->sk_state_change(sk);
		}
	}
out:
	return rc;
}
/**
* llc_qualify_conn_ev - finds transition for event
* @sk: connection
* @skb: happened event
*
* This function finds transition that matches with happened event.
* Returns pointer to found transition on success, %NULL otherwise.
*/
static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk,
							struct sk_buff *skb)
{
	struct llc_conn_state_trans **next_trans;
	llc_conn_ev_qfyr_t *next_qualifier;
	struct llc_conn_state_ev *ev = llc_conn_ev(skb);
	struct llc_sock *llc = llc_sk(sk);
	struct llc_conn_state *curr_state =
				&llc_conn_state_table[llc->state - 1];

	/* search thru events for this state until
	 * list exhausted or until no more
	 */
	/* start at the offset for this event category; the transition
	 * list is terminated by an entry with a NULL ->ev */
	for (next_trans = curr_state->transitions +
		llc_find_offset(llc->state - 1, ev->type);
	     (*next_trans)->ev; next_trans++) {
		/* event handlers return 0 on a match */
		if (!((*next_trans)->ev)(sk, skb)) {
			/* got POSSIBLE event match; the event may require
			 * qualification based on the values of a number of
			 * state flags; if all qualifications are met (i.e.,
			 * if all qualifying functions return success, or 0,
			 * then this is THE event we're looking for
			 */
			for (next_qualifier = (*next_trans)->ev_qualifiers;
			     next_qualifier && *next_qualifier &&
			     !(*next_qualifier)(sk, skb); next_qualifier++)
				/* nothing */;
			if (!next_qualifier || !*next_qualifier)
				/* all qualifiers executed successfully; this is
				 * our transition; return it so we can perform
				 * the associated actions & change the state
				 */
				return *next_trans;
		}
	}
	return NULL;
}
/**
* llc_exec_conn_trans_actions - executes related actions
* @sk: connection
* @trans: transition that it's actions must be performed
* @skb: event
*
* Executes actions that is related to happened event. Returns 0 for
* success, 1 to indicate failure of at least one action.
*/
static int llc_exec_conn_trans_actions(struct sock *sk,
				       struct llc_conn_state_trans *trans,
				       struct sk_buff *skb)
{
	llc_conn_action_t *action;
	int rc = 0;

	if (!trans->ev_actions)
		return 0;
	/* run every action in the NULL-terminated list; an action result
	 * of 2 aborts the list and is propagated as-is, any other
	 * non-zero result is collapsed to 1 */
	for (action = trans->ev_actions; *action; action++) {
		int err = (*action)(sk, skb);

		if (err == 2)
			return 2;
		if (err)
			rc = 1;
	}
	return rc;
}
static inline bool llc_estab_match(const struct llc_sap *sap,
const struct llc_addr *daddr,
const struct llc_addr *laddr,
const struct sock *sk)
{
struct llc_sock *llc = llc_sk(sk);
return llc->laddr.lsap == laddr->lsap &&
llc->daddr.lsap == daddr->lsap &&
llc_mac_match(llc->laddr.mac, laddr->mac) &&
llc_mac_match(llc->daddr.mac, daddr->mac);
}
/**
* __llc_lookup_established - Finds connection for the remote/local sap/mac
* @sap: SAP
* @daddr: address of remote LLC (MAC + SAP)
* @laddr: address of local LLC (MAC + SAP)
*
* Search connection list of the SAP and finds connection using the remote
* mac, remote sap, local mac, and local sap. Returns pointer for
* connection found, %NULL otherwise.
* Caller has to make sure local_bh is disabled.
*/
static struct sock *__llc_lookup_established(struct llc_sap *sap,
					     struct llc_addr *daddr,
					     struct llc_addr *laddr)
{
	struct sock *rc;
	struct hlist_nulls_node *node;
	int slot = llc_sk_laddr_hashfn(sap, laddr);
	struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];

	rcu_read_lock();
again:
	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
		if (llc_estab_match(sap, daddr, laddr, rc)) {
			/* Extra checks required by SLAB_DESTROY_BY_RCU */
			/* the socket memory may be reused while we look at
			 * it, so take a reference only if it is still live,
			 * then re-verify the match under that reference */
			if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
				goto again;
			if (unlikely(llc_sk(rc)->sap != sap ||
				     !llc_estab_match(sap, daddr, laddr, rc))) {
				sock_put(rc);
				continue;
			}
			goto found;
		}
	}
	rc = NULL;
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (unlikely(get_nulls_value(node) != slot))
		goto again;
found:
	rcu_read_unlock();
	return rc;
}
/* BH-safe wrapper around __llc_lookup_established(); on success the
 * returned socket carries a reference the caller must drop. */
struct sock *llc_lookup_established(struct llc_sap *sap,
				    struct llc_addr *daddr,
				    struct llc_addr *laddr)
{
	struct sock *sk;

	local_bh_disable();
	sk = __llc_lookup_established(sap, daddr, laddr);
	local_bh_enable();
	return sk;
}
static inline bool llc_listener_match(const struct llc_sap *sap,
const struct llc_addr *laddr,
const struct sock *sk)
{
struct llc_sock *llc = llc_sk(sk);
return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
llc->laddr.lsap == laddr->lsap &&
llc_mac_match(llc->laddr.mac, laddr->mac);
}
static struct sock *__llc_lookup_listener(struct llc_sap *sap,
					  struct llc_addr *laddr)
{
	struct sock *rc;
	struct hlist_nulls_node *node;
	int slot = llc_sk_laddr_hashfn(sap, laddr);
	struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];

	rcu_read_lock();
again:
	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
		if (llc_listener_match(sap, laddr, rc)) {
			/* Extra checks required by SLAB_DESTROY_BY_RCU */
			/* grab a reference only if the socket is still live,
			 * then re-verify the match under that reference */
			if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
				goto again;
			if (unlikely(llc_sk(rc)->sap != sap ||
				     !llc_listener_match(sap, laddr, rc))) {
				sock_put(rc);
				continue;
			}
			goto found;
		}
	}
	rc = NULL;
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (unlikely(get_nulls_value(node) != slot))
		goto again;
found:
	rcu_read_unlock();
	return rc;
}
/**
* llc_lookup_listener - Finds listener for local MAC + SAP
* @sap: SAP
* @laddr: address of local LLC (MAC + SAP)
*
* Search connection list of the SAP and finds connection listening on
* local mac, and local sap. Returns pointer for parent socket found,
* %NULL otherwise.
* Caller has to make sure local_bh is disabled.
*/
static struct sock *llc_lookup_listener(struct llc_sap *sap,
					struct llc_addr *laddr)
{
	/* all-zero address matches a wildcard listener */
	static struct llc_addr null_addr;
	struct sock *sk;

	sk = __llc_lookup_listener(sap, laddr);
	if (sk)
		return sk;
	/* no exact listener: fall back to one bound to the null address */
	return __llc_lookup_listener(sap, &null_addr);
}
/* Find a socket for the address pair: prefer an established connection,
 * otherwise fall back to a listener on the local address. */
static struct sock *__llc_lookup(struct llc_sap *sap,
				 struct llc_addr *daddr,
				 struct llc_addr *laddr)
{
	struct sock *sk = __llc_lookup_established(sap, daddr, laddr);

	if (!sk)
		sk = llc_lookup_listener(sap, laddr);
	return sk;
}
/**
* llc_data_accept_state - designates if in this state data can be sent.
* @state: state of connection.
*
* Returns 0 if data can be sent, 1 otherwise.
*/
u8 llc_data_accept_state(u8 state)
{
	/* data transfer is only possible in the NORMAL, BUSY and REJ
	 * states; everything else refuses data */
	if (state == LLC_CONN_STATE_NORMAL ||
	    state == LLC_CONN_STATE_BUSY ||
	    state == LLC_CONN_STATE_REJ)
		return 0;
	return 1;
}
/**
 * llc_find_next_offset - counts transitions until the next category
 * @state: state table.
 * @offset: start offset.
 *
 * Walks the state's transition list starting at @offset until the
 * NULL-event sentinel that terminates the current category. Returns the
 * number of transitions in that category (the caller adds this count
 * plus one for the sentinel to reach the next category's start).
 */
static u16 __init llc_find_next_offset(struct llc_conn_state *state, u16 offset)
{
	u16 cnt = 0;
	struct llc_conn_state_trans **next_trans;

	for (next_trans = state->transitions + offset;
	     (*next_trans)->ev; next_trans++)
		++cnt;
	return cnt;
}
/**
* llc_build_offset_table - builds offset table of connection
*
* Fills offset table of connection state transition table
* (llc_offset_table).
*/
void __init llc_build_offset_table(void)
{
	struct llc_conn_state *curr_state;
	int state, ev_type, next_offset;

	for (state = 0; state < NBR_CONN_STATES; state++) {
		curr_state = &llc_conn_state_table[state];
		next_offset = 0;
		/* each category's transitions end with a sentinel entry,
		 * hence the +1 when advancing to the next category */
		for (ev_type = 0; ev_type < NBR_CONN_EV; ev_type++) {
			llc_offset_table[state][ev_type] = next_offset;
			next_offset += llc_find_next_offset(curr_state,
							    next_offset) + 1;
		}
	}
}
/**
* llc_find_offset - finds start offset of category of transitions
* @state: state of connection
* @ev_type: type of happened event
*
* Finds start offset of desired category of transitions. Returns the
* desired start offset.
*/
static int llc_find_offset(int state, int ev_type)
{
	/* at this stage, llc_offset_table[..][2] is not important. it is for
	 * init_pf_cycle and I don't know what is it.
	 */
	switch (ev_type) {
	case LLC_CONN_EV_TYPE_PRIM:
		return llc_offset_table[state][0];
	case LLC_CONN_EV_TYPE_SIMPLE:
		return llc_offset_table[state][1];
	case LLC_CONN_EV_TYPE_P_TMR:
	case LLC_CONN_EV_TYPE_ACK_TMR:
	case LLC_CONN_EV_TYPE_REJ_TMR:
	case LLC_CONN_EV_TYPE_BUSY_TMR:
		/* all timer events share one category */
		return llc_offset_table[state][3];
	case LLC_CONN_EV_TYPE_PDU:
		return llc_offset_table[state][4];
	}
	return 0;
}
/**
* llc_sap_add_socket - adds a socket to a SAP
* @sap: SAP
* @sk: socket
*
* This function adds a socket to the hash tables of a SAP.
*/
void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
{
	struct llc_sock *llc = llc_sk(sk);
	struct hlist_head *dev_hb = llc_sk_dev_hash(sap, llc->dev->ifindex);
	struct hlist_nulls_head *laddr_hb = llc_sk_laddr_hash(sap, &llc->laddr);

	/* the SAP must stay alive as long as a socket references it;
	 * released in llc_sap_remove_socket() */
	llc_sap_hold(sap);
	llc_sk(sk)->sap = sap;

	spin_lock_bh(&sap->sk_lock);
	sap->sk_count++;
	/* insert into both the local-address and per-device hashes */
	sk_nulls_add_node_rcu(sk, laddr_hb);
	hlist_add_head(&llc->dev_hash_node, dev_hb);
	spin_unlock_bh(&sap->sk_lock);
}
/**
* llc_sap_remove_socket - removes a socket from SAP
* @sap: SAP
* @sk: socket
*
* This function removes a connection from the hash tables of a SAP if
* the connection was in this list.
*/
void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
{
	struct llc_sock *llc = llc_sk(sk);

	spin_lock_bh(&sap->sk_lock);
	/* unhook from both hashes added in llc_sap_add_socket() */
	sk_nulls_del_node_init_rcu(sk);
	hlist_del(&llc->dev_hash_node);
	sap->sk_count--;
	spin_unlock_bh(&sap->sk_lock);
	/* drop the reference taken in llc_sap_add_socket() */
	llc_sap_put(sap);
}
/**
* llc_conn_rcv - sends received pdus to the connection state machine
* @sk: current connection structure.
* @skb: received frame.
*
* Sends received pdus to the connection state machine.
*/
static int llc_conn_rcv(struct sock* sk, struct sk_buff *skb)
{
	struct llc_conn_state_ev *ev = llc_conn_ev(skb);

	/* tag the frame as a PDU event before running the state machine */
	ev->type   = LLC_CONN_EV_TYPE_PDU;
	ev->reason = 0;
	return llc_conn_state_process(sk, skb);
}
/*
 * Allocate and set up the child socket for an incoming connection on a
 * listening socket: copies the address pair (reversed: the frame's
 * source becomes our remote address), pins the device and registers the
 * new socket with the parent's SAP.
 *
 * NOTE(review): llc_sap_add_socket() already takes a SAP reference; the
 * extra llc_sap_hold() here presumably pairs with a put elsewhere in the
 * accept path — verify before changing.
 */
static struct sock *llc_create_incoming_sock(struct sock *sk,
					     struct net_device *dev,
					     struct llc_addr *saddr,
					     struct llc_addr *daddr)
{
	struct sock *newsk = llc_sk_alloc(sock_net(sk), sk->sk_family, GFP_ATOMIC,
					  sk->sk_prot);
	struct llc_sock *newllc, *llc = llc_sk(sk);

	if (!newsk)
		goto out;
	newllc = llc_sk(newsk);
	memcpy(&newllc->laddr, daddr, sizeof(newllc->laddr));
	memcpy(&newllc->daddr, saddr, sizeof(newllc->daddr));
	newllc->dev = dev;
	dev_hold(dev);
	llc_sap_add_socket(llc->sap, newsk);
	llc_sap_hold(llc->sap);
out:
	return newsk;
}
/*
 * Entry point for connection-oriented frames arriving on @sap: decodes
 * the address pair, finds (or, for a listener, creates) the matching
 * socket and feeds the frame to the state machine — directly if the
 * socket is not owned by a process, via the backlog otherwise.
 */
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
{
	struct llc_addr saddr, daddr;
	struct sock *sk;

	llc_pdu_decode_sa(skb, saddr.mac);
	llc_pdu_decode_ssap(skb, &saddr.lsap);
	llc_pdu_decode_da(skb, daddr.mac);
	llc_pdu_decode_dsap(skb, &daddr.lsap);

	/* returns a referenced socket (put at "out") or NULL */
	sk = __llc_lookup(sap, &saddr, &daddr);
	if (!sk)
		goto drop;
	bh_lock_sock(sk);
	/*
	 * This has to be done here and not at the upper layer ->accept
	 * method because of the way the PROCOM state machine works:
	 * it needs to set several state variables (see, for instance,
	 * llc_adm_actions_2 in net/llc/llc_c_st.c) and send a packet to
	 * the originator of the new connection, and this state has to be
	 * in the newly created struct sock private area. -acme
	 */
	if (unlikely(sk->sk_state == TCP_LISTEN)) {
		struct sock *newsk = llc_create_incoming_sock(sk, skb->dev,
							      &saddr, &daddr);
		if (!newsk)
			goto drop_unlock;
		skb_set_owner_r(skb, newsk);
	} else {
		/*
		 * Can't be skb_set_owner_r, this will be done at the
		 * llc_conn_state_process function, later on, when we will use
		 * skb_queue_rcv_skb to send it to upper layers, this is
		 * another trick required to cope with how the PROCOM state
		 * machine works. -acme
		 */
		skb->sk = sk;
	}
	if (!sock_owned_by_user(sk))
		llc_conn_rcv(sk, skb);
	else {
		dprintk("%s: adding to backlog...\n", __func__);
		llc_set_backlog_type(skb, LLC_PACKET);
		if (sk_add_backlog(sk, skb))
			goto drop_unlock;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return;
drop:
	kfree_skb(skb);
	return;
drop_unlock:
	kfree_skb(skb);
	goto out;
}
#undef LLC_REFCNT_DEBUG
#ifdef LLC_REFCNT_DEBUG
static atomic_t llc_sock_nr;
#endif
/**
* llc_backlog_rcv - Processes rx frames and expired timers.
* @sk: LLC sock (p8022 connection)
* @skb: queued rx frame or event
*
* This function processes frames that has received and timers that has
* expired during sending an I pdu (refer to data_req_handler). frames
* queue by llc_rcv function (llc_mac.c) and timers queue by timer
* callback functions(llc_c_ac.c).
*/
static int llc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;
	struct llc_sock *llc = llc_sk(sk);

	if (likely(llc_backlog_type(skb) == LLC_PACKET)) {
		/* received frame queued while the socket was owned */
		if (likely(llc->state > 1)) /* not closed */
			rc = llc_conn_rcv(sk, skb);
		else
			goto out_kfree_skb;
	} else if (llc_backlog_type(skb) == LLC_EVENT) {
		/* timer expiration event */
		if (likely(llc->state > 1)) /* not closed */
			rc = llc_conn_state_process(sk, skb);
		else
			goto out_kfree_skb;
	} else {
		printk(KERN_ERR "%s: invalid skb in backlog\n", __func__);
		goto out_kfree_skb;
	}
out:
	return rc;
out_kfree_skb:
	kfree_skb(skb);
	goto out;
}
/**
* llc_sk_init - Initializes a socket with default llc values.
* @sk: socket to initialize.
*
* Initializes a socket with default llc values.
*/
static void llc_sk_init(struct sock* sk)
{
	struct llc_sock *llc = llc_sk(sk);

	llc->state    = LLC_CONN_STATE_ADM;
	llc->inc_cntr = llc->dec_cntr = 2;
	llc->dec_step = llc->connect_step = 1;

	/* the four LLC2 timers, with their sysctl-tunable defaults */
	setup_timer(&llc->ack_timer.timer, llc_conn_ack_tmr_cb,
			(unsigned long)sk);
	llc->ack_timer.expire	      = sysctl_llc2_ack_timeout;

	setup_timer(&llc->pf_cycle_timer.timer, llc_conn_pf_cycle_tmr_cb,
			(unsigned long)sk);
	llc->pf_cycle_timer.expire	   = sysctl_llc2_p_timeout;

	setup_timer(&llc->rej_sent_timer.timer, llc_conn_rej_tmr_cb,
			(unsigned long)sk);
	llc->rej_sent_timer.expire	   = sysctl_llc2_rej_timeout;

	setup_timer(&llc->busy_state_timer.timer, llc_conn_busy_tmr_cb,
			(unsigned long)sk);
	llc->busy_state_timer.expire	     = sysctl_llc2_busy_timeout;

	llc->n2 = 2;   /* max retransmit */
	llc->k  = 2;   /* tx win size, will adjust dynam */
	llc->rw = 128; /* rx win size (opt and equal to
			* tx_win of remote LLC) */
	skb_queue_head_init(&llc->pdu_unack_q);
	sk->sk_backlog_rcv = llc_backlog_rcv;
}
/**
 * llc_sk_alloc - Allocates LLC sock
 * @net: network namespace
 * @family: upper layer protocol family
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 * @prot: protocol operations for the new socket
 *
 * Allocates a LLC sock and initializes it. Returns the new LLC sock
 * or %NULL if there's no memory available for one
 */
struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot)
{
	struct sock *sk = sk_alloc(net, family, priority, prot);

	if (!sk)
		goto out;
	llc_sk_init(sk);
	sock_init_data(NULL, sk);
#ifdef LLC_REFCNT_DEBUG
	atomic_inc(&llc_sock_nr);
	printk(KERN_DEBUG "LLC socket %p created in %s, now we have %d alive\n", sk,
		__func__, atomic_read(&llc_sock_nr));
#endif
out:
	return sk;
}
/**
 * llc_sk_free - Frees a LLC socket
 * @sk: socket to free
 *
 * Frees a LLC socket: stops its timers, purges all queues and drops
 * the final reference.
 */
void llc_sk_free(struct sock *sk)
{
	struct llc_sock *llc = llc_sk(sk);

	llc->state = LLC_CONN_OUT_OF_SVC;

	/* Stop all (possibly) running timers */
	llc_conn_ac_stop_all_timers(sk, NULL);
#ifdef DEBUG_LLC_CONN_ALLOC
	printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
		skb_queue_len(&llc->pdu_unack_q),
		skb_queue_len(&sk->sk_write_queue));
#endif
	/* free anything still queued in any direction */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
	skb_queue_purge(&llc->pdu_unack_q);
#ifdef LLC_REFCNT_DEBUG
	if (atomic_read(&sk->sk_refcnt) != 1) {
		printk(KERN_DEBUG "Destruction of LLC sock %p delayed in %s, cnt=%d\n",
			sk, __func__, atomic_read(&sk->sk_refcnt));
		printk(KERN_DEBUG "%d LLC sockets are still alive\n",
			atomic_read(&llc_sock_nr));
	} else {
		atomic_dec(&llc_sock_nr);
		printk(KERN_DEBUG "LLC socket %p released in %s, %d are still alive\n", sk,
			__func__, atomic_read(&llc_sock_nr));
	}
#endif
	sock_put(sk);
}
/**
* llc_sk_reset - resets a connection
* @sk: LLC socket to reset
*
* Resets a connection to the out of service state. Stops its timers
* and frees any frames in the queues of the connection.
*/
void llc_sk_reset(struct sock *sk)
{
	struct llc_sock *llc = llc_sk(sk);

	llc_conn_ac_stop_all_timers(sk, NULL);
	/* discard everything still queued for (re)transmission */
	skb_queue_purge(&sk->sk_write_queue);
	skb_queue_purge(&llc->pdu_unack_q);
	/* clear the state-machine flags and counters */
	llc->remote_busy_flag	= 0;
	llc->cause_flag		= 0;
	llc->retry_count	= 0;
	llc_conn_set_p_flag(sk, 0);
	llc->f_flag		= 0;
	llc->s_flag		= 0;
	llc->ack_pf		= 0;
	llc->first_pdu_Ns	= 0;
	llc->ack_must_be_send	= 0;
	/* restore the initial window-adaptation parameters */
	llc->dec_step		= 1;
	llc->inc_cntr		= 2;
	llc->dec_cntr		= 2;
	llc->X			= 0;
	llc->failed_data_req	= 0;
	llc->last_nr		= 0;
}
| gpl-2.0 |
robcore/machinex_kernel | drivers/video/omap/lcdc.c | 9188 | 19816 | /*
* OMAP1 internal LCD controller
*
* Copyright (C) 2004 Nokia Corporation
* Author: Imre Deak <imre.deak@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/clk.h>
#include <linux/gfp.h>
#include <mach/lcdc.h>
#include <plat/dma.h>
#include <asm/mach-types.h>
#include "omapfb.h"
#include "lcdc.h"
#define MODULE_NAME "lcdc"
#define MAX_PALETTE_SIZE PAGE_SIZE
/* What the controller should fetch on the next load: palette only,
 * frame only, or both (see set_load_mode()). */
enum lcdc_load_mode {
	OMAP_LCDC_LOAD_PALETTE,
	OMAP_LCDC_LOAD_FRAME,
	OMAP_LCDC_LOAD_PALETTE_AND_FRAME
};
/* Driver-wide state for the single internal OMAP1 LCD controller. */
static struct omap_lcd_controller {
	enum omapfb_update_mode update_mode;
	int ext_mode;			/* nonzero: external LCD controller */

	unsigned long frame_offset;	/* frame start offset within vram */
	int screen_width;
	int xres;
	int yres;

	enum omapfb_color_format color_mode;
	int bpp;			/* bits per pixel of the frame data */
	void *palette_virt;		/* shadow palette, CPU view */
	dma_addr_t palette_phys;	/* shadow palette, DMA view */
	int palette_code;		/* type code OR'd into first entry */
	int palette_size;		/* used to size the palette DMA */

	unsigned int irq_mask;		/* IRQs to unmask on enable */
	struct completion last_frame_complete;
	struct completion palette_load_complete;
	struct clk *lcd_ck;
	struct omapfb_device *fbdev;

	void (*dma_callback)(void *data);	/* optional DMA-done hook */
	void *dma_callback_data;

	int fbmem_allocated;		/* vram allocated by us (vs. mapped) */
	dma_addr_t vram_phys;
	void *vram_virt;
	unsigned long vram_size;
} lcdc;
/* Add @mask to the set of LCDC interrupts that enable_controller()
 * will unmask.  ("static inline void" fixes the non-idiomatic
 * "static void inline" qualifier ordering.) */
static inline void enable_irqs(int mask)
{
	lcdc.irq_mask |= mask;
}
/* Remove @mask from the set of LCDC interrupts that
 * enable_controller() will unmask.  ("static inline void" fixes the
 * non-idiomatic "static void inline" qualifier ordering.) */
static inline void disable_irqs(int mask)
{
	lcdc.irq_mask &= ~mask;
}
/*
 * Program the LCDC load-mode field (control register bits 21:20):
 * palette only, frame only, or palette and frame together.
 */
static void set_load_mode(enum lcdc_load_mode mode)
{
	u32 reg;

	reg = omap_readl(OMAP_LCDC_CONTROL) & ~(3 << 20);
	switch (mode) {
	case OMAP_LCDC_LOAD_PALETTE:
		reg |= 1 << 20;
		break;
	case OMAP_LCDC_LOAD_FRAME:
		reg |= 2 << 20;
		break;
	case OMAP_LCDC_LOAD_PALETTE_AND_FRAME:
		/* field value 0 selects palette + frame loading */
		break;
	default:
		BUG();
	}
	omap_writel(reg, OMAP_LCDC_CONTROL);
}
/* Turn the controller on with the currently requested IRQs unmasked. */
static void enable_controller(void)
{
	u32 ctrl = omap_readl(OMAP_LCDC_CONTROL);

	ctrl |= OMAP_LCDC_CTRL_LCD_EN;
	ctrl &= ~OMAP_LCDC_IRQ_MASK;
	/* DONE is always enabled so controller shutdown can be observed */
	ctrl |= lcdc.irq_mask | OMAP_LCDC_IRQ_DONE;
	omap_writel(ctrl, OMAP_LCDC_CONTROL);
}
/* Disable the controller without waiting for the final frame. */
static void disable_controller_async(void)
{
	u32 ctrl = omap_readl(OMAP_LCDC_CONTROL);
	/*
	 * Clear the enable bit and mask all IRQs except DONE: we still
	 * want the final DONE irq, which is disabled in the IRQ handler.
	 */
	u32 clear = (OMAP_LCDC_CTRL_LCD_EN | OMAP_LCDC_IRQ_MASK) &
		    ~OMAP_LCDC_IRQ_DONE;

	omap_writel(ctrl & ~clear, OMAP_LCDC_CONTROL);
}
/* Disable the controller and wait up to 500 ms for the final frame. */
static void disable_controller(void)
{
	unsigned long timeout = msecs_to_jiffies(500);

	init_completion(&lcdc.last_frame_complete);
	disable_controller_async();
	if (!wait_for_completion_timeout(&lcdc.last_frame_complete, timeout))
		dev_err(lcdc.fbdev->dev, "timeout waiting for FRAME DONE\n");
}
/*
 * Recover from a FIFO underflow / sync-lost condition by disabling and
 * re-enabling the controller.  The error message is rate-limited to at
 * most one per second, and after 100 resets we give up and leave the
 * controller disabled (the counter restarts for any later errors).
 */
static void reset_controller(u32 status)
{
	static unsigned long reset_count;
	static unsigned long last_jiffies;

	disable_controller_async();
	reset_count++;
	if (reset_count == 1 || time_after(jiffies, last_jiffies + HZ)) {
		dev_err(lcdc.fbdev->dev,
			"resetting (status %#010x,reset count %lu)\n",
			status, reset_count);
		last_jiffies = jiffies;
	}
	if (reset_count < 100) {
		enable_controller();
	} else {
		reset_count = 0;
		dev_err(lcdc.fbdev->dev,
			"too many reset attempts, giving up.\n");
	}
}
/*
 * Configure the LCD DMA according to the current mode specified by parameters
 * in lcdc.fbdev and fbdev->var.
 */
static void setup_lcd_dma(void)
{
	/* Maps a DMA element size in bytes to the OMAP DMA type code. */
	static const int dma_elem_type[] = {
		0,
		OMAP_DMA_DATA_TYPE_S8,
		OMAP_DMA_DATA_TYPE_S16,
		0,
		OMAP_DMA_DATA_TYPE_S32,
	};
	struct omapfb_plane_struct *plane = lcdc.fbdev->fb_info[0]->par;
	struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
	unsigned long src;
	int esize, xelem, yelem;

	src = lcdc.vram_phys + lcdc.frame_offset;
	switch (var->rotate) {
	case 0:
		/* Use 32-bit elements when alignment/mode allows it,
		 * otherwise fall back to 16-bit. */
		if (plane->info.mirror || (src & 3) ||
		    lcdc.color_mode == OMAPFB_COLOR_YUV420 ||
		    (lcdc.xres & 1))
			esize = 2;
		else
			esize = 4;
		xelem = lcdc.xres * lcdc.bpp / 8 / esize;
		yelem = lcdc.yres;
		break;
	case 90:
	case 180:
	case 270:
		if (cpu_is_omap15xx()) {
			/* rotation unsupported on OMAP15xx */
			BUG();
		}
		esize = 2;
		xelem = lcdc.yres * lcdc.bpp / 16;
		yelem = lcdc.xres;
		break;
	default:
		BUG();
		return;
	}
#ifdef VERBOSE
	dev_dbg(lcdc.fbdev->dev,
		"setup_dma: src %#010lx esize %d xelem %d yelem %d\n",
		src, esize, xelem, yelem);
#endif
	omap_set_lcd_dma_b1(src, xelem, yelem, dma_elem_type[esize]);
	if (!cpu_is_omap15xx()) {
		int bpp = lcdc.bpp;
		/*
		 * YUV support is only for external mode when we have the
		 * YUV window embedded in a 16bpp frame buffer.
		 */
		if (lcdc.color_mode == OMAPFB_COLOR_YUV420)
			bpp = 16;
		/* Set virtual xres elem size */
		omap_set_lcd_dma_b1_vxres(
			lcdc.screen_width * bpp / 8 / esize);
		/* Setup transformations */
		omap_set_lcd_dma_b1_rotation(var->rotate);
		omap_set_lcd_dma_b1_mirror(plane->info.mirror);
	}
	omap_setup_lcd_dma();
}
/*
 * LCDC interrupt handler: recover from FIFO underflow / sync loss,
 * signal the frame-done and palette-loaded completions, and acknowledge
 * the remaining status bits.
 */
static irqreturn_t lcdc_irq_handler(int irq, void *dev_id)
{
	u32 status;

	status = omap_readl(OMAP_LCDC_STATUS);
	if (status & (OMAP_LCDC_STAT_FUF | OMAP_LCDC_STAT_SYNC_LOST))
		reset_controller(status);
	else {
		if (status & OMAP_LCDC_STAT_DONE) {
			u32 l;
			/*
			 * Disable IRQ_DONE. The status bit will be cleared
			 * only when the controller is reenabled and we don't
			 * want to get more interrupts.
			 */
			l = omap_readl(OMAP_LCDC_CONTROL);
			l &= ~OMAP_LCDC_IRQ_DONE;
			omap_writel(l, OMAP_LCDC_CONTROL);
			complete(&lcdc.last_frame_complete);
		}
		if (status & OMAP_LCDC_STAT_LOADED_PALETTE) {
			disable_controller_async();
			complete(&lcdc.palette_load_complete);
		}
	}
	/*
	 * Clear these interrupt status bits.
	 * Sync_lost, FUF bits were cleared by disabling the LCD controller
	 * LOADED_PALETTE can be cleared this way only in palette only
	 * load mode. In other load modes it's cleared by disabling the
	 * controller.
	 */
	status &= ~(OMAP_LCDC_STAT_VSYNC |
		    OMAP_LCDC_STAT_LOADED_PALETTE |
		    OMAP_LCDC_STAT_ABC |
		    OMAP_LCDC_STAT_LINE_INT);
	omap_writel(status, OMAP_LCDC_STATUS);
	return IRQ_HANDLED;
}
/*
 * Change to a new video mode. We defer this to a later time to avoid any
 * flicker and not to mess up the current LCD DMA context. For this we disable
 * the LCD controller, which will generate a DONE irq after the last frame has
 * been transferred. Then it'll be safe to reconfigure both the LCD controller
 * as well as the LCD DMA.
 *
 * Only plane 0 / channel 0 at position (0,0) is supported; width and
 * height are validated against the (possibly rotated) panel resolution.
 * Returns 0 on success, -EINVAL on bad plane parameters.
 */
static int omap_lcdc_setup_plane(int plane, int channel_out,
				 unsigned long offset, int screen_width,
				 int pos_x, int pos_y, int width, int height,
				 int color_mode)
{
	struct fb_var_screeninfo *var = &lcdc.fbdev->fb_info[0]->var;
	struct lcd_panel *panel = lcdc.fbdev->panel;
	int rot_x, rot_y;

	/* With 90/180/270 rotation the panel axes are swapped. */
	if (var->rotate == 0) {
		rot_x = panel->x_res;
		rot_y = panel->y_res;
	} else {
		rot_x = panel->y_res;
		rot_y = panel->x_res;
	}
	if (plane != 0 || channel_out != 0 || pos_x != 0 || pos_y != 0 ||
	    width > rot_x || height > rot_y) {
#ifdef VERBOSE
		dev_dbg(lcdc.fbdev->dev,
			"invalid plane params plane %d pos_x %d pos_y %d "
			"w %d h %d\n", plane, pos_x, pos_y, width, height);
#endif
		return -EINVAL;
	}
	lcdc.frame_offset = offset;
	lcdc.xres = width;
	lcdc.yres = height;
	lcdc.screen_width = screen_width;
	lcdc.color_mode = color_mode;
	/* Pick bits-per-pixel and palette parameters for the mode. */
	switch (color_mode) {
	case OMAPFB_COLOR_CLUT_8BPP:
		lcdc.bpp = 8;
		lcdc.palette_code = 0x3000;
		lcdc.palette_size = 512;
		break;
	case OMAPFB_COLOR_RGB565:
		lcdc.bpp = 16;
		lcdc.palette_code = 0x4000;
		lcdc.palette_size = 32;
		break;
	case OMAPFB_COLOR_RGB444:
		lcdc.bpp = 16;
		lcdc.palette_code = 0x4000;
		lcdc.palette_size = 32;
		break;
	case OMAPFB_COLOR_YUV420:
		/* YUV modes are valid only with an external controller. */
		if (lcdc.ext_mode) {
			lcdc.bpp = 12;
			break;
		}
		/* fallthrough */
	case OMAPFB_COLOR_YUV422:
		if (lcdc.ext_mode) {
			lcdc.bpp = 16;
			break;
		}
		/* fallthrough */
	default:
		/* FIXME: other BPPs.
		 * bpp1: code 0, size 256
		 * bpp2: code 0x1000 size 256
		 * bpp4: code 0x2000 size 256
		 * bpp12: code 0x4000 size 32
		 */
		dev_dbg(lcdc.fbdev->dev, "invalid color mode %d\n", color_mode);
		BUG();
		return -1;
	}
	if (lcdc.ext_mode) {
		setup_lcd_dma();
		return 0;
	}
	/* Internal mode: restart the DMA loop with the new parameters. */
	if (lcdc.update_mode == OMAPFB_AUTO_UPDATE) {
		disable_controller();
		omap_stop_lcd_dma();
		setup_lcd_dma();
		enable_controller();
	}
	return 0;
}
/*
 * The internal controller has a single, always-enabled graphics plane;
 * only validate the plane id here.
 */
static int omap_lcdc_enable_plane(int plane, int enable)
{
	dev_dbg(lcdc.fbdev->dev,
		"plane %d enable %d update_mode %d ext_mode %d\n",
		plane, enable, lcdc.update_mode, lcdc.ext_mode);
	return (plane == OMAPFB_PLANE_GFX) ? 0 : -EINVAL;
}
/*
 * Configure the LCD DMA for a palette load operation and do the palette
 * downloading synchronously. We don't use the frame+palette load mode of
 * the controller, since the palette can always be downloaded separately.
 */
static void load_palette(void)
{
	u16 *palette;

	palette = (u16 *)lcdc.palette_virt;

	/* The palette type code is encoded into the first entry. */
	*(u16 *)palette &= 0x0fff;
	*(u16 *)palette |= lcdc.palette_code;

	omap_set_lcd_dma_b1(lcdc.palette_phys,
		lcdc.palette_size / 4 + 1, 1, OMAP_DMA_DATA_TYPE_S32);

	omap_set_lcd_dma_single_transfer(1);
	omap_setup_lcd_dma();

	init_completion(&lcdc.palette_load_complete);
	enable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
	set_load_mode(OMAP_LCDC_LOAD_PALETTE);
	enable_controller();
	/* Message fixed: we wait for the palette load here, not for the
	 * frame DONE irq (the old text was copy-pasted from
	 * disable_controller()). */
	if (!wait_for_completion_timeout(&lcdc.palette_load_complete,
				msecs_to_jiffies(500)))
		dev_err(lcdc.fbdev->dev,
			"timeout waiting for PALETTE LOADED\n");
	/* The controller gets disabled in the irq handler */
	disable_irqs(OMAP_LCDC_IRQ_LOADED_PALETTE);
	omap_stop_lcd_dma();

	/* Restore the single-transfer setting required by the mode. */
	omap_set_lcd_dma_single_transfer(lcdc.ext_mode);
}
/* Used only in internal controller mode */
static int omap_lcdc_setcolreg(u_int regno, u16 red, u16 green, u16 blue,
			       u16 transp, int update_hw_pal)
{
	u16 *pal;

	/* Only the 8bpp CLUT mode has a palette to update. */
	if (lcdc.color_mode != OMAPFB_COLOR_CLUT_8BPP || regno > 255)
		return -EINVAL;

	/* Store 4 bits per component as 0x0RGB in the shadow palette. */
	pal = (u16 *)lcdc.palette_virt;
	pal[regno] = (pal[regno] & ~0x0fff) |
		((red >> 12) << 8) | ((green >> 12) << 4) | (blue >> 12);

	if (update_hw_pal) {
		/* Reload the hardware palette, then restart frame DMA. */
		disable_controller();
		omap_stop_lcd_dma();
		load_palette();
		setup_lcd_dma();
		set_load_mode(OMAP_LCDC_LOAD_FRAME);
		enable_controller();
	}
	return 0;
}
/*
 * Compute the pixel clock divider that gets the pixel clock as close as
 * possible to (without exceeding) @pck, clamped to the hardware limits
 * (minimum 2 for TFT, 3 for STN; maximum 255).
 */
static void calc_ck_div(int is_tft, int pck, int *pck_div)
{
	unsigned long lck = clk_get_rate(lcdc.lcd_ck);
	int min_div = is_tft ? 2 : 3;
	int div;

	pck = max(1, pck);
	div = (lck + pck - 1) / pck;	/* round up */
	div = max(min_div, div);
	if (div > 255) {
		/* FIXME: try to adjust logic clock divider as well */
		div = 255;
		dev_warn(lcdc.fbdev->dev, "pixclock %d kHz too low.\n",
			 pck / 1000);
	}
	*pck_div = div;
}
/*
 * Program panel-specific LCDC registers: panel type (TFT/STN), signal
 * polarities, horizontal/vertical timings and the pixel clock divider.
 * ("static inline void" fixes the non-idiomatic "static void inline"
 * qualifier ordering; logic is unchanged.)
 */
static inline void setup_regs(void)
{
	u32 l;
	struct lcd_panel *panel = lcdc.fbdev->panel;
	int is_tft = panel->config & OMAP_LCDC_PANEL_TFT;
	unsigned long lck;
	int pcd;

	l = omap_readl(OMAP_LCDC_CONTROL);
	l &= ~OMAP_LCDC_CTRL_LCD_TFT;
	l |= is_tft ? OMAP_LCDC_CTRL_LCD_TFT : 0;
#ifdef CONFIG_MACH_OMAP_PALMTE
	/* FIXME:if (machine_is_omap_palmte()) { */
	/* PalmTE uses alternate TFT setting in 8BPP mode */
	l |= (is_tft && panel->bpp == 8) ? 0x810000 : 0;
	/* } */
#endif
	omap_writel(l, OMAP_LCDC_CONTROL);

	/* Panel signal configuration goes into TIMING2 bits 25:20. */
	l = omap_readl(OMAP_LCDC_TIMING2);
	l &= ~(((1 << 6) - 1) << 20);
	l |= (panel->config & OMAP_LCDC_SIGNAL_MASK) << 20;
	omap_writel(l, OMAP_LCDC_TIMING2);

	/* Horizontal timings: width, sync, front/back porch (all -1). */
	l = panel->x_res - 1;
	l |= (panel->hsw - 1) << 10;
	l |= (panel->hfp - 1) << 16;
	l |= (panel->hbp - 1) << 24;
	omap_writel(l, OMAP_LCDC_TIMING0);

	/* Vertical timings: height and sync take -1, porches do not. */
	l = panel->y_res - 1;
	l |= (panel->vsw - 1) << 10;
	l |= panel->vfp << 16;
	l |= panel->vbp << 24;
	omap_writel(l, OMAP_LCDC_TIMING1);

	/* Pixel clock divider (TIMING2 bits 7:0) and AC-bias count. */
	l = omap_readl(OMAP_LCDC_TIMING2);
	l &= ~0xff;
	lck = clk_get_rate(lcdc.lcd_ck);
	if (!panel->pcd)
		calc_ck_div(is_tft, panel->pixel_clock * 1000, &pcd);
	else {
		dev_warn(lcdc.fbdev->dev,
			 "Pixel clock divider value is obsolete.\n"
			 "Try to set pixel_clock to %lu and pcd to 0 "
			 "in drivers/video/omap/lcd_%s.c and submit a patch.\n",
			 lck / panel->pcd / 1000, panel->name);
		pcd = panel->pcd;
	}
	l |= pcd & 0xff;
	l |= panel->acb << 8;
	omap_writel(l, OMAP_LCDC_TIMING2);

	/* update panel info with the exact clock */
	panel->pixel_clock = lck / pcd / 1000;
}
/*
 * Configure the LCD controller, download the color palette and start a looped
 * DMA transfer of the frame image data. Called only in internal
 * controller mode.  Returns 0, or -EINVAL for an unsupported mode.
 */
static int omap_lcdc_set_update_mode(enum omapfb_update_mode mode)
{
	int r = 0;

	if (mode != lcdc.update_mode) {
		switch (mode) {
		case OMAPFB_AUTO_UPDATE:
			setup_regs();
			load_palette();
			/* Setup and start LCD DMA */
			setup_lcd_dma();
			set_load_mode(OMAP_LCDC_LOAD_FRAME);
			enable_irqs(OMAP_LCDC_IRQ_DONE);
			/* This will start the actual DMA transfer */
			enable_controller();
			lcdc.update_mode = mode;
			break;
		case OMAPFB_UPDATE_DISABLED:
			disable_controller();
			omap_stop_lcd_dma();
			lcdc.update_mode = mode;
			break;
		default:
			r = -EINVAL;
		}
	}
	return r;
}
/* Report the currently active update mode. */
static enum omapfb_update_mode omap_lcdc_get_update_mode(void)
{
	return lcdc.update_mode;
}
/* PM code called only in internal controller mode */
/* Suspend: stop updates (controller and DMA are shut down). */
static void omap_lcdc_suspend(void)
{
	omap_lcdc_set_update_mode(OMAPFB_UPDATE_DISABLED);
}
/* Resume: reprogram the controller and restart automatic updates. */
static void omap_lcdc_resume(void)
{
	omap_lcdc_set_update_mode(OMAPFB_AUTO_UPDATE);
}
/* The internal controller exposes no extra capabilities. */
static void omap_lcdc_get_caps(int plane, struct omapfb_caps *caps)
{
}
/*
 * Register an external LCD-DMA callback.  Only one client may be
 * registered at a time; returns -EBUSY if a callback is already set.
 */
int omap_lcdc_set_dma_callback(void (*callback)(void *data), void *data)
{
	BUG_ON(callback == NULL);

	if (lcdc.dma_callback)
		return -EBUSY;

	lcdc.dma_callback = callback;
	lcdc.dma_callback_data = data;
	return 0;
}
EXPORT_SYMBOL(omap_lcdc_set_dma_callback);
/* Unregister the LCD-DMA callback set by omap_lcdc_set_dma_callback(). */
void omap_lcdc_free_dma_callback(void)
{
	lcdc.dma_callback = NULL;
}
EXPORT_SYMBOL(omap_lcdc_free_dma_callback);
/* LCD DMA completion: forward to the registered client callback, if any. */
static void lcdc_dma_handler(u16 status, void *data)
{
	if (lcdc.dma_callback)
		lcdc.dma_callback(lcdc.dma_callback_data);
}
/*
 * Map the externally reserved frame buffer (lcdc.vram_phys) into kernel
 * virtual address space with writecombine attributes; on success the
 * virtual address is stored in lcdc.vram_virt.  Returns 0 or -errno.
 */
static int mmap_kern(void)
{
	struct vm_struct *kvma;
	struct vm_area_struct vma;
	pgprot_t pgprot;
	unsigned long vaddr;

	kvma = get_vm_area(lcdc.vram_size, VM_IOREMAP);
	if (kvma == NULL) {
		dev_err(lcdc.fbdev->dev, "can't get kernel vm area\n");
		return -ENOMEM;
	}
	vma.vm_mm = &init_mm;
	vaddr = (unsigned long)kvma->addr;
	vma.vm_start = vaddr;
	vma.vm_end = vaddr + lcdc.vram_size;
	pgprot = pgprot_writecombine(pgprot_kernel);
	if (io_remap_pfn_range(&vma, vaddr,
			   lcdc.vram_phys >> PAGE_SHIFT,
			   lcdc.vram_size, pgprot) < 0) {
		/* NOTE(review): kvma appears to be leaked on this error
		 * path — confirm whether free_vm_area() should be called
		 * here before returning. */
		dev_err(lcdc.fbdev->dev, "kernel mmap for FB memory failed\n");
		return -EAGAIN;
	}
	lcdc.vram_virt = (void *)vaddr;
	return 0;
}
/* Tear down the kernel mapping created by mmap_kern(). */
static void unmap_kern(void)
{
	vunmap(lcdc.vram_virt);
}
/* Allocate and zero DMA-coherent memory for the hardware palette. */
static int alloc_palette_ram(void)
{
	lcdc.palette_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
		MAX_PALETTE_SIZE, &lcdc.palette_phys, GFP_KERNEL);
	if (!lcdc.palette_virt) {
		dev_err(lcdc.fbdev->dev, "failed to alloc palette memory\n");
		return -ENOMEM;
	}
	memset(lcdc.palette_virt, 0, MAX_PALETTE_SIZE);
	return 0;
}
/* Release the palette memory allocated by alloc_palette_ram(). */
static void free_palette_ram(void)
{
	dma_free_writecombine(lcdc.fbdev->dev, MAX_PALETTE_SIZE,
		lcdc.palette_virt, lcdc.palette_phys);
}
/*
 * Allocate DMA-coherent frame buffer memory: at least one page-aligned
 * full frame for the panel, or the requested region size if larger.
 * Fills in @region and records the allocation in lcdc.
 */
static int alloc_fbmem(struct omapfb_mem_region *region)
{
	int bpp;
	int frame_size;
	struct lcd_panel *panel = lcdc.fbdev->panel;

	bpp = panel->bpp;
	if (bpp == 12)
		bpp = 16;	/* 12bpp frames are sized as 16bpp */
	frame_size = PAGE_ALIGN(panel->x_res * bpp / 8 * panel->y_res);
	if (region->size > frame_size)
		frame_size = region->size;
	lcdc.vram_size = frame_size;
	lcdc.vram_virt = dma_alloc_writecombine(lcdc.fbdev->dev,
		lcdc.vram_size, &lcdc.vram_phys, GFP_KERNEL);
	if (lcdc.vram_virt == NULL) {
		dev_err(lcdc.fbdev->dev, "unable to allocate FB DMA memory\n");
		return -ENOMEM;
	}
	region->size = frame_size;
	region->paddr = lcdc.vram_phys;
	region->vaddr = lcdc.vram_virt;
	region->alloc = 1;
	memset(lcdc.vram_virt, 0, lcdc.vram_size);
	return 0;
}
/* Release the frame buffer memory allocated by alloc_fbmem(). */
static void free_fbmem(void)
{
	dma_free_writecombine(lcdc.fbdev->dev, lcdc.vram_size,
		lcdc.vram_virt, lcdc.vram_phys);
}
/*
 * Set up frame buffer memory: either allocate it ourselves (no physical
 * address given) or map the externally reserved region into the kernel.
 * Only one memory region / plane is supported.
 */
static int setup_fbmem(struct omapfb_mem_desc *req_md)
{
	int r;

	if (!req_md->region_cnt) {
		dev_err(lcdc.fbdev->dev, "no memory regions defined\n");
		return -EINVAL;
	}

	if (req_md->region_cnt > 1) {
		dev_err(lcdc.fbdev->dev, "only one plane is supported\n");
		req_md->region_cnt = 1;
	}

	if (req_md->region[0].paddr == 0) {
		lcdc.fbmem_allocated = 1;
		r = alloc_fbmem(&req_md->region[0]);
		if (r < 0)
			return r;
		return 0;
	}

	lcdc.vram_phys = req_md->region[0].paddr;
	lcdc.vram_size = req_md->region[0].size;

	r = mmap_kern();
	if (r < 0)
		return r;

	dev_dbg(lcdc.fbdev->dev, "vram at %08x size %08lx mapped to 0x%p\n",
		lcdc.vram_phys, lcdc.vram_size, lcdc.vram_virt);
	return 0;
}
/* Undo setup_fbmem(): free our allocation, or unmap the external region. */
static void cleanup_fbmem(void)
{
	if (lcdc.fbmem_allocated)
		free_fbmem();
	else
		unmap_kern();
}
/*
 * One-time controller initialization: set up the LCD clock, IRQ, LCD
 * DMA, palette memory (internal mode only) and frame buffer memory.
 * Resources are released in reverse order on failure (goto chain).
 * Returns 0 or -errno.
 */
static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
			  struct omapfb_mem_desc *req_vram)
{
	int r;
	u32 l;
	int rate;
	struct clk *tc_ck;

	lcdc.irq_mask = 0;
	lcdc.fbdev = fbdev;
	lcdc.ext_mode = ext_mode;
	/* Start with the controller fully disabled. */
	l = 0;
	omap_writel(l, OMAP_LCDC_CONTROL);
	/* FIXME:
	 * According to errata some platforms have a clock rate limitiation
	 */
	lcdc.lcd_ck = clk_get(fbdev->dev, "lcd_ck");
	if (IS_ERR(lcdc.lcd_ck)) {
		dev_err(fbdev->dev, "unable to access LCD clock\n");
		r = PTR_ERR(lcdc.lcd_ck);
		goto fail0;
	}
	tc_ck = clk_get(fbdev->dev, "tc_ck");
	if (IS_ERR(tc_ck)) {
		dev_err(fbdev->dev, "unable to access TC clock\n");
		r = PTR_ERR(tc_ck);
		goto fail1;
	}
	/* Derive the LCD clock rate from the TC clock, with
	 * board-specific dividers. */
	rate = clk_get_rate(tc_ck);
	clk_put(tc_ck);
	if (machine_is_ams_delta())
		rate /= 4;
	if (machine_is_omap_h3())
		rate /= 3;
	r = clk_set_rate(lcdc.lcd_ck, rate);
	if (r) {
		dev_err(fbdev->dev, "failed to adjust LCD rate\n");
		goto fail1;
	}
	clk_enable(lcdc.lcd_ck);
	r = request_irq(OMAP_LCDC_IRQ, lcdc_irq_handler, 0, MODULE_NAME, fbdev);
	if (r) {
		dev_err(fbdev->dev, "unable to get IRQ\n");
		goto fail2;
	}
	r = omap_request_lcd_dma(lcdc_dma_handler, NULL);
	if (r) {
		dev_err(fbdev->dev, "unable to get LCD DMA\n");
		goto fail3;
	}
	omap_set_lcd_dma_single_transfer(ext_mode);
	omap_set_lcd_dma_ext_controller(ext_mode);
	/* The internal controller needs a hardware palette. */
	if (!ext_mode)
		if ((r = alloc_palette_ram()) < 0)
			goto fail4;
	if ((r = setup_fbmem(req_vram)) < 0)
		goto fail5;
	pr_info("omapfb: LCDC initialized\n");
	return 0;
fail5:
	if (!ext_mode)
		free_palette_ram();
fail4:
	omap_free_lcd_dma();
fail3:
	free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
fail2:
	clk_disable(lcdc.lcd_ck);
fail1:
	clk_put(lcdc.lcd_ck);
fail0:
	return r;
}
/* Undo omap_lcdc_init(): free palette/frame memory, DMA, IRQ, clock. */
static void omap_lcdc_cleanup(void)
{
	if (!lcdc.ext_mode)
		free_palette_ram();
	cleanup_fbmem();
	omap_free_lcd_dma();
	free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
	clk_disable(lcdc.lcd_ck);
	clk_put(lcdc.lcd_ck);
}
/* Entry points of the internal controller, used by the omapfb core. */
const struct lcd_ctrl omap1_int_ctrl = {
	.name = "internal",
	.init = omap_lcdc_init,
	.cleanup = omap_lcdc_cleanup,
	.get_caps = omap_lcdc_get_caps,
	.set_update_mode = omap_lcdc_set_update_mode,
	.get_update_mode = omap_lcdc_get_update_mode,
	.update_window = NULL,		/* manual-update windows unsupported */
	.suspend = omap_lcdc_suspend,
	.resume = omap_lcdc_resume,
	.setup_plane = omap_lcdc_setup_plane,
	.enable_plane = omap_lcdc_enable_plane,
	.setcolreg = omap_lcdc_setcolreg,
};
| gpl-2.0 |
Fusion-Devices/android_kernel_samsung_klte | drivers/net/arcnet/com90xx.c | 9700 | 18521 | /*
* Linux ARCnet driver - COM90xx chipset (memory-mapped buffers)
*
* Written 1994-1999 by Avery Pennarun.
* Written 1999 by Martin Mares <mj@ucw.cz>.
* Derived from skeleton.c by Donald Becker.
*
* Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
* for sponsoring the further development of this driver.
*
* **********************
*
* The original copyright of skeleton.c was as follows:
*
* skeleton.c Written 1993 by Donald Becker.
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency. This software may only be used
* and distributed according to the terms of the GNU General Public License as
* modified by SRC, incorporated herein by reference.
*
* **********************
*
* For more details, see drivers/net/arcnet.c
*
* **********************
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/arcdevice.h>
#define VERSION "arcnet: COM90xx chipset support\n"
/* Define this to speed up the autoprobe by assuming if only one io port and
* shmem are left in the list at Stage 5, they must correspond to each
* other.
*
* This is undefined by default because it might not always be true, and the
* extra check makes the autoprobe even more careful. Speed demons can turn
* it on - I think it should be fine if you only have one ARCnet card
* installed.
*
* If no ARCnet cards are installed, this delay never happens anyway and thus
* the option has no effect.
*/
#undef FAST_PROBE
/* Internal function declarations */
static int com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem *);
static void com90xx_command(struct net_device *dev, int command);
static int com90xx_status(struct net_device *dev);
static void com90xx_setmask(struct net_device *dev, int mask);
static int com90xx_reset(struct net_device *dev, int really_reset);
static void com90xx_copy_to_card(struct net_device *dev, int bufnum, int offset,
void *buf, int count);
static void com90xx_copy_from_card(struct net_device *dev, int bufnum, int offset,
void *buf, int count);
/* Known ARCnet cards */
static struct net_device *cards[16];
static int numcards;
/* Handy defines for ARCnet specific stuff */
/* The number of low I/O ports used by the card */
#define ARCNET_TOTAL_SIZE 16
/* Amount of I/O memory used by the card */
#define BUFFER_SIZE (512)
#define MIRROR_SIZE (BUFFER_SIZE*4)
/* COM 9026 controller chip --> ARCnet register addresses */
#define _INTMASK (ioaddr+0) /* writable */
#define _STATUS (ioaddr+0) /* readable */
#define _COMMAND (ioaddr+1) /* writable, returns random vals on read (?) */
#define _CONFIG (ioaddr+2) /* Configuration register */
#define _RESET (ioaddr+8) /* software reset (on read) */
#define _MEMDATA (ioaddr+12) /* Data port for IO-mapped memory */
#define _ADDR_HI (ioaddr+15) /* Control registers for said */
#define _ADDR_LO (ioaddr+14)
#undef ASTATUS
#undef ACOMMAND
#undef AINTMASK
#define ASTATUS() inb(_STATUS)
#define ACOMMAND(cmd) outb((cmd),_COMMAND)
#define AINTMASK(msk) outb((msk),_INTMASK)
static int com90xx_skip_probe __initdata = 0;
/* Module parameters */
static int io; /* use the insmod io= irq= shmem= options */
static int irq;
static int shmem;
static char device[9]; /* use eg. device=arc1 to change name */
module_param(io, int, 0);
module_param(irq, int, 0);
module_param(shmem, int, 0);
module_param_string(device, device, sizeof(device), 0);
/*
 * Autoprobe for COM90xx cards in five stages: reset every candidate I/O
 * port, wait for the resets, discard shared-memory windows without the
 * card signature, then match each surviving port with the window that
 * puts its signature back after a reset and register a net_device for
 * each match.  Candidate lists can be narrowed with the io=/irq=/shmem=
 * module parameters.
 */
static void __init com90xx_probe(void)
{
	int count, status, ioaddr, numprint, airq, openparen = 0;
	unsigned long airqmask;
	int ports[(0x3f0 - 0x200) / 16 + 1] =
	{0};
	unsigned long *shmems;
	void __iomem **iomem;
	int numports, numshmems, *port;
	u_long *p;
	int index;

	if (!io && !irq && !shmem && !*device && com90xx_skip_probe)
		return;

	shmems = kzalloc(((0x100000-0xa0000) / 0x800) * sizeof(unsigned long),
			 GFP_KERNEL);
	if (!shmems)
		return;
	iomem = kzalloc(((0x100000-0xa0000) / 0x800) * sizeof(void __iomem *),
			 GFP_KERNEL);
	if (!iomem) {
		kfree(shmems);
		return;
	}
	BUGLVL(D_NORMAL) printk(VERSION);
	/* set up the arrays where we'll store the possible probe addresses */
	numports = numshmems = 0;
	if (io)
		ports[numports++] = io;
	else
		for (count = 0x200; count <= 0x3f0; count += 16)
			ports[numports++] = count;
	if (shmem)
		shmems[numshmems++] = shmem;
	else
		for (count = 0xA0000; count <= 0xFF800; count += 2048)
			shmems[numshmems++] = count;
	/* Stage 1: abandon any reserved ports, or ones with status==0xFF
	 * (empty), and reset any others by reading the reset port.
	 */
	numprint = -1;
	for (port = &ports[0]; port - ports < numports; port++) {
		numprint++;
		numprint %= 8;
		if (!numprint) {
			BUGMSG2(D_INIT, "\n");
			BUGMSG2(D_INIT, "S1: ");
		}
		BUGMSG2(D_INIT, "%Xh ", *port);
		ioaddr = *port;
		if (!request_region(*port, ARCNET_TOTAL_SIZE, "arcnet (90xx)")) {
			BUGMSG2(D_INIT_REASONS, "(request_region)\n");
			BUGMSG2(D_INIT_REASONS, "S1: ");
			BUGLVL(D_INIT_REASONS) numprint = 0;
			/* drop this port: overwrite it with the last one */
			*port-- = ports[--numports];
			continue;
		}
		if (ASTATUS() == 0xFF) {
			BUGMSG2(D_INIT_REASONS, "(empty)\n");
			BUGMSG2(D_INIT_REASONS, "S1: ");
			BUGLVL(D_INIT_REASONS) numprint = 0;
			release_region(*port, ARCNET_TOTAL_SIZE);
			*port-- = ports[--numports];
			continue;
		}
		inb(_RESET);	/* begin resetting card */
		BUGMSG2(D_INIT_REASONS, "\n");
		BUGMSG2(D_INIT_REASONS, "S1: ");
		BUGLVL(D_INIT_REASONS) numprint = 0;
	}
	BUGMSG2(D_INIT, "\n");
	if (!numports) {
		BUGMSG2(D_NORMAL, "S1: No ARCnet cards found.\n");
		kfree(shmems);
		kfree(iomem);
		return;
	}
	/* Stage 2: we have now reset any possible ARCnet cards, so we can't
	 * do anything until they finish. If D_INIT, print the list of
	 * cards that are left.
	 */
	numprint = -1;
	for (port = &ports[0]; port < ports + numports; port++) {
		numprint++;
		numprint %= 8;
		if (!numprint) {
			BUGMSG2(D_INIT, "\n");
			BUGMSG2(D_INIT, "S2: ");
		}
		BUGMSG2(D_INIT, "%Xh ", *port);
	}
	BUGMSG2(D_INIT, "\n");
	mdelay(RESETtime);
	/* Stage 3: abandon any shmem addresses that don't have the signature
	 * 0xD1 byte in the right place, or are read-only.
	 */
	numprint = -1;
	for (index = 0, p = &shmems[0]; index < numshmems; p++, index++) {
		void __iomem *base;
		numprint++;
		numprint %= 8;
		if (!numprint) {
			BUGMSG2(D_INIT, "\n");
			BUGMSG2(D_INIT, "S3: ");
		}
		BUGMSG2(D_INIT, "%lXh ", *p);
		if (!request_mem_region(*p, MIRROR_SIZE, "arcnet (90xx)")) {
			BUGMSG2(D_INIT_REASONS, "(request_mem_region)\n");
			BUGMSG2(D_INIT_REASONS, "Stage 3: ");
			BUGLVL(D_INIT_REASONS) numprint = 0;
			goto out;
		}
		base = ioremap(*p, MIRROR_SIZE);
		if (!base) {
			BUGMSG2(D_INIT_REASONS, "(ioremap)\n");
			BUGMSG2(D_INIT_REASONS, "Stage 3: ");
			BUGLVL(D_INIT_REASONS) numprint = 0;
			goto out1;
		}
		if (readb(base) != TESTvalue) {
			BUGMSG2(D_INIT_REASONS, "(%02Xh != %02Xh)\n",
				readb(base), TESTvalue);
			BUGMSG2(D_INIT_REASONS, "S3: ");
			BUGLVL(D_INIT_REASONS) numprint = 0;
			goto out2;
		}
		/* By writing 0x42 to the TESTvalue location, we also make
		 * sure no "mirror" shmem areas show up - if they occur
		 * in another pass through this loop, they will be discarded
		 * because *cptr != TESTvalue.
		 */
		writeb(0x42, base);
		if (readb(base) != 0x42) {
			BUGMSG2(D_INIT_REASONS, "(read only)\n");
			BUGMSG2(D_INIT_REASONS, "S3: ");
			goto out2;
		}
		BUGMSG2(D_INIT_REASONS, "\n");
		BUGMSG2(D_INIT_REASONS, "S3: ");
		BUGLVL(D_INIT_REASONS) numprint = 0;
		iomem[index] = base;
		continue;
	out2:
		iounmap(base);
	out1:
		release_mem_region(*p, MIRROR_SIZE);
	out:
		/* drop this shmem: overwrite it with the last one */
		*p-- = shmems[--numshmems];
		index--;
	}
	BUGMSG2(D_INIT, "\n");
	if (!numshmems) {
		BUGMSG2(D_NORMAL, "S3: No ARCnet cards found.\n");
		for (port = &ports[0]; port < ports + numports; port++)
			release_region(*port, ARCNET_TOTAL_SIZE);
		kfree(shmems);
		kfree(iomem);
		return;
	}
	/* Stage 4: something of a dummy, to report the shmems that are
	 * still possible after stage 3.
	 */
	numprint = -1;
	for (p = &shmems[0]; p < shmems + numshmems; p++) {
		numprint++;
		numprint %= 8;
		if (!numprint) {
			BUGMSG2(D_INIT, "\n");
			BUGMSG2(D_INIT, "S4: ");
		}
		BUGMSG2(D_INIT, "%lXh ", *p);
	}
	BUGMSG2(D_INIT, "\n");
	/* Stage 5: for any ports that have the correct status, can disable
	 * the RESET flag, and (if no irq is given) generate an autoirq,
	 * register an ARCnet device.
	 *
	 * Currently, we can only register one device per probe, so quit
	 * after the first one is found.
	 */
	numprint = -1;
	for (port = &ports[0]; port < ports + numports; port++) {
		int found = 0;
		numprint++;
		numprint %= 8;
		if (!numprint) {
			BUGMSG2(D_INIT, "\n");
			BUGMSG2(D_INIT, "S5: ");
		}
		BUGMSG2(D_INIT, "%Xh ", *port);
		ioaddr = *port;
		status = ASTATUS();
		if ((status & 0x9D)
		    != (NORXflag | RECONflag | TXFREEflag | RESETflag)) {
			BUGMSG2(D_INIT_REASONS, "(status=%Xh)\n", status);
			BUGMSG2(D_INIT_REASONS, "S5: ");
			BUGLVL(D_INIT_REASONS) numprint = 0;
			release_region(*port, ARCNET_TOTAL_SIZE);
			*port-- = ports[--numports];
			continue;
		}
		ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
		status = ASTATUS();
		if (status & RESETflag) {
			BUGMSG2(D_INIT_REASONS, " (eternal reset, status=%Xh)\n",
				status);
			BUGMSG2(D_INIT_REASONS, "S5: ");
			BUGLVL(D_INIT_REASONS) numprint = 0;
			release_region(*port, ARCNET_TOTAL_SIZE);
			*port-- = ports[--numports];
			continue;
		}
		/* skip this completely if an IRQ was given, because maybe
		 * we're on a machine that locks during autoirq!
		 */
		if (!irq) {
			/* if we do this, we're sure to get an IRQ since the
			 * card has just reset and the NORXflag is on until
			 * we tell it to start receiving.
			 */
			airqmask = probe_irq_on();
			AINTMASK(NORXflag);
			udelay(1);
			AINTMASK(0);
			airq = probe_irq_off(airqmask);
			if (airq <= 0) {
				BUGMSG2(D_INIT_REASONS, "(airq=%d)\n", airq);
				BUGMSG2(D_INIT_REASONS, "S5: ");
				BUGLVL(D_INIT_REASONS) numprint = 0;
				release_region(*port, ARCNET_TOTAL_SIZE);
				*port-- = ports[--numports];
				continue;
			}
		} else {
			airq = irq;
		}
		BUGMSG2(D_INIT, "(%d,", airq);
		openparen = 1;
		/* Everything seems okay. But which shmem, if any, puts
		 * back its signature byte when the card is reset?
		 *
		 * If there are multiple cards installed, there might be
		 * multiple shmems still in the list.
		 */
#ifdef FAST_PROBE
		if (numports > 1 || numshmems > 1) {
			inb(_RESET);
			mdelay(RESETtime);
		} else {
			/* just one shmem and port, assume they match */
			writeb(TESTvalue, iomem[0]);
		}
#else
		inb(_RESET);
		mdelay(RESETtime);
#endif
		for (index = 0; index < numshmems; index++) {
			u_long ptr = shmems[index];
			void __iomem *base = iomem[index];
			if (readb(base) == TESTvalue) {	/* found one */
				BUGMSG2(D_INIT, "%lXh)\n", *p);
				openparen = 0;
				/* register the card */
				if (com90xx_found(*port, airq, ptr, base) == 0)
					found = 1;
				numprint = -1;
				/* remove shmem from the list */
				shmems[index] = shmems[--numshmems];
				iomem[index] = iomem[numshmems];
				break;	/* go to the next I/O port */
			} else {
				BUGMSG2(D_INIT_REASONS, "%Xh-", readb(base));
			}
		}
		if (openparen) {
			BUGLVL(D_INIT) printk("no matching shmem)\n");
			BUGLVL(D_INIT_REASONS) printk("S5: ");
			BUGLVL(D_INIT_REASONS) numprint = 0;
		}
		if (!found)
			release_region(*port, ARCNET_TOTAL_SIZE);
		*port-- = ports[--numports];
	}
	BUGLVL(D_INIT_REASONS) printk("\n");
	/* Now put back TESTvalue on all leftover shmems. */
	for (index = 0; index < numshmems; index++) {
		writeb(TESTvalue, iomem[index]);
		iounmap(iomem[index]);
		release_mem_region(shmems[index], MIRROR_SIZE);
	}
	kfree(shmems);
	kfree(iomem);
}
/*
 * Probe whether the window at @addr mirrors the card's shared memory:
 * returns 1 if the TESTvalue signature is present, 0 if the window is
 * readable but unsigned, -1 if the region is busy or cannot be mapped.
 */
static int check_mirror(unsigned long addr, size_t size)
{
	void __iomem *win;
	int res = -1;

	if (!request_mem_region(addr, size, "arcnet (90xx)"))
		return -1;

	win = ioremap(addr, size);
	if (win) {
		res = (readb(win) == TESTvalue) ? 1 : 0;
		iounmap(win);
	}

	release_mem_region(addr, size);
	return res;
}
/* Set up the struct net_device associated with this card. Called after
 * probing succeeds.  Determines the full mirrored shmem range, reserves
 * memory and IRQ, fills in the arcnet hardware ops and registers the
 * netdev.  Consumes @p / the shmem region in all cases.  Returns 0 or
 * a negative errno.
 */
static int __init com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem *p)
{
	struct net_device *dev = NULL;
	struct arcnet_local *lp;
	u_long first_mirror, last_mirror;
	int mirror_size;

	/* allocate struct net_device */
	dev = alloc_arcdev(device);
	if (!dev) {
		BUGMSG2(D_NORMAL, "com90xx: Can't allocate device!\n");
		iounmap(p);
		release_mem_region(shmem, MIRROR_SIZE);
		return -ENOMEM;
	}
	lp = netdev_priv(dev);
	/* find the real shared memory start/end points, including mirrors */
	/* guess the actual size of one "memory mirror" - the number of
	 * bytes between copies of the shared memory. On most cards, it's
	 * 2k (or there are no mirrors at all) but on some, it's 4k.
	 */
	mirror_size = MIRROR_SIZE;
	if (readb(p) == TESTvalue &&
	    check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 &&
	    check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1)
		mirror_size = 2 * MIRROR_SIZE;
	/* Walk outwards in both directions while the signature repeats. */
	first_mirror = shmem - mirror_size;
	while (check_mirror(first_mirror, mirror_size) == 1)
		first_mirror -= mirror_size;
	first_mirror += mirror_size;
	last_mirror = shmem + mirror_size;
	while (check_mirror(last_mirror, mirror_size) == 1)
		last_mirror += mirror_size;
	last_mirror -= mirror_size;
	dev->mem_start = first_mirror;
	dev->mem_end = last_mirror + MIRROR_SIZE - 1;
	/* Re-reserve the whole mirrored range in one region. */
	iounmap(p);
	release_mem_region(shmem, MIRROR_SIZE);
	if (!request_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1, "arcnet (90xx)"))
		goto err_free_dev;
	/* reserve the irq */
	if (request_irq(airq, arcnet_interrupt, 0, "arcnet (90xx)", dev)) {
		BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", airq);
		goto err_release_mem;
	}
	dev->irq = airq;
	/* Initialize the rest of the device structure. */
	lp->card_name = "COM90xx";
	lp->hw.command = com90xx_command;
	lp->hw.status = com90xx_status;
	lp->hw.intmask = com90xx_setmask;
	lp->hw.reset = com90xx_reset;
	lp->hw.owner = THIS_MODULE;
	lp->hw.copy_to_card = com90xx_copy_to_card;
	lp->hw.copy_from_card = com90xx_copy_from_card;
	lp->mem_start = ioremap(dev->mem_start, dev->mem_end - dev->mem_start + 1);
	if (!lp->mem_start) {
		BUGMSG(D_NORMAL, "Can't remap device memory!\n");
		goto err_free_irq;
	}
	/* get and check the station ID from offset 1 in shmem */
	dev->dev_addr[0] = readb(lp->mem_start + 1);
	dev->base_addr = ioaddr;
	BUGMSG(D_NORMAL, "COM90xx station %02Xh found at %03lXh, IRQ %d, "
	       "ShMem %lXh (%ld*%xh).\n",
	       dev->dev_addr[0],
	       dev->base_addr, dev->irq, dev->mem_start,
	       (dev->mem_end - dev->mem_start + 1) / mirror_size, mirror_size);
	if (register_netdev(dev))
		goto err_unmap;
	cards[numcards++] = dev;
	return 0;
err_unmap:
	iounmap(lp->mem_start);
err_free_irq:
	free_irq(dev->irq, dev);
err_release_mem:
	release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
err_free_dev:
	free_netdev(dev);
	return -EIO;
}
/* HW hook: issue an ARCnet command byte to the COM90xx card. */
static void com90xx_command(struct net_device *dev, int cmd)
{
	/* ioaddr is read implicitly by the ACOMMAND() register macro
	 * (it is otherwise unused here) — do not remove. */
	short ioaddr = dev->base_addr;
	ACOMMAND(cmd);
}
/* HW hook: read the card's status register. */
static int com90xx_status(struct net_device *dev)
{
	/* ioaddr is read implicitly by the ASTATUS() register macro. */
	short ioaddr = dev->base_addr;
	return ASTATUS();
}
/* HW hook: program the card's interrupt mask register. */
static void com90xx_setmask(struct net_device *dev, int mask)
{
	/* ioaddr is read implicitly by the AINTMASK() register macro. */
	short ioaddr = dev->base_addr;
	AINTMASK(mask);
}
/*
 * Do a hardware reset on the card, and set up necessary registers.
 *
 * This should be called as little as possible, because it disrupts the
 * token on the network (causes a RECON) and requires a significant delay.
 *
 * However, it does make sure the card is in a defined state.
 *
 * Returns 0 on success, 1 if the card fails to present its signature
 * byte after the reset.
 */
static int com90xx_reset(struct net_device *dev, int really_reset)
{
	struct arcnet_local *lp = netdev_priv(dev);
	/* ioaddr is read implicitly by the ACOMMAND()/ASTATUS()/_RESET
	 * register macros below. */
	short ioaddr = dev->base_addr;
	BUGMSG(D_INIT, "Resetting (status=%02Xh)\n", ASTATUS());
	if (really_reset) {
		/* reset the card: reading _RESET triggers it, then wait */
		inb(_RESET);
		mdelay(RESETtime);
	}
	ACOMMAND(CFLAGScmd | RESETclear);	/* clear flags & end reset */
	ACOMMAND(CFLAGScmd | CONFIGclear);
	/* don't do this until we verify that it doesn't hurt older cards! */
	/* outb(inb(_CONFIG) | ENABLE16flag, _CONFIG); */
	/* verify that the ARCnet signature byte is present in shmem */
	if (readb(lp->mem_start) != TESTvalue) {
		if (really_reset)
			BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
		return 1;	/* failure */
	}
	/* enable extended (512-byte) packets */
	ACOMMAND(CONFIGcmd | EXTconf);
	/* clean out all the memory to make debugging make more sense :) */
	BUGLVL(D_DURING)
		memset_io(lp->mem_start, 0x42, 2048);
	/* done! return success. */
	return 0;
}
/* HW hook: copy a host buffer into card buffer 'bufnum' (512 bytes each)
 * at 'offset' within the buffer, via memory-mapped I/O. */
static void com90xx_copy_to_card(struct net_device *dev, int bufnum, int offset,
				 void *buf, int count)
{
	struct arcnet_local *lp = netdev_priv(dev);
	void __iomem *memaddr = lp->mem_start + bufnum * 512 + offset;
	/* TIME() optionally measures the copy for debug builds */
	TIME("memcpy_toio", count, memcpy_toio(memaddr, buf, count));
}
/* HW hook: copy from card buffer 'bufnum' at 'offset' into a host buffer,
 * via memory-mapped I/O. */
static void com90xx_copy_from_card(struct net_device *dev, int bufnum, int offset,
				   void *buf, int count)
{
	struct arcnet_local *lp = netdev_priv(dev);
	void __iomem *memaddr = lp->mem_start + bufnum * 512 + offset;
	/* TIME() optionally measures the copy for debug builds */
	TIME("memcpy_fromio", count, memcpy_fromio(buf, memaddr, count));
}
MODULE_LICENSE("GPL");
/*
 * Module entry point: probe for COM90xx cards and fail the load if
 * none were found.
 */
static int __init com90xx_init(void)
{
	/* IRQ 2 is the historical alias for the cascaded IRQ 9 on AT PICs */
	if (irq == 2)
		irq = 9;
	com90xx_probe();
	return numcards ? 0 : -EIO;
}
/*
 * Module exit: tear down every registered card, releasing resources in
 * the reverse order of their acquisition during probe.
 */
static void __exit com90xx_exit(void)
{
	int i;

	for (i = 0; i < numcards; i++) {
		struct net_device *dev = cards[i];
		struct arcnet_local *lp = netdev_priv(dev);

		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(lp->mem_start);
		release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
		release_mem_region(dev->mem_start,
				   dev->mem_end - dev->mem_start + 1);
		free_netdev(dev);
	}
}
module_init(com90xx_init);
module_exit(com90xx_exit);
#ifndef MODULE
/*
 * Parse the "com90xx=" kernel command line option: up to three integers
 * (io[, irq[, shmem]]) optionally followed by a device name string.
 * The switch below falls through deliberately so that giving N values
 * sets all of the first N parameters.
 */
static int __init com90xx_setup(char *s)
{
	int ints[8];
	s = get_options(s, 8, ints);
	if (!ints[0] && !*s) {
		printk("com90xx: Disabled.\n");
		return 1;
	}
	switch (ints[0]) {
	default:		/* ERROR */
		printk("com90xx: Too many arguments.\n");
		/* fall through - use the first three values anyway */
	case 3:		/* Mem address */
		shmem = ints[3];
		/* fall through */
	case 2:		/* IRQ */
		irq = ints[2];
		/* fall through */
	case 1:		/* IO address */
		io = ints[1];
	}
	/* any trailing text is taken as the requested interface name */
	if (*s)
		snprintf(device, sizeof(device), "%s", s);
	return 1;
}
__setup("com90xx=", com90xx_setup);
#endif
| gpl-2.0 |
jtpoo3/kernel_asus_flo | arch/sh/mm/asids-debugfs.c | 10468 | 1876 | /*
* debugfs ops for process ASIDs
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2008 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* Provides a debugfs file that lists out the ASIDs currently associated
* with the processes.
*
* In the SH-5 case, if the DM.PC register is examined through the debug
* link, this shows ASID + PC. To make use of this, the PID->ASID
* relationship needs to be known. This is primarily for debugging.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
/*
 * seq_file show handler: print "PID : ASID" for every process that has
 * a user address space (kernel threads with !p->mm are skipped, as is
 * the idle task with pid 0).
 */
static int asids_seq_show(struct seq_file *file, void *iter)
{
	struct task_struct *p;
	read_lock(&tasklist_lock);
	for_each_process(p) {
		int pid = p->pid;
		if (unlikely(!pid))
			continue;	/* idle task */
		if (p->mm)
			seq_printf(file, "%5d : %04lx\n", pid,
				   cpu_asid(smp_processor_id(), p->mm));
	}
	read_unlock(&tasklist_lock);
	return 0;
}
/* debugfs open: wire the file up to the single-record seq_file shower. */
static int asids_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, asids_seq_show, inode->i_private);
}
/* File operations for the "asids" debugfs entry (seq_file based). */
static const struct file_operations asids_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= asids_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * Create the read-only "asids" file under the arch debugfs directory.
 * Handles both failure conventions of debugfs_create_file(): an ERR_PTR
 * encoded error and a plain NULL.
 */
static int __init asids_debugfs_init(void)
{
	struct dentry *entry;

	entry = debugfs_create_file("asids", S_IRUSR, arch_debugfs_dir,
				    NULL, &asids_debugfs_fops);
	if (IS_ERR(entry))
		return PTR_ERR(entry);
	if (!entry)
		return -ENOMEM;

	return 0;
}
module_init(asids_debugfs_init);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
c0llal0/kernel_xperia_z | drivers/video/matrox/matroxfb_DAC1064.c | 10724 | 33768 | /*
*
* Hardware accelerated Matrox Millennium I, II, Mystique, G100, G200, G400 and G450.
*
* (c) 1998-2002 Petr Vandrovec <vandrove@vc.cvut.cz>
*
* Portions Copyright (c) 2001 Matrox Graphics Inc.
*
* Version: 1.65 2002/08/14
*
* See matroxfb_base.c for contributors.
*
*/
#include "matroxfb_DAC1064.h"
#include "matroxfb_misc.h"
#include "matroxfb_accel.h"
#include "g450_pll.h"
#include <linux/matroxfb.h>
#ifdef NEED_DAC1064
#define outDAC1064 matroxfb_DAC_out
#define inDAC1064 matroxfb_DAC_in
#define DAC1064_OPT_SCLK_PCI 0x00
#define DAC1064_OPT_SCLK_PLL 0x01
#define DAC1064_OPT_SCLK_EXT 0x02
#define DAC1064_OPT_SCLK_MASK 0x03
#define DAC1064_OPT_GDIV1 0x04 /* maybe it is GDIV2 on G100 ?! */
#define DAC1064_OPT_GDIV3 0x00
#define DAC1064_OPT_MDIV1 0x08
#define DAC1064_OPT_MDIV2 0x00
#define DAC1064_OPT_RESERVED 0x10
static void DAC1064_calcclock(const struct matrox_fb_info *minfo,
unsigned int freq, unsigned int fmax,
unsigned int *in, unsigned int *feed,
unsigned int *post)
{
unsigned int fvco;
unsigned int p;
DBG(__func__)
/* only for devices older than G450 */
fvco = PLL_calcclock(minfo, freq, fmax, in, feed, &p);
p = (1 << p) - 1;
if (fvco <= 100000)
;
else if (fvco <= 140000)
p |= 0x08;
else if (fvco <= 180000)
p |= 0x10;
else
p |= 0x18;
*post = p;
}
/* DAC register indices saved/restored by this driver. They must stay in
 * POS order: the POS1064_* constants index into both this table and the
 * matching default-value table MGA1064_DAC below. */
static const unsigned char MGA1064_DAC_regs[] = {
		M1064_XCURADDL, M1064_XCURADDH, M1064_XCURCTRL,
		M1064_XCURCOL0RED, M1064_XCURCOL0GREEN, M1064_XCURCOL0BLUE,
		M1064_XCURCOL1RED, M1064_XCURCOL1GREEN, M1064_XCURCOL1BLUE,
		M1064_XCURCOL2RED, M1064_XCURCOL2GREEN, M1064_XCURCOL2BLUE,
		DAC1064_XVREFCTRL, M1064_XMULCTRL, M1064_XPIXCLKCTRL, M1064_XGENCTRL,
		M1064_XMISCCTRL,
		M1064_XGENIOCTRL, M1064_XGENIODATA, M1064_XZOOMCTRL, M1064_XSENSETEST,
		M1064_XCRCBITSEL,
		M1064_XCOLKEYMASKL, M1064_XCOLKEYMASKH, M1064_XCOLKEYL, M1064_XCOLKEYH };
/* Default values for the registers listed in MGA1064_DAC_regs, in the
 * same order (one value per register). */
static const unsigned char MGA1064_DAC[] = {
		0x00, 0x00, M1064_XCURCTRL_DIS,	/* cursor addr + disabled */
		0x00, 0x00, 0x00, 	/* black */
		0xFF, 0xFF, 0xFF,	/* white */
		0xFF, 0x00, 0x00,	/* red */
		0x00, 0,		/* vref, mulctrl (filled in later) */
		M1064_XPIXCLKCTRL_PLL_UP | M1064_XPIXCLKCTRL_EN | M1064_XPIXCLKCTRL_SRC_PLL,
		M1064_XGENCTRL_VS_0 | M1064_XGENCTRL_ALPHA_DIS | M1064_XGENCTRL_BLACK_0IRE | M1064_XGENCTRL_NO_SYNC_ON_GREEN,
		M1064_XMISCCTRL_DAC_8BIT,
		0x00, 0x00, M1064_XZOOMCTRL_1, M1064_XSENSETEST_BCOMP | M1064_XSENSETEST_GCOMP | M1064_XSENSETEST_RCOMP | M1064_XSENSETEST_PDOWN,
		0x00,
		0x00, 0x00, 0xFF, 0xFF};
/*
 * Compute the pixel-clock PLL M/N/P values for 'fout' and stash them in
 * hw state slots 0..2 for a later restore to the DAC.
 */
static void DAC1064_setpclk(struct matrox_fb_info *minfo, unsigned long fout)
{
	unsigned int mnp[3];

	DBG(__func__)

	DAC1064_calcclock(minfo, fout, minfo->max_pixel_clock,
			  &mnp[0], &mnp[1], &mnp[2]);
	minfo->hw.DACclk[0] = mnp[0];
	minfo->hw.DACclk[1] = mnp[1];
	minfo->hw.DACclk[2] = mnp[2];
}
/*
 * Program the system (memory) clock source and, when the PLL source is
 * requested, the system PLL itself. The exact ordering of the PCI
 * OPTION register writes below is critical: the device must not be
 * accessed while MCLK is stopped, so the code switches to the PCI clock
 * first, programs the PLL, waits for lock, and only then selects it.
 *
 * hw->DACclk[3..5] mirror the system PLL M/N/P registers.
 */
static void DAC1064_setmclk(struct matrox_fb_info *minfo, int oscinfo,
			    unsigned long fmem)
{
	u_int32_t mx;
	struct matrox_hw_state *hw = &minfo->hw;
	DBG(__func__)
	if (minfo->devflags.noinit) {
		/* read MCLK and give up... */
		hw->DACclk[3] = inDAC1064(minfo, DAC1064_XSYSPLLM);
		hw->DACclk[4] = inDAC1064(minfo, DAC1064_XSYSPLLN);
		hw->DACclk[5] = inDAC1064(minfo, DAC1064_XSYSPLLP);
		return;
	}
	/* bit 2 set = switch system clock to PCI while reprogramming */
	mx = hw->MXoptionReg | 0x00000004;
	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, mx);
	mx &= ~0x000000BB;
	if (oscinfo & DAC1064_OPT_GDIV1)
		mx |= 0x00000008;
	if (oscinfo & DAC1064_OPT_MDIV1)
		mx |= 0x00000010;
	if (oscinfo & DAC1064_OPT_RESERVED)
		mx |= 0x00000080;
	if ((oscinfo & DAC1064_OPT_SCLK_MASK) == DAC1064_OPT_SCLK_PLL) {
		/* select PCI clock until we have setup oscilator... */
		int clk;
		unsigned int m, n, p;
		/* powerup system PLL, select PCI clock */
		mx |= 0x00000020;
		pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, mx);
		mx &= ~0x00000004;
		pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, mx);
		/* !!! you must not access device if MCLK is not running !!!
		   Doing so cause immediate PCI lockup :-( Maybe they should
		   generate ABORT or I/O (parity...) error and Linux should
		   recover from this... (kill driver/process). But world is not
		   perfect... */
		/* (bit 2 of PCI_OPTION_REG must be 0... and bits 0,1 must not
		   select PLL... because of PLL can be stopped at this time) */
		DAC1064_calcclock(minfo, fmem, minfo->max_pixel_clock, &m, &n, &p);
		outDAC1064(minfo, DAC1064_XSYSPLLM, hw->DACclk[3] = m);
		outDAC1064(minfo, DAC1064_XSYSPLLN, hw->DACclk[4] = n);
		outDAC1064(minfo, DAC1064_XSYSPLLP, hw->DACclk[5] = p);
		/* poll the PLL lock bit (0x40) with a bounded busy loop */
		for (clk = 65536; clk; --clk) {
			if (inDAC1064(minfo, DAC1064_XSYSPLLSTAT) & 0x40)
				break;
		}
		if (!clk)
			printk(KERN_ERR "matroxfb: aiee, SYSPLL not locked\n");
		/* select PLL */
		mx |= 0x00000005;
	} else {
		/* select specified system clock source */
		mx |= oscinfo & DAC1064_OPT_SCLK_MASK;
	}
	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, mx);
	/* clear bit 2: hand the system clock back to the selected source */
	mx &= ~0x00000004;
	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, mx);
	hw->MXoptionReg = mx;
}
#ifdef CONFIG_FB_MATROX_G
/*
 * G450/G550: pick clock sources for CRTC1 and CRTC2 and program the
 * pixel and video PLLs accordingly. Also derives the XPANMODE VCO band
 * from the active pixel clock (different tables for G450 vs G550).
 *
 * crtc1.mnp / crtc2.mnp < 0 means "no clock requested" for that CRTC.
 */
static void g450_set_plls(struct matrox_fb_info *minfo)
{
	u_int32_t c2_ctl;
	unsigned int pxc;
	struct matrox_hw_state *hw = &minfo->hw;
	int pixelmnp;
	int videomnp;
	c2_ctl = hw->crtc2.ctl & ~0x4007;	/* Clear PLL + enable for CRTC2 */
	c2_ctl |= 0x0001;			/* Enable CRTC2 */
	hw->DACreg[POS1064_XPWRCTRL] &= ~0x02;	/* Stop VIDEO PLL */
	pixelmnp = minfo->crtc1.mnp;
	videomnp = minfo->crtc2.mnp;
	if (videomnp < 0) {
		c2_ctl &= ~0x0001;			/* Disable CRTC2 */
		hw->DACreg[POS1064_XPWRCTRL] &= ~0x10;	/* Powerdown CRTC2 */
	} else if (minfo->crtc2.pixclock == minfo->features.pll.ref_freq) {
		c2_ctl |=  0x4002;	/* Use reference directly */
	} else if (videomnp == pixelmnp) {
		c2_ctl |=  0x0004;	/* Use pixel PLL */
	} else {
		if (0 == ((videomnp ^ pixelmnp) & 0xFFFFFF00)) {
			/* PIXEL and VIDEO PLL must not use same frequency. We modify N
			   of PIXEL PLL in such case because of VIDEO PLL may be source
			   of TVO clocks, and chroma subcarrier is derived from its
			   pixel clocks */
			pixelmnp += 0x000100;
		}
		c2_ctl |=  0x0006;	/* Use video PLL */
		hw->DACreg[POS1064_XPWRCTRL] |= 0x02;

		outDAC1064(minfo, M1064_XPWRCTRL, hw->DACreg[POS1064_XPWRCTRL]);
		matroxfb_g450_setpll_cond(minfo, videomnp, M_VIDEO_PLL);
	}

	hw->DACreg[POS1064_XPIXCLKCTRL] &= ~M1064_XPIXCLKCTRL_PLL_UP;
	if (pixelmnp >= 0) {
		hw->DACreg[POS1064_XPIXCLKCTRL] |= M1064_XPIXCLKCTRL_PLL_UP;

		outDAC1064(minfo, M1064_XPIXCLKCTRL, hw->DACreg[POS1064_XPIXCLKCTRL]);
		matroxfb_g450_setpll_cond(minfo, pixelmnp, M_PIXEL_PLL_C);
	}

	/* only touch the CRTC2 control register when something changed */
	if (c2_ctl != hw->crtc2.ctl) {
		hw->crtc2.ctl = c2_ctl;
		mga_outl(0x3C10, c2_ctl);
	}

	/* choose the pixel clock XPANMODE is derived from: CRTC1, or
	   CRTC2 when it drives the DAC output */
	pxc = minfo->crtc1.pixclock;
	if (pxc == 0 || minfo->outputs[2].src == MATROXFB_SRC_CRTC2) {
		pxc = minfo->crtc2.pixclock;
	}
	if (minfo->chip == MGA_G550) {
		if (pxc < 45000) {
			hw->DACreg[POS1064_XPANMODE] = 0x00;	/* 0-50 */
		} else if (pxc < 55000) {
			hw->DACreg[POS1064_XPANMODE] = 0x08;	/* 34-62 */
		} else if (pxc < 70000) {
			hw->DACreg[POS1064_XPANMODE] = 0x10;	/* 42-78 */
		} else if (pxc < 85000) {
			hw->DACreg[POS1064_XPANMODE] = 0x18;	/* 62-92 */
		} else if (pxc < 100000) {
			hw->DACreg[POS1064_XPANMODE] = 0x20;	/* 74-108 */
		} else if (pxc < 115000) {
			hw->DACreg[POS1064_XPANMODE] = 0x28;	/* 94-122 */
		} else if (pxc < 125000) {
			hw->DACreg[POS1064_XPANMODE] = 0x30;	/* 108-132 */
		} else {
			hw->DACreg[POS1064_XPANMODE] = 0x38;	/* 120-168 */
		}
	} else {
		/* G450 */
		if (pxc < 45000) {
			hw->DACreg[POS1064_XPANMODE] = 0x00;	/* 0-54 */
		} else if (pxc < 65000) {
			hw->DACreg[POS1064_XPANMODE] = 0x08;	/* 38-70 */
		} else if (pxc < 85000) {
			hw->DACreg[POS1064_XPANMODE] = 0x10;	/* 56-96 */
		} else if (pxc < 105000) {
			hw->DACreg[POS1064_XPANMODE] = 0x18;	/* 80-114 */
		} else if (pxc < 135000) {
			hw->DACreg[POS1064_XPANMODE] = 0x20;	/* 102-144 */
		} else if (pxc < 160000) {
			hw->DACreg[POS1064_XPANMODE] = 0x28;	/* 132-166 */
		} else if (pxc < 175000) {
			hw->DACreg[POS1064_XPANMODE] = 0x30;	/* 154-182 */
		} else {
			hw->DACreg[POS1064_XPANMODE] = 0x38;	/* 170-204 */
		}
	}
}
#endif
/*
 * Initialize the shadow DAC register state (minfo->hw.DACreg) that is
 * common to all modes: LUT enable, pixel clock source, and the routing
 * of CRTC1/CRTC2 to the three outputs (primary DAC, secondary/TV, DVI).
 * G450-class DACs use XOUTPUTCONN/XPWRCTRL routing; older chips route
 * via XMISCCTRL MAFC bits. Registers are only written to hardware later
 * by DAC1064_global_restore()/DAC1064_restore_1().
 */
void DAC1064_global_init(struct matrox_fb_info *minfo)
{
	struct matrox_hw_state *hw = &minfo->hw;

	hw->DACreg[POS1064_XMISCCTRL] &= M1064_XMISCCTRL_DAC_WIDTHMASK;
	hw->DACreg[POS1064_XMISCCTRL] |= M1064_XMISCCTRL_LUT_EN;
	hw->DACreg[POS1064_XPIXCLKCTRL] = M1064_XPIXCLKCTRL_PLL_UP | M1064_XPIXCLKCTRL_EN | M1064_XPIXCLKCTRL_SRC_PLL;
#ifdef CONFIG_FB_MATROX_G
	if (minfo->devflags.g450dac) {
		hw->DACreg[POS1064_XPWRCTRL] = 0x1F;	/* powerup everything */
		hw->DACreg[POS1064_XOUTPUTCONN] = 0x00;	/* disable outputs */
		hw->DACreg[POS1064_XMISCCTRL] |= M1064_XMISCCTRL_DAC_EN;
		/* output 0: primary analog DAC */
		switch (minfo->outputs[0].src) {
		case MATROXFB_SRC_CRTC1:
		case MATROXFB_SRC_CRTC2:
			hw->DACreg[POS1064_XOUTPUTCONN] |= 0x01;	/* enable output; CRTC1/2 selection is in CRTC2 ctl */
			break;
		case MATROXFB_SRC_NONE:
			hw->DACreg[POS1064_XMISCCTRL] &= ~M1064_XMISCCTRL_DAC_EN;
			break;
		}
		/* output 1: secondary DAC / TV encoder */
		switch (minfo->outputs[1].src) {
		case MATROXFB_SRC_CRTC1:
			hw->DACreg[POS1064_XOUTPUTCONN] |= 0x04;
			break;
		case MATROXFB_SRC_CRTC2:
			if (minfo->outputs[1].mode == MATROXFB_OUTPUT_MODE_MONITOR) {
				hw->DACreg[POS1064_XOUTPUTCONN] |= 0x08;
			} else {
				hw->DACreg[POS1064_XOUTPUTCONN] |= 0x0C;
			}
			break;
		case MATROXFB_SRC_NONE:
			hw->DACreg[POS1064_XPWRCTRL] &= ~0x01;	/* Poweroff DAC2 */
			break;
		}
		/* output 2: DVI/TMDS */
		switch (minfo->outputs[2].src) {
		case MATROXFB_SRC_CRTC1:
			hw->DACreg[POS1064_XOUTPUTCONN] |= 0x20;
			break;
		case MATROXFB_SRC_CRTC2:
			hw->DACreg[POS1064_XOUTPUTCONN] |= 0x40;
			break;
		case MATROXFB_SRC_NONE:
#if 0
			/* HELP! If we boot without DFP connected to DVI, we can
			   poweroff TMDS. But if we boot with DFP connected,
			   TMDS generated clocks are used instead of ALL pixclocks
			   available... If someone knows which register
			   handles it, please reveal this secret to me... */
			hw->DACreg[POS1064_XPWRCTRL] &= ~0x04;	/* Poweroff TMDS */
#endif
			break;
		}
		/* Now set timming related variables... */
		g450_set_plls(minfo);
	} else
#endif
	{
		/* pre-G450: route through XMISCCTRL MAFC field instead */
		if (minfo->outputs[1].src == MATROXFB_SRC_CRTC1) {
			hw->DACreg[POS1064_XPIXCLKCTRL] = M1064_XPIXCLKCTRL_PLL_UP | M1064_XPIXCLKCTRL_EN | M1064_XPIXCLKCTRL_SRC_EXT;
			hw->DACreg[POS1064_XMISCCTRL] |= GX00_XMISCCTRL_MFC_MAFC | G400_XMISCCTRL_VDO_MAFC12;
		} else if (minfo->outputs[1].src == MATROXFB_SRC_CRTC2) {
			hw->DACreg[POS1064_XMISCCTRL] |= GX00_XMISCCTRL_MFC_MAFC | G400_XMISCCTRL_VDO_C2_MAFC12;
		} else if (minfo->outputs[2].src == MATROXFB_SRC_CRTC1)
			hw->DACreg[POS1064_XMISCCTRL] |= GX00_XMISCCTRL_MFC_PANELLINK | G400_XMISCCTRL_VDO_MAFC12;
		else
			hw->DACreg[POS1064_XMISCCTRL] |= GX00_XMISCCTRL_MFC_DIS;

		if (minfo->outputs[0].src != MATROXFB_SRC_NONE)
			hw->DACreg[POS1064_XMISCCTRL] |= M1064_XMISCCTRL_DAC_EN;
	}
}
/*
 * Write the mode-independent shadow DAC registers back to hardware.
 * G400-class devices additionally get the DFP type programmed; G450
 * DACs also need sync control, power and output routing restored.
 */
void DAC1064_global_restore(struct matrox_fb_info *minfo)
{
	struct matrox_hw_state *hw = &minfo->hw;

	outDAC1064(minfo, M1064_XPIXCLKCTRL, hw->DACreg[POS1064_XPIXCLKCTRL]);
	outDAC1064(minfo, M1064_XMISCCTRL, hw->DACreg[POS1064_XMISCCTRL]);
	if (minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG400) {
		outDAC1064(minfo, 0x20, 0x04);
		outDAC1064(minfo, 0x1F, minfo->devflags.dfp_type);
		if (minfo->devflags.g450dac) {
			outDAC1064(minfo, M1064_XSYNCCTRL, 0xCC);
			outDAC1064(minfo, M1064_XPWRCTRL, hw->DACreg[POS1064_XPWRCTRL]);
			outDAC1064(minfo, M1064_XPANMODE, hw->DACreg[POS1064_XPANMODE]);
			outDAC1064(minfo, M1064_XOUTPUTCONN, hw->DACreg[POS1064_XOUTPUTCONN]);
		}
	}
}
/*
 * First-stage DAC setup for a new video mode: load the default register
 * set, pick the pixel-depth multiplexing mode, vref, sync-on-green and
 * cursor base, then run the common global init.
 *
 * Returns 0 on success, 1 for an unsupported pixel depth.
 */
static int DAC1064_init_1(struct matrox_fb_info *minfo, struct my_timming *m)
{
	struct matrox_hw_state *hw = &minfo->hw;

	DBG(__func__)

	/* MGA1064_DAC and MGA1064_DAC_regs are parallel arrays of the
	   same length, so sizing by the regs table is intentional */
	memcpy(hw->DACreg, MGA1064_DAC, sizeof(MGA1064_DAC_regs));
	switch (minfo->fbcon.var.bits_per_pixel) {
		/* case 4: not supported by MGA1064 DAC */
		case 8:
			hw->DACreg[POS1064_XMULCTRL] = M1064_XMULCTRL_DEPTH_8BPP | M1064_XMULCTRL_GRAPHICS_PALETIZED;
			break;
		case 16:
			if (minfo->fbcon.var.green.length == 5)
				hw->DACreg[POS1064_XMULCTRL] = M1064_XMULCTRL_DEPTH_15BPP_1BPP | M1064_XMULCTRL_GRAPHICS_PALETIZED;
			else
				hw->DACreg[POS1064_XMULCTRL] = M1064_XMULCTRL_DEPTH_16BPP | M1064_XMULCTRL_GRAPHICS_PALETIZED;
			break;
		case 24:
			hw->DACreg[POS1064_XMULCTRL] = M1064_XMULCTRL_DEPTH_24BPP | M1064_XMULCTRL_GRAPHICS_PALETIZED;
			break;
		case 32:
			hw->DACreg[POS1064_XMULCTRL] = M1064_XMULCTRL_DEPTH_32BPP | M1064_XMULCTRL_GRAPHICS_PALETIZED;
			break;
		default:
			return 1;	/* unsupported depth */
	}
	hw->DACreg[POS1064_XVREFCTRL] = minfo->features.DAC1064.xvrefctrl;
	hw->DACreg[POS1064_XGENCTRL] &= ~M1064_XGENCTRL_SYNC_ON_GREEN_MASK;
	hw->DACreg[POS1064_XGENCTRL] |= (m->sync & FB_SYNC_ON_GREEN)?M1064_XGENCTRL_SYNC_ON_GREEN:M1064_XGENCTRL_NO_SYNC_ON_GREEN;
	hw->DACreg[POS1064_XCURADDL] = 0;
	hw->DACreg[POS1064_XCURADDH] = 0;

	DAC1064_global_init(minfo);
	return 0;
}
/*
 * Second-stage DAC setup: build the shadow palette (hw->DACpal, 256
 * RGB triplets) appropriate for the selected pixel depth.
 *
 * - >16 bpp: identity ramp over all 256 entries
 * - 15 bpp:  5-bit ramp in entries 0..31, mirrored at 128..159
 * - 16 bpp:  6-bit ramp in entries 0..63 (green has 6 bits)
 * - <=8 bpp: palette cleared; real colors arrive via setcolreg
 *
 * Always returns 0.
 */
static int DAC1064_init_2(struct matrox_fb_info *minfo, struct my_timming *m)
{
	struct matrox_hw_state *hw = &minfo->hw;
	int idx;

	DBG(__func__)

	if (minfo->fbcon.var.bits_per_pixel > 16) {
		/* 256 entries */
		for (idx = 0; idx < 256; idx++) {
			hw->DACpal[idx * 3 + 0] = idx;
			hw->DACpal[idx * 3 + 1] = idx;
			hw->DACpal[idx * 3 + 2] = idx;
		}
	} else if (minfo->fbcon.var.bits_per_pixel > 8) {
		if (minfo->fbcon.var.green.length == 5) {
			/* 0..31, 128..159 */
			for (idx = 0; idx < 32; idx++) {
				int v = idx << 3;

				/* with p15 == 0 */
				hw->DACpal[idx * 3 + 0] = v;
				hw->DACpal[idx * 3 + 1] = v;
				hw->DACpal[idx * 3 + 2] = v;
				/* with p15 == 1 */
				hw->DACpal[(idx + 128) * 3 + 0] = v;
				hw->DACpal[(idx + 128) * 3 + 1] = v;
				hw->DACpal[(idx + 128) * 3 + 2] = v;
			}
		} else {
			for (idx = 0; idx < 64; idx++) {
				/* 0..63; green is 6 bits wide */
				hw->DACpal[idx * 3 + 0] = idx << 3;
				hw->DACpal[idx * 3 + 1] = idx << 2;
				hw->DACpal[idx * 3 + 2] = idx << 3;
			}
		}
	} else {
		memset(hw->DACpal, 0, 768);
	}
	return 0;
}
/*
 * Restore the system PLL (only if it changed) and all shadowed DAC
 * registers except the clock/misc controls, which are handled with the
 * proper ordering by DAC1064_global_restore().
 */
static void DAC1064_restore_1(struct matrox_fb_info *minfo)
{
	struct matrox_hw_state *hw = &minfo->hw;
	CRITFLAGS

	DBG(__func__)

	CRITBEGIN

	/* reprogram the system PLL only when the cached values differ,
	   to avoid an unnecessary relock */
	if ((inDAC1064(minfo, DAC1064_XSYSPLLM) != hw->DACclk[3]) ||
	    (inDAC1064(minfo, DAC1064_XSYSPLLN) != hw->DACclk[4]) ||
	    (inDAC1064(minfo, DAC1064_XSYSPLLP) != hw->DACclk[5])) {
		outDAC1064(minfo, DAC1064_XSYSPLLM, hw->DACclk[3]);
		outDAC1064(minfo, DAC1064_XSYSPLLN, hw->DACclk[4]);
		outDAC1064(minfo, DAC1064_XSYSPLLP, hw->DACclk[5]);
	}
	{
		unsigned int i;

		for (i = 0; i < sizeof(MGA1064_DAC_regs); i++) {
			if ((i != POS1064_XPIXCLKCTRL) && (i != POS1064_XMISCCTRL))
				outDAC1064(minfo, MGA1064_DAC_regs[i], hw->DACreg[i]);
		}
	}

	DAC1064_global_restore(minfo);

	CRITEND
}	/* fixed: removed stray ';' after the closing brace (file-scope
	   empty declaration, invalid before C23) */
/*
 * Second restore stage: on this DAC it only dumps the shadow register
 * and clock state when the driver is built with DEBUG; no hardware is
 * touched here.
 */
static void DAC1064_restore_2(struct matrox_fb_info *minfo)
{
#ifdef DEBUG
	unsigned int i;
#endif

	DBG(__func__)

#ifdef DEBUG
	dprintk(KERN_DEBUG "DAC1064regs ");
	for (i = 0; i < sizeof(MGA1064_DAC_regs); i++) {
		dprintk("R%02X=%02X ", MGA1064_DAC_regs[i], minfo->hw.DACreg[i]);
		if ((i & 0x7) == 0x7) dprintk(KERN_DEBUG "continuing... ");
	}
	dprintk(KERN_DEBUG "DAC1064clk ");
	for (i = 0; i < 6; i++)
		dprintk("C%02X=%02X ", i, minfo->hw.DACclk[i]);
	dprintk("\n");
#endif
}
/*
 * altout .compute hook for pre-G450 DACs: program the pixel PLL for the
 * requested timing and wait (up to ~5 s) for it to lock.
 *
 * The '#define minfo' trick below makes the opaque 'out' cookie usable
 * with the driver's minfo-based accessor macros; it is #undef'ed before
 * the function ends.
 */
static int m1064_compute(void* out, struct my_timming* m) {
#define minfo ((struct matrox_fb_info*)out)
	{
		int i;
		int tmout;
		CRITFLAGS

		DAC1064_setpclk(minfo, m->pixclock);

		CRITBEGIN

		for (i = 0; i < 3; i++)
			outDAC1064(minfo, M1064_XPIXPLLCM + i, minfo->hw.DACclk[i]);
		/* poll the PLL lock bit (0x40), 500000 * 10us =~ 5 s max */
		for (tmout = 500000; tmout; tmout--) {
			if (inDAC1064(minfo, M1064_XPIXPLLSTAT) & 0x40)
				break;
			udelay(10);
		};
		CRITEND

		if (!tmout)
			printk(KERN_ERR "matroxfb: Pixel PLL not locked after 5 secs\n");
	}
#undef minfo
	return 0;
}
/* Primary-output descriptor for pre-G450 DACs. */
static struct matrox_altout m1064 = {
	.name	 = "Primary output",
	.compute = m1064_compute,
};
#ifdef CONFIG_FB_MATROX_G
/*
 * altout .compute hook for G450/G550: if no PLL settings were supplied
 * (mnp < 0), compute them for the requested pixel clock and write back
 * the actually achievable clock. Uses the same '#define minfo' cookie
 * trick as m1064_compute().
 */
static int g450_compute(void* out, struct my_timming* m) {
#define minfo ((struct matrox_fb_info*)out)
	if (m->mnp < 0) {
		m->mnp = matroxfb_g450_setclk(minfo, m->pixclock, (m->crtc == MATROXFB_SRC_CRTC1) ? M_PIXEL_PLL_C : M_VIDEO_PLL);
		if (m->mnp >= 0) {
			m->pixclock = g450_mnp2f(minfo, m->mnp);
		}
	}
#undef minfo
	return 0;
}
/* Primary-output descriptor for G450/G550 DACs. */
static struct matrox_altout g450out = {
	.name	 = "Primary output",
	.compute = g450_compute,
};
#endif
#endif /* NEED_DAC1064 */
#ifdef CONFIG_FB_MATROX_MYSTIQUE
/*
 * Full mode init for the Mystique (MGA1064): DAC stage 1, VGA core,
 * sync polarity bits in MiscOutReg/CRTCEXT3, then DAC stage 2.
 * Returns 0 on success, 1 on any stage failure.
 */
static int MGA1064_init(struct matrox_fb_info *minfo, struct my_timming *m)
{
	struct matrox_hw_state *hw = &minfo->hw;

	DBG(__func__)

	if (DAC1064_init_1(minfo, m)) return 1;
	if (matroxfb_vgaHWinit(minfo, m)) return 1;

	/* sync bits default to active-low; clear for active-high */
	hw->MiscOutReg = 0xCB;
	if (m->sync & FB_SYNC_HOR_HIGH_ACT)
		hw->MiscOutReg &= ~0x40;
	if (m->sync & FB_SYNC_VERT_HIGH_ACT)
		hw->MiscOutReg &= ~0x80;
	if (m->sync & FB_SYNC_COMP_HIGH_ACT) /* should be only FB_SYNC_COMP */
		hw->CRTCEXT[3] |= 0x40;

	if (DAC1064_init_2(minfo, m)) return 1;
	return 0;
}
#endif
#ifdef CONFIG_FB_MATROX_G
/*
 * Full mode init for G100-class chips; identical flow to MGA1064_init()
 * except for a different MiscOutReg base value and clearing the G-series
 * bit 0x2000 in MXoptionReg before VGA init.
 * Returns 0 on success, 1 on any stage failure.
 */
static int MGAG100_init(struct matrox_fb_info *minfo, struct my_timming *m)
{
	struct matrox_hw_state *hw = &minfo->hw;

	DBG(__func__)

	if (DAC1064_init_1(minfo, m)) return 1;
	hw->MXoptionReg &= ~0x2000;
	if (matroxfb_vgaHWinit(minfo, m)) return 1;

	/* sync bits default to active-low; clear for active-high */
	hw->MiscOutReg = 0xEF;
	if (m->sync & FB_SYNC_HOR_HIGH_ACT)
		hw->MiscOutReg &= ~0x40;
	if (m->sync & FB_SYNC_VERT_HIGH_ACT)
		hw->MiscOutReg &= ~0x80;
	if (m->sync & FB_SYNC_COMP_HIGH_ACT) /* should be only FB_SYNC_COMP */
		hw->CRTCEXT[3] |= 0x40;

	if (DAC1064_init_2(minfo, m)) return 1;
	return 0;
}
#endif /* G */
#ifdef CONFIG_FB_MATROX_MYSTIQUE
/*
 * Fill in the Mystique RAMDAC PLL limits and program the memory clock.
 * Values are the chip's PLL constraints (reference 14.318 MHz, feedback
 * divider 100..127, etc.).
 */
static void MGA1064_ramdac_init(struct matrox_fb_info *minfo)
{

	DBG(__func__)

	/* minfo->features.DAC1064.vco_freq_min = 120000; */
	minfo->features.pll.vco_freq_min = 62000;
	minfo->features.pll.ref_freq = 14318;
	minfo->features.pll.feed_div_min = 100;
	minfo->features.pll.feed_div_max = 127;
	minfo->features.pll.in_div_min = 1;
	minfo->features.pll.in_div_max = 31;
	minfo->features.pll.post_shift_max = 3;
	minfo->features.DAC1064.xvrefctrl = DAC1064_XVREFCTRL_EXTERNAL;
	/* maybe cmdline MCLK= ?, doc says gclk=44MHz, mclk=66MHz... it was 55/83 with old values */
	DAC1064_setmclk(minfo, DAC1064_OPT_MDIV2 | DAC1064_OPT_GDIV3 | DAC1064_OPT_SCLK_PLL, 133333);
}
#endif
#ifdef CONFIG_FB_MATROX_G
/* BIOS environ: values mirrored from the card BIOS's scratch area. */
static int x7AF4 = 0x10;	/* flags, maybe 0x10 = SDRAM, 0x00 = SGRAM??? */
				/* G100 wants 0x10, G200 SGRAM does not care... */
#if 0
static int def50 = 0;	/* reg50, & 0x0F, & 0x3000 (only 0x0000, 0x1000, 0x2000 (0x3000 disallowed and treated as 0) */
#endif
/*
 * Program pixel PLL A/B/C (selected by flags & 3) with the given M/N/P
 * and switch the pixel clock source (flags & 0x0C) to PCI/PLL/external.
 * The PLL oscillator is temporarily disabled around the reprogramming
 * and the lock bit is polled with a bounded busy loop.
 */
static void MGAG100_progPixClock(const struct matrox_fb_info *minfo, int flags,
				 int m, int n, int p)
{
	int reg;
	int selClk;
	int clk;

	DBG(__func__)

	outDAC1064(minfo, M1064_XPIXCLKCTRL, inDAC1064(minfo, M1064_XPIXCLKCTRL) | M1064_XPIXCLKCTRL_DIS |
		   M1064_XPIXCLKCTRL_PLL_UP);
	/* pick the M register of PLL A, B or C; N and P follow it */
	switch (flags & 3) {
		case 0:		reg = M1064_XPIXPLLAM; break;
		case 1:		reg = M1064_XPIXPLLBM; break;
		default:	reg = M1064_XPIXPLLCM; break;
	}
	outDAC1064(minfo, reg++, m);
	outDAC1064(minfo, reg++, n);
	outDAC1064(minfo, reg, p);
	selClk = mga_inb(M_MISC_REG_READ) & ~0xC;
	/* there should be flags & 0x03 & case 0/1/else */
	/* and we should first select source and after that we should wait for PLL */
	/* and we are waiting for PLL with oscilator disabled... Is it right? */
	switch (flags & 0x03) {
		case 0x00:	break;
		case 0x01:	selClk |= 4; break;
		default:	selClk |= 0x0C; break;
	}
	mga_outb(M_MISC_REG, selClk);
	/* wait for lock: 500000 * 10us =~ 5 s max */
	for (clk = 500000; clk; clk--) {
		if (inDAC1064(minfo, M1064_XPIXPLLSTAT) & 0x40)
			break;
		udelay(10);
	};
	if (!clk)
		printk(KERN_ERR "matroxfb: Pixel PLL%c not locked after usual time\n", (reg-M1064_XPIXPLLAM-2)/4 + 'A');
	selClk = inDAC1064(minfo, M1064_XPIXCLKCTRL) & ~M1064_XPIXCLKCTRL_SRC_MASK;
	switch (flags & 0x0C) {
		case 0x00:	selClk |= M1064_XPIXCLKCTRL_SRC_PCI; break;
		case 0x04:	selClk |= M1064_XPIXCLKCTRL_SRC_PLL; break;
		default:	selClk |= M1064_XPIXCLKCTRL_SRC_EXT; break;
	}
	outDAC1064(minfo, M1064_XPIXCLKCTRL, selClk);
	/* re-enable the oscillator now that the source is selected */
	outDAC1064(minfo, M1064_XPIXCLKCTRL, inDAC1064(minfo, M1064_XPIXCLKCTRL) & ~M1064_XPIXCLKCTRL_DIS);
}
/*
 * Convenience wrapper: compute M/N/P for the requested pixel clock and
 * program the PLL selected by 'flags'.
 */
static void MGAG100_setPixClock(const struct matrox_fb_info *minfo, int flags,
				int freq)
{
	unsigned int mval, nval, pval;

	DBG(__func__)

	DAC1064_calcclock(minfo, freq, minfo->max_pixel_clock,
			  &mval, &nval, &pval);
	MGAG100_progPixClock(minfo, flags, mval, nval, pval);
}
#endif
#ifdef CONFIG_FB_MATROX_MYSTIQUE
/*
 * One-time Mystique setup before the first mode switch: capability
 * flags, primary output hookup, PCI OPTION register defaults and the
 * initial memory-interface power-up sequence. Honors the noinit/novga/
 * nobios/nopciretry module options. Always returns 0.
 */
static int MGA1064_preinit(struct matrox_fb_info *minfo)
{
	static const int vxres_mystique[] = { 512,        640, 768,  800,  832,  960,
					     1024, 1152, 1280,      1600, 1664, 1920,
					     2048,    0};
	struct matrox_hw_state *hw = &minfo->hw;

	DBG(__func__)

	/* minfo->capable.cfb4 = 0; ... preinitialized by 0 */
	minfo->capable.text = 1;
	minfo->capable.vxres = vxres_mystique;

	minfo->outputs[0].output = &m1064;
	minfo->outputs[0].src = minfo->outputs[0].default_src;
	minfo->outputs[0].data = minfo;
	minfo->outputs[0].mode = MATROXFB_OUTPUT_MODE_MONITOR;

	if (minfo->devflags.noinit)
		return 0;	/* do not modify settings */
	hw->MXoptionReg &= 0xC0000100;
	hw->MXoptionReg |= 0x00094E20;
	if (minfo->devflags.novga)
		hw->MXoptionReg &= ~0x00000100;
	if (minfo->devflags.nobios)
		hw->MXoptionReg &= ~0x40000000;
	if (minfo->devflags.nopciretry)
		hw->MXoptionReg |=  0x20000000;
	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
	mga_setr(M_SEQ_INDEX, 0x01, 0x20);
	/* memory interface power-up: CTLWTST then staged MACCESS writes
	   with settle delays */
	mga_outl(M_CTLWTST, 0x00000000);
	udelay(200);
	mga_outl(M_MACCESS, 0x00008000);
	udelay(100);
	mga_outl(M_MACCESS, 0x0000C000);
	return 0;
}
/* Chip reset hook for the Mystique: just reinitializes the RAMDAC. */
static void MGA1064_reset(struct matrox_fb_info *minfo)
{

	DBG(__func__);

	MGA1064_ramdac_init(minfo);
}
#endif
#ifdef CONFIG_FB_MATROX_G
/*
 * G450: initialize the memory/system clocks. All clocks are first
 * parked on the PCI clock so the PLLs can be reprogrammed safely, then
 * the video PLL is set up (or powered down when OPTION3 shows nothing
 * uses it), the system PLL is programmed, and finally the real clock
 * sources from OPTION3 are restored.
 */
static void g450_mclk_init(struct matrox_fb_info *minfo)
{
	/* switch all clocks to PCI source */
	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg | 4);
	pci_write_config_dword(minfo->pcidev, PCI_OPTION3_REG, minfo->values.reg.opt3 & ~0x00300C03);
	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);

	/* program the video PLL only if some OPTION3 field selects it */
	if (((minfo->values.reg.opt3 & 0x000003) == 0x000003) ||
	    ((minfo->values.reg.opt3 & 0x000C00) == 0x000C00) ||
	    ((minfo->values.reg.opt3 & 0x300000) == 0x300000)) {
		matroxfb_g450_setclk(minfo, minfo->values.pll.video, M_VIDEO_PLL);
	} else {
		unsigned long flags;
		unsigned int pwr;

		/* nobody uses the video PLL: power it down */
		matroxfb_DAC_lock_irqsave(flags);
		pwr = inDAC1064(minfo, M1064_XPWRCTRL) & ~0x02;
		outDAC1064(minfo, M1064_XPWRCTRL, pwr);
		matroxfb_DAC_unlock_irqrestore(flags);
	}
	matroxfb_g450_setclk(minfo, minfo->values.pll.system, M_SYSTEM_PLL);

	/* switch clocks to their real PLL source(s) */
	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg | 4);
	pci_write_config_dword(minfo->pcidev, PCI_OPTION3_REG, minfo->values.reg.opt3);
	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
}
/*
 * G450: bring up the memory interface. Sequence: disable refresh,
 * program interface parameters with the memory clocks stopped, start
 * the clocks, (optionally) mask the DLL bit for DDR without EMRS/DLL,
 * re-enable refresh, and force a plane-write-mask cycle so the new
 * settings reach the memory chips.
 */
static void g450_memory_init(struct matrox_fb_info *minfo)
{
	/* disable memory refresh */
	minfo->hw.MXoptionReg &= ~0x001F8000;
	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);

	/* set memory interface parameters */
	minfo->hw.MXoptionReg &= ~0x00207E00;
	minfo->hw.MXoptionReg |= 0x00207E00 & minfo->values.reg.opt;
	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
	pci_write_config_dword(minfo->pcidev, PCI_OPTION2_REG, minfo->values.reg.opt2);

	mga_outl(M_CTLWTST, minfo->values.reg.mctlwtst);

	/* first set up memory interface with disabled memory interface clocks */
	pci_write_config_dword(minfo->pcidev, PCI_MEMMISC_REG, minfo->values.reg.memmisc & ~0x80000000U);
	mga_outl(M_MEMRDBK, minfo->values.reg.memrdbk);
	mga_outl(M_MACCESS, minfo->values.reg.maccess);
	/* start memory clocks */
	pci_write_config_dword(minfo->pcidev, PCI_MEMMISC_REG, minfo->values.reg.memmisc | 0x80000000U);

	udelay(200);

	if (minfo->values.memory.ddr && (!minfo->values.memory.emrswen || !minfo->values.memory.dll)) {
		mga_outl(M_MEMRDBK, minfo->values.reg.memrdbk & ~0x1000);
	}
	mga_outl(M_MACCESS, minfo->values.reg.maccess | 0x8000);

	udelay(200);

	minfo->hw.MXoptionReg |= 0x001F8000 & minfo->values.reg.opt;
	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);

	/* value is written to memory chips only if old != new */
	mga_outl(M_PLNWT, 0);
	mga_outl(M_PLNWT, ~0);

	if (minfo->values.reg.mctlwtst != minfo->values.reg.mctlwtst_core) {
		mga_outl(M_CTLWTST, minfo->values.reg.mctlwtst_core);
	}
}
/*
 * G450: one-time setup before the first mode switch. Programs the PCI
 * OPTION defaults, then — with CRTC1, CRTC2 and the hardware cursor
 * temporarily stopped — initializes the system/memory clocks and the
 * memory interface, sets legacy VGA pixel clocks for DOSEmu/VMware, and
 * restarts everything it stopped.
 *
 * Fix vs. original: dropped the redundant trailing 'return;' at the end
 * of this void function.
 */
static void g450_preinit(struct matrox_fb_info *minfo)
{
	u_int32_t c2ctl;
	u_int8_t curctl;
	u_int8_t c1ctl;

	/* minfo->hw.MXoptionReg = minfo->values.reg.opt; */
	minfo->hw.MXoptionReg &= 0xC0000100;
	minfo->hw.MXoptionReg |= 0x00000020;
	if (minfo->devflags.novga)
		minfo->hw.MXoptionReg &= ~0x00000100;
	if (minfo->devflags.nobios)
		minfo->hw.MXoptionReg &= ~0x40000000;
	if (minfo->devflags.nopciretry)
		minfo->hw.MXoptionReg |=  0x20000000;
	minfo->hw.MXoptionReg |= minfo->values.reg.opt & 0x03400040;
	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);

	/* Init system clocks */

	/* stop crtc2 */
	c2ctl = mga_inl(M_C2CTL);
	mga_outl(M_C2CTL, c2ctl & ~1);
	/* stop cursor */
	curctl = inDAC1064(minfo, M1064_XCURCTRL);
	outDAC1064(minfo, M1064_XCURCTRL, 0);
	/* stop crtc1 */
	c1ctl = mga_readr(M_SEQ_INDEX, 1);
	mga_setr(M_SEQ_INDEX, 1, c1ctl | 0x20);

	g450_mclk_init(minfo);
	g450_memory_init(minfo);

	/* set legacy VGA clock sources for DOSEmu or VMware... */
	matroxfb_g450_setclk(minfo, 25175, M_PIXEL_PLL_A);
	matroxfb_g450_setclk(minfo, 28322, M_PIXEL_PLL_B);

	/* restore crtc1 */
	mga_setr(M_SEQ_INDEX, 1, c1ctl);

	/* restore cursor */
	outDAC1064(minfo, M1064_XCURCTRL, curctl);

	/* restore crtc2 */
	mga_outl(M_C2CTL, c2ctl);
}
static int MGAG100_preinit(struct matrox_fb_info *minfo)
{
static const int vxres_g100[] = { 512, 640, 768, 800, 832, 960,
1024, 1152, 1280, 1600, 1664, 1920,
2048, 0};
struct matrox_hw_state *hw = &minfo->hw;
u_int32_t reg50;
#if 0
u_int32_t q;
#endif
DBG(__func__)
/* there are some instabilities if in_div > 19 && vco < 61000 */
if (minfo->devflags.g450dac) {
minfo->features.pll.vco_freq_min = 130000; /* my sample: >118 */
} else {
minfo->features.pll.vco_freq_min = 62000;
}
if (!minfo->features.pll.ref_freq) {
minfo->features.pll.ref_freq = 27000;
}
minfo->features.pll.feed_div_min = 7;
minfo->features.pll.feed_div_max = 127;
minfo->features.pll.in_div_min = 1;
minfo->features.pll.in_div_max = 31;
minfo->features.pll.post_shift_max = 3;
minfo->features.DAC1064.xvrefctrl = DAC1064_XVREFCTRL_G100_DEFAULT;
/* minfo->capable.cfb4 = 0; ... preinitialized by 0 */
minfo->capable.text = 1;
minfo->capable.vxres = vxres_g100;
minfo->capable.plnwt = minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG100
? minfo->devflags.sgram : 1;
if (minfo->devflags.g450dac) {
minfo->outputs[0].output = &g450out;
} else {
minfo->outputs[0].output = &m1064;
}
minfo->outputs[0].src = minfo->outputs[0].default_src;
minfo->outputs[0].data = minfo;
minfo->outputs[0].mode = MATROXFB_OUTPUT_MODE_MONITOR;
if (minfo->devflags.g450dac) {
/* we must do this always, BIOS does not do it for us
and accelerator dies without it */
mga_outl(0x1C0C, 0);
}
if (minfo->devflags.noinit)
return 0;
if (minfo->devflags.g450dac) {
g450_preinit(minfo);
return 0;
}
hw->MXoptionReg &= 0xC0000100;
hw->MXoptionReg |= 0x00000020;
if (minfo->devflags.novga)
hw->MXoptionReg &= ~0x00000100;
if (minfo->devflags.nobios)
hw->MXoptionReg &= ~0x40000000;
if (minfo->devflags.nopciretry)
hw->MXoptionReg |= 0x20000000;
pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
DAC1064_setmclk(minfo, DAC1064_OPT_MDIV2 | DAC1064_OPT_GDIV3 | DAC1064_OPT_SCLK_PCI, 133333);
if (minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG100) {
pci_read_config_dword(minfo->pcidev, PCI_OPTION2_REG, ®50);
reg50 &= ~0x3000;
pci_write_config_dword(minfo->pcidev, PCI_OPTION2_REG, reg50);
hw->MXoptionReg |= 0x1080;
pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
mga_outl(M_CTLWTST, minfo->values.reg.mctlwtst);
udelay(100);
mga_outb(0x1C05, 0x00);
mga_outb(0x1C05, 0x80);
udelay(100);
mga_outb(0x1C05, 0x40);
mga_outb(0x1C05, 0xC0);
udelay(100);
reg50 &= ~0xFF;
reg50 |= 0x07;
pci_write_config_dword(minfo->pcidev, PCI_OPTION2_REG, reg50);
/* it should help with G100 */
mga_outb(M_GRAPHICS_INDEX, 6);
mga_outb(M_GRAPHICS_DATA, (mga_inb(M_GRAPHICS_DATA) & 3) | 4);
mga_setr(M_EXTVGA_INDEX, 0x03, 0x81);
mga_setr(M_EXTVGA_INDEX, 0x04, 0x00);
mga_writeb(minfo->video.vbase, 0x0000, 0xAA);
mga_writeb(minfo->video.vbase, 0x0800, 0x55);
mga_writeb(minfo->video.vbase, 0x4000, 0x55);
#if 0
if (mga_readb(minfo->video.vbase, 0x0000) != 0xAA) {
hw->MXoptionReg &= ~0x1000;
}
#endif
hw->MXoptionReg |= 0x00078020;
} else if (minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG200) {
pci_read_config_dword(minfo->pcidev, PCI_OPTION2_REG, ®50);
reg50 &= ~0x3000;
pci_write_config_dword(minfo->pcidev, PCI_OPTION2_REG, reg50);
if (minfo->devflags.memtype == -1)
hw->MXoptionReg |= minfo->values.reg.opt & 0x1C00;
else
hw->MXoptionReg |= (minfo->devflags.memtype & 7) << 10;
if (minfo->devflags.sgram)
hw->MXoptionReg |= 0x4000;
mga_outl(M_CTLWTST, minfo->values.reg.mctlwtst);
mga_outl(M_MEMRDBK, minfo->values.reg.memrdbk);
udelay(200);
mga_outl(M_MACCESS, 0x00000000);
mga_outl(M_MACCESS, 0x00008000);
udelay(100);
mga_outw(M_MEMRDBK, minfo->values.reg.memrdbk);
hw->MXoptionReg |= 0x00078020;
} else {
pci_read_config_dword(minfo->pcidev, PCI_OPTION2_REG, ®50);
reg50 &= ~0x00000100;
reg50 |= 0x00000000;
pci_write_config_dword(minfo->pcidev, PCI_OPTION2_REG, reg50);
if (minfo->devflags.memtype == -1)
hw->MXoptionReg |= minfo->values.reg.opt & 0x1C00;
else
hw->MXoptionReg |= (minfo->devflags.memtype & 7) << 10;
if (minfo->devflags.sgram)
hw->MXoptionReg |= 0x4000;
mga_outl(M_CTLWTST, minfo->values.reg.mctlwtst);
mga_outl(M_MEMRDBK, minfo->values.reg.memrdbk);
udelay(200);
mga_outl(M_MACCESS, 0x00000000);
mga_outl(M_MACCESS, 0x00008000);
udelay(100);
mga_outl(M_MEMRDBK, minfo->values.reg.memrdbk);
hw->MXoptionReg |= 0x00040020;
}
pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
return 0;
}
/*
 * Reset a G100-family chip: optional IBM 82351 PCI-bridge workaround,
 * system clock (MCLK) setup, lazy DFP-type probe on G400, and default
 * pixel clocks for the non-G450 DAC path.
 *
 * NOTE(review): the G100_BROKEN_IBM_82351 section contains pseudo-code
 * ("find 1014/22 ...") and will not compile if that macro is defined.
 */
static void MGAG100_reset(struct matrox_fb_info *minfo)
{
	u_int8_t b;
	struct matrox_hw_state *hw = &minfo->hw;

	DBG(__func__)

	{
#ifdef G100_BROKEN_IBM_82351
		u_int32_t d;

		/* pseudo-code placeholder: locate an IBM 1014:0022 bridge */
		find 1014/22 (IBM/82351);	/* if found and bridging Matrox, do some strange stuff */
		pci_read_config_byte(ibm, PCI_SECONDARY_BUS, &b);
		if (b == minfo->pcidev->bus->number) {
			pci_write_config_byte(ibm, PCI_COMMAND+1, 0);	/* disable back-to-back & SERR */
			pci_write_config_byte(ibm, 0x41, 0xF4);		/* ??? */
			pci_write_config_byte(ibm, PCI_IO_BASE, 0xF0);	/* ??? */
			pci_write_config_byte(ibm, PCI_IO_LIMIT, 0x00);	/* ??? */
		}
#endif
		if (!minfo->devflags.noinit) {
			/* NOTE(review): x7AF4 is file-scope state read elsewhere
			 * (presumably a BIOS/strap word) — confirm its source. */
			if (x7AF4 & 8) {
				hw->MXoptionReg |= 0x40;	/* FIXME... */
				pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
			}
			mga_setr(M_EXTVGA_INDEX, 0x06, 0x00);
		}
	}
	if (minfo->devflags.g450dac) {
		/* either leave MCLK as is... or they were set in preinit */
		hw->DACclk[3] = inDAC1064(minfo, DAC1064_XSYSPLLM);
		hw->DACclk[4] = inDAC1064(minfo, DAC1064_XSYSPLLN);
		hw->DACclk[5] = inDAC1064(minfo, DAC1064_XSYSPLLP);
	} else {
		DAC1064_setmclk(minfo, DAC1064_OPT_RESERVED | DAC1064_OPT_MDIV2 | DAC1064_OPT_GDIV1 | DAC1064_OPT_SCLK_PLL, 133333);
	}
	if (minfo->devflags.accelerator == FB_ACCEL_MATROX_MGAG400) {
		/* probe the DFP type from the DAC only the first time */
		if (minfo->devflags.dfp_type == -1) {
			minfo->devflags.dfp_type = inDAC1064(minfo, 0x1F);
		}
	}
	if (minfo->devflags.noinit)
		return;
	if (minfo->devflags.g450dac) {
		/* G450/G550: nothing further to program here */
	} else {
		/* default VGA-compatible pixel clocks (25.175/28.322 MHz) */
		MGAG100_setPixClock(minfo, 4, 25175);
		MGAG100_setPixClock(minfo, 5, 28322);
		if (x7AF4 & 0x10) {
			b = inDAC1064(minfo, M1064_XGENIODATA) & ~1;
			outDAC1064(minfo, M1064_XGENIODATA, b);
			b = inDAC1064(minfo, M1064_XGENIOCTRL) | 1;
			outDAC1064(minfo, M1064_XGENIOCTRL, b);
		}
	}
}
#endif
#ifdef CONFIG_FB_MATROX_MYSTIQUE
/*
 * Restore full hardware state of a Mystique (MGA1064) board: PCI OPTION
 * register, interrupt mask and cache flush inside the critical section,
 * then DAC, VGA core and the six extended CRTC registers.
 */
static void MGA1064_restore(struct matrox_fb_info *minfo)
{
	int i;
	struct matrox_hw_state *hw = &minfo->hw;

	CRITFLAGS

	DBG(__func__)

	CRITBEGIN

	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);
	mga_outb(M_IEN, 0x00);		/* mask board interrupts */
	mga_outb(M_CACHEFLUSH, 0x00);

	CRITEND

	DAC1064_restore_1(minfo);
	matroxfb_vgaHWrestore(minfo);
	/* NOTE(review): -1 appears to invalidate the cached panning
	 * position so it is re-programmed — confirm against crtc1 code. */
	minfo->crtc1.panpos = -1;
	for (i = 0; i < 6; i++)
		mga_setr(M_EXTVGA_INDEX, i, hw->CRTCEXT[i]);
	DAC1064_restore_2(minfo);
}
#endif
#ifdef CONFIG_FB_MATROX_G
/*
 * Restore full hardware state of a G100/G200/G400 board: PCI OPTION
 * register, DAC, VGA core and the extended CRTC registers.  Boards
 * flagged support32MB additionally get CRTCEXT[8] restored first.
 */
static void MGAG100_restore(struct matrox_fb_info *minfo)
{
	int i;
	struct matrox_hw_state *hw = &minfo->hw;

	CRITFLAGS

	DBG(__func__)

	CRITBEGIN

	pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, hw->MXoptionReg);

	CRITEND

	DAC1064_restore_1(minfo);
	matroxfb_vgaHWrestore(minfo);
	if (minfo->devflags.support32MB)
		mga_setr(M_EXTVGA_INDEX, 8, hw->CRTCEXT[8]);
	/* NOTE(review): -1 appears to invalidate the cached panning
	 * position so it is re-programmed — confirm against crtc1 code. */
	minfo->crtc1.panpos = -1;
	for (i = 0; i < 6; i++)
		mga_setr(M_EXTVGA_INDEX, i, hw->CRTCEXT[i]);
	DAC1064_restore_2(minfo);
}
#endif
#ifdef CONFIG_FB_MATROX_MYSTIQUE
/* Board entry points for Mystique (MGA1064SG): preinit/reset/init/restore. */
struct matrox_switch matrox_mystique = {
	MGA1064_preinit, MGA1064_reset, MGA1064_init, MGA1064_restore,
};
EXPORT_SYMBOL(matrox_mystique);
#endif
#ifdef CONFIG_FB_MATROX_G
/* Board entry points for the G100 family: preinit/reset/init/restore. */
struct matrox_switch matrox_G100 = {
	MGAG100_preinit, MGAG100_reset, MGAG100_init, MGAG100_restore,
};
EXPORT_SYMBOL(matrox_G100);
#endif
#ifdef NEED_DAC1064
EXPORT_SYMBOL(DAC1064_global_init);
EXPORT_SYMBOL(DAC1064_global_restore);
#endif
MODULE_LICENSE("GPL");
| gpl-2.0 |
AOSParadox/android_kernel_oneplus_msm8974 | arch/mips/fw/cfe/cfe_api.c | 11236 | 11211 | /*
* Copyright (C) 2000, 2001, 2002 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*
*
* Broadcom Common Firmware Environment (CFE)
*
* This module contains device function stubs (small routines to
* call the standard "iocb" interface entry point to CFE).
* There should be one routine here per iocb function call.
*
* Authors: Mitch Lichtenberg, Chris Demetriou
*/
#include <asm/fw/cfe/cfe_api.h>
#include "cfe_api_int.h"
/* Cast from a native pointer to a cfe_xptr_t and back. */
#define XPTR_FROM_NATIVE(n) ((cfe_xptr_t) (intptr_t) (n))
#define NATIVE_FROM_XPTR(x) ((void *) (intptr_t) (x))
int cfe_iocb_dispatch(struct cfe_xiocb *xiocb);
/*
* Declare the dispatch function with args of "intptr_t".
* This makes sure whatever model we're compiling in
* puts the pointers in a single register. For example,
* combining -mlong64 and -mips1 or -mips2 would lead to
* trouble, since the handle and IOCB pointer will be
* passed in two registers each, and CFE expects one.
*/
static int (*cfe_dispfunc) (intptr_t handle, intptr_t xiocb);
static u64 cfe_handle;
/* cfe_init - record the CFE firmware handle and entry point for later
 * iocb dispatch.  Always returns 0.
 */
int cfe_init(u64 handle, u64 ept)
{
	cfe_handle = handle;
	cfe_dispfunc = NATIVE_FROM_XPTR(ept);
	return 0;
}
/* cfe_iocb_dispatch - hand an iocb to the firmware entry point.
 * Returns -1 if cfe_init() has not installed a dispatch function yet,
 * otherwise whatever the firmware returns.
 */
int cfe_iocb_dispatch(struct cfe_xiocb *xiocb)
{
	if (cfe_dispfunc == NULL)
		return -1;
	return cfe_dispfunc((intptr_t) cfe_handle, (intptr_t) xiocb);
}
int cfe_close(int handle)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_DEV_CLOSE;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = handle;
xiocb.xiocb_flags = 0;
xiocb.xiocb_psize = 0;
cfe_iocb_dispatch(&xiocb);
return xiocb.xiocb_status;
}
int cfe_cpu_start(int cpu, void (*fn) (void), long sp, long gp, long a1)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_FW_CPUCTL;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = 0;
xiocb.xiocb_flags = 0;
xiocb.xiocb_psize = sizeof(struct xiocb_cpuctl);
xiocb.plist.xiocb_cpuctl.cpu_number = cpu;
xiocb.plist.xiocb_cpuctl.cpu_command = CFE_CPU_CMD_START;
xiocb.plist.xiocb_cpuctl.gp_val = gp;
xiocb.plist.xiocb_cpuctl.sp_val = sp;
xiocb.plist.xiocb_cpuctl.a1_val = a1;
xiocb.plist.xiocb_cpuctl.start_addr = (long) fn;
cfe_iocb_dispatch(&xiocb);
return xiocb.xiocb_status;
}
int cfe_cpu_stop(int cpu)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_FW_CPUCTL;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = 0;
xiocb.xiocb_flags = 0;
xiocb.xiocb_psize = sizeof(struct xiocb_cpuctl);
xiocb.plist.xiocb_cpuctl.cpu_number = cpu;
xiocb.plist.xiocb_cpuctl.cpu_command = CFE_CPU_CMD_STOP;
cfe_iocb_dispatch(&xiocb);
return xiocb.xiocb_status;
}
int cfe_enumenv(int idx, char *name, int namelen, char *val, int vallen)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_ENV_SET;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = 0;
xiocb.xiocb_flags = 0;
xiocb.xiocb_psize = sizeof(struct xiocb_envbuf);
xiocb.plist.xiocb_envbuf.enum_idx = idx;
xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name);
xiocb.plist.xiocb_envbuf.name_length = namelen;
xiocb.plist.xiocb_envbuf.val_ptr = XPTR_FROM_NATIVE(val);
xiocb.plist.xiocb_envbuf.val_length = vallen;
cfe_iocb_dispatch(&xiocb);
return xiocb.xiocb_status;
}
/* cfe_enummem - enumerate firmware memory regions.
 * On success, stores the region's base, size and type into
 * @start/@length/@type and returns 0; otherwise returns the negative
 * firmware status.
 */
int
cfe_enummem(int idx, int flags, u64 *start, u64 *length, u64 *type)
{
	struct cfe_xiocb req;

	req.xiocb_fcode = CFE_CMD_FW_MEMENUM;
	req.xiocb_status = 0;
	req.xiocb_handle = 0;
	req.xiocb_flags = flags;
	req.xiocb_psize = sizeof(struct xiocb_meminfo);
	req.plist.xiocb_meminfo.mi_idx = idx;

	cfe_iocb_dispatch(&req);
	if (req.xiocb_status < 0)
		return req.xiocb_status;

	*start = req.plist.xiocb_meminfo.mi_addr;
	*length = req.plist.xiocb_meminfo.mi_size;
	*type = req.plist.xiocb_meminfo.mi_type;
	return 0;
}
int cfe_exit(int warm, int status)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_FW_RESTART;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = 0;
xiocb.xiocb_flags = warm ? CFE_FLG_WARMSTART : 0;
xiocb.xiocb_psize = sizeof(struct xiocb_exitstat);
xiocb.plist.xiocb_exitstat.status = status;
cfe_iocb_dispatch(&xiocb);
return xiocb.xiocb_status;
}
int cfe_flushcache(int flg)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_FW_FLUSHCACHE;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = 0;
xiocb.xiocb_flags = flg;
xiocb.xiocb_psize = 0;
cfe_iocb_dispatch(&xiocb);
return xiocb.xiocb_status;
}
int cfe_getdevinfo(char *name)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_DEV_GETINFO;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = 0;
xiocb.xiocb_flags = 0;
xiocb.xiocb_psize = sizeof(struct xiocb_buffer);
xiocb.plist.xiocb_buffer.buf_offset = 0;
xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(name);
xiocb.plist.xiocb_buffer.buf_length = strlen(name);
cfe_iocb_dispatch(&xiocb);
if (xiocb.xiocb_status < 0)
return xiocb.xiocb_status;
return xiocb.plist.xiocb_buffer.buf_ioctlcmd;
}
int cfe_getenv(char *name, char *dest, int destlen)
{
struct cfe_xiocb xiocb;
*dest = 0;
xiocb.xiocb_fcode = CFE_CMD_ENV_GET;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = 0;
xiocb.xiocb_flags = 0;
xiocb.xiocb_psize = sizeof(struct xiocb_envbuf);
xiocb.plist.xiocb_envbuf.enum_idx = 0;
xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name);
xiocb.plist.xiocb_envbuf.name_length = strlen(name);
xiocb.plist.xiocb_envbuf.val_ptr = XPTR_FROM_NATIVE(dest);
xiocb.plist.xiocb_envbuf.val_length = destlen;
cfe_iocb_dispatch(&xiocb);
return xiocb.xiocb_status;
}
/* cfe_getfwinfo - query firmware version, memory and board information
 * into @info.  Returns 0 on success or the negative firmware status.
 */
int cfe_getfwinfo(cfe_fwinfo_t * info)
{
	struct cfe_xiocb req;

	req.xiocb_fcode = CFE_CMD_FW_GETINFO;
	req.xiocb_status = 0;
	req.xiocb_handle = 0;
	req.xiocb_flags = 0;
	req.xiocb_psize = sizeof(struct xiocb_fwinfo);

	cfe_iocb_dispatch(&req);
	if (req.xiocb_status < 0)
		return req.xiocb_status;

	info->fwi_version = req.plist.xiocb_fwinfo.fwi_version;
	info->fwi_totalmem = req.plist.xiocb_fwinfo.fwi_totalmem;
	info->fwi_flags = req.plist.xiocb_fwinfo.fwi_flags;
	info->fwi_boardid = req.plist.xiocb_fwinfo.fwi_boardid;
	info->fwi_bootarea_va = req.plist.xiocb_fwinfo.fwi_bootarea_va;
	info->fwi_bootarea_pa = req.plist.xiocb_fwinfo.fwi_bootarea_pa;
	info->fwi_bootarea_size = req.plist.xiocb_fwinfo.fwi_bootarea_size;
#if 0
	/* reserved fields, intentionally not copied */
	info->fwi_reserved1 = req.plist.xiocb_fwinfo.fwi_reserved1;
	info->fwi_reserved2 = req.plist.xiocb_fwinfo.fwi_reserved2;
	info->fwi_reserved3 = req.plist.xiocb_fwinfo.fwi_reserved3;
#endif
	return 0;
}
int cfe_getstdhandle(int flg)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_DEV_GETHANDLE;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = 0;
xiocb.xiocb_flags = flg;
xiocb.xiocb_psize = 0;
cfe_iocb_dispatch(&xiocb);
if (xiocb.xiocb_status < 0)
return xiocb.xiocb_status;
return xiocb.xiocb_handle;
}
int64_t
cfe_getticks(void)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_FW_GETTIME;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = 0;
xiocb.xiocb_flags = 0;
xiocb.xiocb_psize = sizeof(struct xiocb_time);
xiocb.plist.xiocb_time.ticks = 0;
cfe_iocb_dispatch(&xiocb);
return xiocb.plist.xiocb_time.ticks;
}
int cfe_inpstat(int handle)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_DEV_INPSTAT;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = handle;
xiocb.xiocb_flags = 0;
xiocb.xiocb_psize = sizeof(struct xiocb_inpstat);
xiocb.plist.xiocb_inpstat.inp_status = 0;
cfe_iocb_dispatch(&xiocb);
if (xiocb.xiocb_status < 0)
return xiocb.xiocb_status;
return xiocb.plist.xiocb_inpstat.inp_status;
}
/* cfe_ioctl - issue device ioctl @ioctlnum on @handle with @buffer of
 * @length bytes at @offset.  If @retlen is non-NULL, it receives the
 * number of bytes the firmware returned.  Returns the firmware status.
 */
int
cfe_ioctl(int handle, unsigned int ioctlnum, unsigned char *buffer,
	  int length, int *retlen, u64 offset)
{
	struct cfe_xiocb req;

	req.xiocb_fcode = CFE_CMD_DEV_IOCTL;
	req.xiocb_status = 0;
	req.xiocb_handle = handle;
	req.xiocb_flags = 0;
	req.xiocb_psize = sizeof(struct xiocb_buffer);
	req.plist.xiocb_buffer.buf_offset = offset;
	req.plist.xiocb_buffer.buf_ioctlcmd = ioctlnum;
	req.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(buffer);
	req.plist.xiocb_buffer.buf_length = length;

	cfe_iocb_dispatch(&req);

	if (retlen)
		*retlen = req.plist.xiocb_buffer.buf_retlen;
	return req.xiocb_status;
}
int cfe_open(char *name)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_DEV_OPEN;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = 0;
xiocb.xiocb_flags = 0;
xiocb.xiocb_psize = sizeof(struct xiocb_buffer);
xiocb.plist.xiocb_buffer.buf_offset = 0;
xiocb.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(name);
xiocb.plist.xiocb_buffer.buf_length = strlen(name);
cfe_iocb_dispatch(&xiocb);
if (xiocb.xiocb_status < 0)
return xiocb.xiocb_status;
return xiocb.xiocb_handle;
}
/* cfe_read - read up to @length bytes from device @handle; thin
 * wrapper around cfe_readblk() at offset 0.
 * Returns bytes read, or the negative firmware status.
 */
int cfe_read(int handle, unsigned char *buffer, int length)
{
	return cfe_readblk(handle, 0, buffer, length);
}
/* cfe_readblk - read up to @length bytes from device @handle at
 * @offset into @buffer.
 * Returns the number of bytes actually read, or the negative firmware
 * status.
 */
int cfe_readblk(int handle, s64 offset, unsigned char *buffer, int length)
{
	struct cfe_xiocb req;

	req.xiocb_fcode = CFE_CMD_DEV_READ;
	req.xiocb_status = 0;
	req.xiocb_handle = handle;
	req.xiocb_flags = 0;
	req.xiocb_psize = sizeof(struct xiocb_buffer);
	req.plist.xiocb_buffer.buf_offset = offset;
	req.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(buffer);
	req.plist.xiocb_buffer.buf_length = length;

	cfe_iocb_dispatch(&req);
	if (req.xiocb_status < 0)
		return req.xiocb_status;
	return req.plist.xiocb_buffer.buf_retlen;
}
int cfe_setenv(char *name, char *val)
{
struct cfe_xiocb xiocb;
xiocb.xiocb_fcode = CFE_CMD_ENV_SET;
xiocb.xiocb_status = 0;
xiocb.xiocb_handle = 0;
xiocb.xiocb_flags = 0;
xiocb.xiocb_psize = sizeof(struct xiocb_envbuf);
xiocb.plist.xiocb_envbuf.enum_idx = 0;
xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name);
xiocb.plist.xiocb_envbuf.name_length = strlen(name);
xiocb.plist.xiocb_envbuf.val_ptr = XPTR_FROM_NATIVE(val);
xiocb.plist.xiocb_envbuf.val_length = strlen(val);
cfe_iocb_dispatch(&xiocb);
return xiocb.xiocb_status;
}
/* cfe_write - write up to @length bytes to device @handle; thin
 * wrapper around cfe_writeblk() at offset 0.
 * Returns bytes written, or the negative firmware status.
 */
int cfe_write(int handle, unsigned char *buffer, int length)
{
	return cfe_writeblk(handle, 0, buffer, length);
}
/* cfe_writeblk - write up to @length bytes from @buffer to device
 * @handle at @offset.
 * Returns the number of bytes actually written, or the negative
 * firmware status.
 */
int cfe_writeblk(int handle, s64 offset, unsigned char *buffer, int length)
{
	struct cfe_xiocb req;

	req.xiocb_fcode = CFE_CMD_DEV_WRITE;
	req.xiocb_status = 0;
	req.xiocb_handle = handle;
	req.xiocb_flags = 0;
	req.xiocb_psize = sizeof(struct xiocb_buffer);
	req.plist.xiocb_buffer.buf_offset = offset;
	req.plist.xiocb_buffer.buf_ptr = XPTR_FROM_NATIVE(buffer);
	req.plist.xiocb_buffer.buf_length = length;

	cfe_iocb_dispatch(&req);
	if (req.xiocb_status < 0)
		return req.xiocb_status;
	return req.plist.xiocb_buffer.buf_retlen;
}
| gpl-2.0 |
andr00ib/victor-oficial-kernel | drivers/scsi/arcmsr/arcmsr_attr.c | 11236 | 13042 | /*
*******************************************************************************
** O.S : Linux
** FILE NAME : arcmsr_attr.c
** BY : Nick Cheng
** Description: attributes exported to sysfs and device host
*******************************************************************************
** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
**
** Web site: www.areca.com.tw
** E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
*******************************************************************************
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include "arcmsr.h"
struct device_attribute *arcmsr_host_attrs[];
/*
 * sysfs "mu_read" handler: drain bytes from the adapter's request ring
 * buffer (rqbuffer) into the sysfs read buffer.  Copies at most 1031
 * bytes per call (the bin attribute is sized 1032).  If the IOP had
 * overflowed the ring (ACB_F_IOPDATA_OVERFLOW), pending IOP data is
 * pulled in afterwards and acknowledged via arcmsr_iop_message_read().
 *
 * NOTE(review): ring indices are read/updated without any locking
 * here — confirm serialization against the interrupt path.
 * Returns the number of bytes copied; @off and @count are ignored.
 */
static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
					     struct kobject *kobj,
					     struct bin_attribute *bin,
					     char *buf, loff_t off,
					     size_t count)
{
	struct device *dev = container_of(kobj,struct device,kobj);
	struct Scsi_Host *host = class_to_shost(dev);
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	uint8_t *pQbuffer,*ptmpQbuffer;
	int32_t allxfer_len = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* do message unit read. */
	ptmpQbuffer = (uint8_t *)buf;
	while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
		&& (allxfer_len < 1031)) {
		/* one byte at a time out of the circular buffer */
		pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
		memcpy(ptmpQbuffer, pQbuffer, 1);
		acb->rqbuf_firstindex++;
		acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;	/* wrap */
		ptmpQbuffer++;
		allxfer_len++;
	}
	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		struct QBUFFER __iomem *prbuffer;
		uint8_t __iomem *iop_data;
		int32_t iop_len;

		/* ring overflowed earlier: refill it from the IOP buffer */
		acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
		prbuffer = arcmsr_get_iop_rqbuffer(acb);
		iop_data = prbuffer->data;
		iop_len = readl(&prbuffer->data_len);
		while (iop_len > 0) {
			acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
			acb->rqbuf_lastindex++;
			acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			iop_len--;
		}
		/* acknowledge so the IOP can post more data */
		arcmsr_iop_message_read(acb);
	}
	return (allxfer_len);
}
/*
 * sysfs "mu_write" handler: queue a user message into the adapter's
 * write ring buffer (wqbuffer) and post it to the IOP.
 *
 * Returns @count when the whole message was queued, 0 when the caller
 * must retry (previous data still pending, or not enough ring space),
 * or -EINVAL for messages larger than 1032 bytes.
 *
 * NOTE(review): ring indices are accessed without locking here —
 * confirm serialization against the interrupt path.
 */
static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
					      struct kobject *kobj,
					      struct bin_attribute *bin,
					      char *buf, loff_t off,
					      size_t count)
{
	struct device *dev = container_of(kobj,struct device,kobj);
	struct Scsi_Host *host = class_to_shost(dev);
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
	uint8_t *pQbuffer, *ptmpuserbuffer;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (count > 1032)
		return -EINVAL;

	/* do message unit write. */
	ptmpuserbuffer = (uint8_t *)buf;
	user_len = (int32_t)count;
	wqbuf_lastindex = acb->wqbuf_lastindex;
	wqbuf_firstindex = acb->wqbuf_firstindex;
	if (wqbuf_lastindex != wqbuf_firstindex) {
		/* older data still queued: push it and ask caller to retry */
		arcmsr_post_ioctldata2iop(acb);
		return 0;	/*need retry*/
	} else {
		/* free space in the circular buffer (power-of-two mask) */
		my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
			&(ARCMSR_MAX_QBUFFER - 1);
		if (my_empty_len >= user_len) {
			while (user_len > 0) {
				/* one byte at a time into the ring */
				pQbuffer =
				&acb->wqbuffer[acb->wqbuf_lastindex];
				memcpy(pQbuffer, ptmpuserbuffer, 1);
				acb->wqbuf_lastindex++;
				acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
				ptmpuserbuffer++;
				user_len--;
			}
			/* first data since a clear: kick the IOP */
			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
				acb->acb_flags &=
					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
				arcmsr_post_ioctldata2iop(acb);
			}
			return count;
		} else {
			return 0;	/*need retry*/
		}
	}
}
/*
 * sysfs "mu_clear" handler: drop any pending IOP data, mark both
 * message rings cleared, reset all ring indices and zero both buffers.
 * Always returns 1 (the attribute size is a single byte).
 */
static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
					      struct kobject *kobj,
					      struct bin_attribute *bin,
					      char *buf, loff_t off,
					      size_t count)
{
	struct device *dev = container_of(kobj,struct device,kobj);
	struct Scsi_Host *host = class_to_shost(dev);
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	uint8_t *pQbuffer;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		/* discard overflowed IOP data and ack the doorbell */
		acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
		arcmsr_iop_message_read(acb);
	}
	acb->acb_flags |=
		(ACB_F_MESSAGE_WQBUFFER_CLEARED
		| ACB_F_MESSAGE_RQBUFFER_CLEARED
		| ACB_F_MESSAGE_WQBUFFER_READED);
	acb->rqbuf_firstindex = 0;
	acb->rqbuf_lastindex = 0;
	acb->wqbuf_firstindex = 0;
	acb->wqbuf_lastindex = 0;
	pQbuffer = acb->rqbuffer;
	memset(pQbuffer, 0, sizeof (struct QBUFFER));
	pQbuffer = acb->wqbuffer;
	memset(pQbuffer, 0, sizeof (struct QBUFFER));
	return 1;
}
/* "mu_read": root-readable, returns up to 1032 bytes per read. */
static struct bin_attribute arcmsr_sysfs_message_read_attr = {
	.attr = {
		.name = "mu_read",
		.mode = S_IRUSR ,
	},
	.size = 1032,
	.read = arcmsr_sysfs_iop_message_read,
};

/* "mu_write": root-writable, accepts up to 1032 bytes per write. */
static struct bin_attribute arcmsr_sysfs_message_write_attr = {
	.attr = {
		.name = "mu_write",
		.mode = S_IWUSR,
	},
	.size = 1032,
	.write = arcmsr_sysfs_iop_message_write,
};

/* "mu_clear": root-writable one-byte trigger that resets both rings. */
static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
	.attr = {
		.name = "mu_clear",
		.mode = S_IWUSR,
	},
	.size = 1,
	.write = arcmsr_sysfs_iop_message_clear,
};
/*
 * Create the three "mu_*" binary sysfs files on the Scsi_Host device.
 * On failure, any files already created are removed before returning
 * the sysfs error code.
 */
int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb)
{
	struct kobject *kobj = &acb->host->shost_dev.kobj;
	int err;

	err = sysfs_create_bin_file(kobj, &arcmsr_sysfs_message_read_attr);
	if (err) {
		printk(KERN_ERR "arcmsr: alloc sysfs mu_read failed\n");
		goto out;
	}
	err = sysfs_create_bin_file(kobj, &arcmsr_sysfs_message_write_attr);
	if (err) {
		printk(KERN_ERR "arcmsr: alloc sysfs mu_write failed\n");
		goto undo_read;
	}
	err = sysfs_create_bin_file(kobj, &arcmsr_sysfs_message_clear_attr);
	if (err) {
		printk(KERN_ERR "arcmsr: alloc sysfs mu_clear failed\n");
		goto undo_write;
	}
	return 0;

undo_write:
	sysfs_remove_bin_file(kobj, &arcmsr_sysfs_message_write_attr);
undo_read:
	sysfs_remove_bin_file(kobj, &arcmsr_sysfs_message_read_attr);
out:
	return err;
}
/* Remove the three "mu_*" binary sysfs files (reverse creation order). */
void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb)
{
	struct kobject *kobj = &acb->host->shost_dev.kobj;

	sysfs_remove_bin_file(kobj, &arcmsr_sysfs_message_clear_attr);
	sysfs_remove_bin_file(kobj, &arcmsr_sysfs_message_write_attr);
	sysfs_remove_bin_file(kobj, &arcmsr_sysfs_message_read_attr);
}
static ssize_t
arcmsr_attr_host_driver_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE,
"%s\n",
ARCMSR_DRIVER_VERSION);
}
static ssize_t
arcmsr_attr_host_driver_posted_cmd(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
atomic_read(&acb->ccboutstandingcount));
}
static ssize_t
arcmsr_attr_host_driver_reset(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->num_resets);
}
static ssize_t
arcmsr_attr_host_driver_abort(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->num_aborts);
}
static ssize_t
arcmsr_attr_host_fw_model(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%s\n",
acb->firm_model);
}
static ssize_t
arcmsr_attr_host_fw_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%s\n",
acb->firm_version);
}
static ssize_t
arcmsr_attr_host_fw_request_len(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->firm_request_len);
}
static ssize_t
arcmsr_attr_host_fw_numbers_queue(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->firm_numbers_queue);
}
static ssize_t
arcmsr_attr_host_fw_sdram_size(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->firm_sdram_size);
}
static ssize_t
arcmsr_attr_host_fw_hd_channels(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->firm_hd_channels);
}
/* Read-only (S_IRUGO) host device attributes backed by the show
 * helpers above; no store methods. */
static DEVICE_ATTR(host_driver_version, S_IRUGO, arcmsr_attr_host_driver_version, NULL);
static DEVICE_ATTR(host_driver_posted_cmd, S_IRUGO, arcmsr_attr_host_driver_posted_cmd, NULL);
static DEVICE_ATTR(host_driver_reset, S_IRUGO, arcmsr_attr_host_driver_reset, NULL);
static DEVICE_ATTR(host_driver_abort, S_IRUGO, arcmsr_attr_host_driver_abort, NULL);
static DEVICE_ATTR(host_fw_model, S_IRUGO, arcmsr_attr_host_fw_model, NULL);
static DEVICE_ATTR(host_fw_version, S_IRUGO, arcmsr_attr_host_fw_version, NULL);
static DEVICE_ATTR(host_fw_request_len, S_IRUGO, arcmsr_attr_host_fw_request_len, NULL);
static DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_queue, NULL);
static DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL);
static DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL);

/* NULL-terminated attribute list (declared extern near the top of the
 * file for use by the driver core). */
struct device_attribute *arcmsr_host_attrs[] = {
	&dev_attr_host_driver_version,
	&dev_attr_host_driver_posted_cmd,
	&dev_attr_host_driver_reset,
	&dev_attr_host_driver_abort,
	&dev_attr_host_fw_model,
	&dev_attr_host_fw_version,
	&dev_attr_host_fw_request_len,
	&dev_attr_host_fw_numbers_queue,
	&dev_attr_host_fw_sdram_size,
	&dev_attr_host_fw_hd_channels,
	NULL,
};
| gpl-2.0 |
Alzyoud/android_kernel_samsung_smdk4412 | drivers/scsi/arcmsr/arcmsr_attr.c | 11236 | 13042 | /*
*******************************************************************************
** O.S : Linux
** FILE NAME : arcmsr_attr.c
** BY : Nick Cheng
** Description: attributes exported to sysfs and device host
*******************************************************************************
** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
**
** Web site: www.areca.com.tw
** E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
** derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
*******************************************************************************
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include "arcmsr.h"
struct device_attribute *arcmsr_host_attrs[];
/*
 * sysfs "mu_read" handler: drain bytes from the adapter's request ring
 * buffer (rqbuffer) into the sysfs read buffer (at most 1031 bytes per
 * call).  On a previous IOP overflow (ACB_F_IOPDATA_OVERFLOW) the
 * pending IOP data is pulled into the ring afterwards and acknowledged
 * via arcmsr_iop_message_read().
 *
 * NOTE(review): ring indices are updated without locking — confirm
 * serialization against the interrupt path.
 * Returns the number of bytes copied; @off and @count are ignored.
 */
static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
					     struct kobject *kobj,
					     struct bin_attribute *bin,
					     char *buf, loff_t off,
					     size_t count)
{
	struct device *dev = container_of(kobj,struct device,kobj);
	struct Scsi_Host *host = class_to_shost(dev);
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	uint8_t *pQbuffer,*ptmpQbuffer;
	int32_t allxfer_len = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* do message unit read. */
	ptmpQbuffer = (uint8_t *)buf;
	while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
		&& (allxfer_len < 1031)) {
		/* one byte at a time out of the circular buffer */
		pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
		memcpy(ptmpQbuffer, pQbuffer, 1);
		acb->rqbuf_firstindex++;
		acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;	/* wrap */
		ptmpQbuffer++;
		allxfer_len++;
	}
	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		struct QBUFFER __iomem *prbuffer;
		uint8_t __iomem *iop_data;
		int32_t iop_len;

		/* ring overflowed earlier: refill it from the IOP buffer */
		acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
		prbuffer = arcmsr_get_iop_rqbuffer(acb);
		iop_data = prbuffer->data;
		iop_len = readl(&prbuffer->data_len);
		while (iop_len > 0) {
			acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
			acb->rqbuf_lastindex++;
			acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			iop_len--;
		}
		/* acknowledge so the IOP can post more data */
		arcmsr_iop_message_read(acb);
	}
	return (allxfer_len);
}
static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
struct kobject *kobj,
struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
{
/* Sysfs "mu_write": queue a message for the IOP via the wqbuffer ring.
 * Returns `count` on success, 0 when the caller must retry (ring busy
 * or not enough space), or a negative errno. */
struct device *dev = container_of(kobj,struct device,kobj);
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
uint8_t *pQbuffer, *ptmpuserbuffer;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
/* 1032 matches the bin attribute size. */
if (count > 1032)
return -EINVAL;
/* do message unit write. */
ptmpuserbuffer = (uint8_t *)buf;
user_len = (int32_t)count;
/* Snapshot the ring indices once so the emptiness test is consistent. */
wqbuf_lastindex = acb->wqbuf_lastindex;
wqbuf_firstindex = acb->wqbuf_firstindex;
if (wqbuf_lastindex != wqbuf_firstindex) {
/* Ring still holds unsent data: kick it to the IOP and ask the
 * caller to retry later. */
arcmsr_post_ioctldata2iop(acb);
return 0; /*need retry*/
} else {
/* Ring is empty here, so this evaluates to ARCMSR_MAX_QBUFFER - 1,
 * the maximum usable capacity of the circular buffer. */
my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
&(ARCMSR_MAX_QBUFFER - 1);
if (my_empty_len >= user_len) {
while (user_len > 0) {
pQbuffer =
&acb->wqbuffer[acb->wqbuf_lastindex];
memcpy(pQbuffer, ptmpuserbuffer, 1);
acb->wqbuf_lastindex++;
acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
ptmpuserbuffer++;
user_len--;
}
/* First write after a clear: push the data immediately. */
if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
acb->acb_flags &=
~ACB_F_MESSAGE_WQBUFFER_CLEARED;
arcmsr_post_ioctldata2iop(acb);
}
return count;
} else {
return 0; /*need retry*/
}
}
}
static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
struct kobject *kobj,
struct bin_attribute *bin,
char *buf, loff_t off,
size_t count)
{
/* Sysfs "mu_clear": reset both message rings and their state flags.
 * Always reports 1 byte consumed. */
struct device *dev = container_of(kobj,struct device,kobj);
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
uint8_t *pQbuffer;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
/* Ack any pending IOP overflow first so the IOP is unblocked. */
if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
arcmsr_iop_message_read(acb);
}
acb->acb_flags |=
(ACB_F_MESSAGE_WQBUFFER_CLEARED
| ACB_F_MESSAGE_RQBUFFER_CLEARED
| ACB_F_MESSAGE_WQBUFFER_READED);
/* Empty both rings and wipe their storage. */
acb->rqbuf_firstindex = 0;
acb->rqbuf_lastindex = 0;
acb->wqbuf_firstindex = 0;
acb->wqbuf_lastindex = 0;
pQbuffer = acb->rqbuffer;
memset(pQbuffer, 0, sizeof (struct QBUFFER));
pQbuffer = acb->wqbuffer;
memset(pQbuffer, 0, sizeof (struct QBUFFER));
return 1;
}
/* Binary sysfs files exposing the message unit: sizes match the per-call
 * limits enforced in the handlers above (1031-byte reads, 1032-byte writes). */
static struct bin_attribute arcmsr_sysfs_message_read_attr = {
.attr = {
.name = "mu_read",
.mode = S_IRUSR ,
},
.size = 1032,
.read = arcmsr_sysfs_iop_message_read,
};
static struct bin_attribute arcmsr_sysfs_message_write_attr = {
.attr = {
.name = "mu_write",
.mode = S_IWUSR,
},
.size = 1032,
.write = arcmsr_sysfs_iop_message_write,
};
/* Writing any single byte to mu_clear resets both rings. */
static struct bin_attribute arcmsr_sysfs_message_clear_attr = {
.attr = {
.name = "mu_clear",
.mode = S_IWUSR,
},
.size = 1,
.write = arcmsr_sysfs_iop_message_clear,
};
int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *acb)
{
struct Scsi_Host *host = acb->host;
int error;
error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr);
if (error) {
printk(KERN_ERR "arcmsr: alloc sysfs mu_read failed\n");
goto error_bin_file_message_read;
}
error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr);
if (error) {
printk(KERN_ERR "arcmsr: alloc sysfs mu_write failed\n");
goto error_bin_file_message_write;
}
error = sysfs_create_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_clear_attr);
if (error) {
printk(KERN_ERR "arcmsr: alloc sysfs mu_clear failed\n");
goto error_bin_file_message_clear;
}
return 0;
error_bin_file_message_clear:
sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_write_attr);
error_bin_file_message_write:
sysfs_remove_bin_file(&host->shost_dev.kobj, &arcmsr_sysfs_message_read_attr);
error_bin_file_message_read:
return error;
}
/* Remove the message-unit sysfs files, in reverse order of creation. */
void arcmsr_free_sysfs_attr(struct AdapterControlBlock *acb)
{
	struct kobject *kobj = &acb->host->shost_dev.kobj;

	sysfs_remove_bin_file(kobj, &arcmsr_sysfs_message_clear_attr);
	sysfs_remove_bin_file(kobj, &arcmsr_sysfs_message_write_attr);
	sysfs_remove_bin_file(kobj, &arcmsr_sysfs_message_read_attr);
}
/* sysfs show: driver version string. */
static ssize_t
arcmsr_attr_host_driver_version(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", ARCMSR_DRIVER_VERSION);
}
static ssize_t
arcmsr_attr_host_driver_posted_cmd(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
atomic_read(&acb->ccboutstandingcount));
}
static ssize_t
arcmsr_attr_host_driver_reset(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->num_resets);
}
static ssize_t
arcmsr_attr_host_driver_abort(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->num_aborts);
}
static ssize_t
arcmsr_attr_host_fw_model(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%s\n",
acb->firm_model);
}
static ssize_t
arcmsr_attr_host_fw_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%s\n",
acb->firm_version);
}
static ssize_t
arcmsr_attr_host_fw_request_len(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->firm_request_len);
}
static ssize_t
arcmsr_attr_host_fw_numbers_queue(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->firm_numbers_queue);
}
static ssize_t
arcmsr_attr_host_fw_sdram_size(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->firm_sdram_size);
}
static ssize_t
arcmsr_attr_host_fw_hd_channels(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *host = class_to_shost(dev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
return snprintf(buf, PAGE_SIZE,
"%4d\n",
acb->firm_hd_channels);
}
/* Read-only device attributes built from the show functions above. */
static DEVICE_ATTR(host_driver_version, S_IRUGO, arcmsr_attr_host_driver_version, NULL);
static DEVICE_ATTR(host_driver_posted_cmd, S_IRUGO, arcmsr_attr_host_driver_posted_cmd, NULL);
static DEVICE_ATTR(host_driver_reset, S_IRUGO, arcmsr_attr_host_driver_reset, NULL);
static DEVICE_ATTR(host_driver_abort, S_IRUGO, arcmsr_attr_host_driver_abort, NULL);
static DEVICE_ATTR(host_fw_model, S_IRUGO, arcmsr_attr_host_fw_model, NULL);
static DEVICE_ATTR(host_fw_version, S_IRUGO, arcmsr_attr_host_fw_version, NULL);
static DEVICE_ATTR(host_fw_request_len, S_IRUGO, arcmsr_attr_host_fw_request_len, NULL);
static DEVICE_ATTR(host_fw_numbers_queue, S_IRUGO, arcmsr_attr_host_fw_numbers_queue, NULL);
static DEVICE_ATTR(host_fw_sdram_size, S_IRUGO, arcmsr_attr_host_fw_sdram_size, NULL);
static DEVICE_ATTR(host_fw_hd_channels, S_IRUGO, arcmsr_attr_host_fw_hd_channels, NULL);
/* NULL-terminated list handed to the SCSI midlayer (shost_attrs). */
struct device_attribute *arcmsr_host_attrs[] = {
&dev_attr_host_driver_version,
&dev_attr_host_driver_posted_cmd,
&dev_attr_host_driver_reset,
&dev_attr_host_driver_abort,
&dev_attr_host_fw_model,
&dev_attr_host_fw_version,
&dev_attr_host_fw_request_len,
&dev_attr_host_fw_numbers_queue,
&dev_attr_host_fw_sdram_size,
&dev_attr_host_fw_hd_channels,
NULL,
};
| gpl-2.0 |
playfulgod/kernel_lge_l1m | drivers/net/irda/sir_dongle.c | 12516 | 3508 | /*********************************************************************
*
* sir_dongle.c: manager for serial dongle protocol drivers
*
* Copyright (c) 2002 Martin Diehl
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/mutex.h>
#include <net/irda/irda.h>
#include "sir-dev.h"
/**************************************************************************
*
* dongle registration and attachment
*
*/
static LIST_HEAD(dongle_list); /* list of registered dongle drivers */
static DEFINE_MUTEX(dongle_list_lock); /* protects the list */
/*
 * Register a dongle protocol driver. Fails with -EEXIST if a driver for
 * the same dongle type is already on the list; returns 0 on success.
 * The list mutex serializes against lookup and unregistration.
 */
int irda_register_dongle(struct dongle_driver *new)
{
	struct dongle_driver *drv;

	IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n",
		   __func__, new->driver_name, new->type);

	mutex_lock(&dongle_list_lock);
	/* Use the type-safe iterator instead of open-coded
	 * list_for_each() + list_entry(). */
	list_for_each_entry(drv, &dongle_list, dongle_list) {
		if (new->type == drv->type) {
			mutex_unlock(&dongle_list_lock);
			return -EEXIST;
		}
	}
	list_add(&new->dongle_list, &dongle_list);
	mutex_unlock(&dongle_list_lock);
	return 0;
}
EXPORT_SYMBOL(irda_register_dongle);
/* Unlink a dongle driver from the global list under the list mutex.
 * Always succeeds; the caller guarantees drv was previously registered. */
int irda_unregister_dongle(struct dongle_driver *drv)
{
mutex_lock(&dongle_list_lock);
list_del(&drv->dongle_list);
mutex_unlock(&dongle_list_lock);
return 0;
}
EXPORT_SYMBOL(irda_unregister_dongle);
/* Look up the dongle driver for `type`, pin its module, and open it on
 * `dev`. Returns 0 on success, -EBUSY if a dongle is already attached,
 * -ENODEV if no driver is registered, -ESTALE if the module is being
 * removed, or the driver's open() error. */
int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type)
{
struct list_head *entry;
const struct dongle_driver *drv = NULL;
int err = -EINVAL;
/* Try to autoload the driver module before searching the list. */
request_module("irda-dongle-%d", type);
if (dev->dongle_drv != NULL)
return -EBUSY;
/* serialize access to the list of registered dongles */
mutex_lock(&dongle_list_lock);
list_for_each(entry, &dongle_list) {
drv = list_entry(entry, struct dongle_driver, dongle_list);
if (drv->type == type)
break;
else
drv = NULL;
}
if (!drv) {
err = -ENODEV;
goto out_unlock; /* no such dongle */
}
/* handling of SMP races with dongle module removal - three cases:
 * 1) dongle driver was already unregistered - then we haven't found the
 * requested dongle above and are already out here
 * 2) the module is already marked deleted but the driver is still
 * registered - then the try_module_get() below will fail
 * 3) the try_module_get() below succeeds before the module is marked
 * deleted - then sys_delete_module() fails and prevents the removal
 * because the module is in use.
 */
if (!try_module_get(drv->owner)) {
err = -ESTALE;
goto out_unlock; /* rmmod already pending */
}
dev->dongle_drv = drv;
/* open() runs with the list mutex held, keeping removal blocked. */
if (!drv->open || (err=drv->open(dev))!=0)
goto out_reject; /* failed to open driver */
mutex_unlock(&dongle_list_lock);
return 0;
out_reject:
dev->dongle_drv = NULL;
module_put(drv->owner);
out_unlock:
mutex_unlock(&dongle_list_lock);
return err;
}
/*
 * Detach and release the dongle driver bound to `dev`, dropping the
 * module reference taken in sirdev_get_dongle(). No-op when no dongle
 * is attached; always returns 0.
 */
int sirdev_put_dongle(struct sir_dev *dev)
{
	const struct dongle_driver *drv = dev->dongle_drv;

	if (!drv)
		return 0;

	if (drv->close)
		drv->close(dev);	/* close this dongle instance */
	dev->dongle_drv = NULL;		/* unlink the dongle driver */
	module_put(drv->owner);		/* drop the module refcount */
	return 0;
}
| gpl-2.0 |
yajnab/android_kernel_samsung_msm8625 | arch/arm/mach-msm/platsmp.c | 229 | 6105 | /*
* Copyright (C) 2002 ARM Ltd.
* All Rights Reserved
* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <asm/hardware/gic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
#include <mach/socinfo.h>
#include <mach/hardware.h>
#include <mach/msm_iomap.h>
#include "pm.h"
#include "scm-boot.h"
#include "spm.h"
#define VDD_SC1_ARRAY_CLAMP_GFS_CTL 0x15A0
#define SCSS_CPU1CORE_RESET 0xD80
#define SCSS_DBG_STATUS_CORE_PWRDUP 0xE64
extern void msm_secondary_startup(void);
/*
* control for which core is the next to come out of the secondary
* boot "holding pen".
*/
volatile int pen_release = -1;
static DEFINE_SPINLOCK(boot_lock);
/* Runs on the secondary CPU right after it comes out of the holding pen:
 * finish platform power-management setup, enable its GIC CPU interface,
 * and rendezvous with boot_secondary() via boot_lock. */
void __cpuinit platform_secondary_init(unsigned int cpu)
{
WARN_ON(msm_platform_secondary_init(cpu));
/*
 * if any interrupts are already enabled for the primary
 * core (e.g. timer irq), then they will not have been enabled
 * for us: do so
 */
gic_secondary_init(0);
/*
 * Synchronise with the boot thread.
 */
spin_lock(&boot_lock);
spin_unlock(&boot_lock);
}
/* Power up and release the second Scorpion core (msm8x60) by poking its
 * power/reset registers directly. Returns 0 or -EINVAL if the register
 * window cannot be mapped. */
static int __cpuinit scorpion_release_secondary(void)
{
void *base_ptr = ioremap_nocache(0x00902000, SZ_4K*2);
if (!base_ptr)
return -EINVAL;
/* Remove the array clamp before deasserting reset; the dmb() keeps
 * the two writes ordered. */
writel_relaxed(0, base_ptr + VDD_SC1_ARRAY_CLAMP_GFS_CTL);
dmb();
writel_relaxed(0, base_ptr + SCSS_CPU1CORE_RESET);
writel_relaxed(3, base_ptr + SCSS_DBG_STATUS_CORE_PWRDUP);
/* Ensure all writes have hit the device before unmapping. */
mb();
iounmap(base_ptr);
return 0;
}
/* Release a secondary Krait core on simulator/RUMI targets. `base` is the
 * physical base of the per-CPU power control block (0x10000 stride per
 * core); the magic values written differ per simulated platform. */
static int __cpuinit krait_release_secondary_sim(unsigned long base, int cpu)
{
void *base_ptr = ioremap_nocache(base + (cpu * 0x10000), SZ_4K);
if (!base_ptr)
return -ENODEV;
if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3()) {
writel_relaxed(0x10, base_ptr+0x04);
writel_relaxed(0x80, base_ptr+0x04);
}
if (machine_is_apq8064_sim())
writel_relaxed(0xf0000, base_ptr+0x04);
if (machine_is_msm8974_sim()) {
writel_relaxed(0x800, base_ptr+0x04);
writel_relaxed(0x3FFF, base_ptr+0x14);
}
/* Flush the relaxed writes to the device before unmapping. */
mb();
iounmap(base_ptr);
return 0;
}
/* Power-on sequence for a secondary Krait core on real silicon: turn on
 * the CPU rail, then step the power/reset control register through the
 * documented sequence with the required settle delays between writes. */
static int __cpuinit krait_release_secondary(unsigned long base, int cpu)
{
void *base_ptr = ioremap_nocache(base + (cpu * 0x10000), SZ_4K);
if (!base_ptr)
return -ENODEV;
msm_spm_turn_on_cpu_rail(cpu);
/* NOTE(review): the value/delay sequence below comes from the SoC
 * power-up spec; do not reorder. */
writel_relaxed(0x109, base_ptr+0x04);
writel_relaxed(0x101, base_ptr+0x04);
ndelay(300);
writel_relaxed(0x121, base_ptr+0x04);
udelay(2);
writel_relaxed(0x020, base_ptr+0x04);
udelay(2);
writel_relaxed(0x000, base_ptr+0x04);
udelay(100);
writel_relaxed(0x080, base_ptr+0x04);
mb();
iounmap(base_ptr);
return 0;
}
/* Dispatch to the correct secondary-release routine for the running SoC
 * (Scorpion, Krait simulator, or Krait silicon). */
static int __cpuinit release_secondary(unsigned int cpu)
{
BUG_ON(cpu >= get_core_count());
if (cpu_is_msm8x60())
return scorpion_release_secondary();
if (machine_is_msm8960_sim() || machine_is_msm8960_rumi3() ||
machine_is_apq8064_sim())
return krait_release_secondary_sim(0x02088000, cpu);
if (machine_is_msm8974_sim())
return krait_release_secondary_sim(0xf9088000, cpu);
if (cpu_is_msm8960() || cpu_is_msm8930() || cpu_is_msm8930aa() ||
cpu_is_apq8064() || cpu_is_msm8627())
return krait_release_secondary(0x02088000, cpu);
WARN(1, "unknown CPU case in release_secondary\n");
return -EINVAL;
}
/* Per-CPU flag: set once a core's first (cold) boot address has been
 * programmed, so subsequent hotplug-ins skip the release sequence. */
DEFINE_PER_CPU(int, cold_boot_done);
/* SCM cold-boot flag passed to the secure monitor, indexed by CPU. */
static int cold_boot_flags[] = {
0,
SCM_FLAG_COLDBOOT_CPU1,
SCM_FLAG_COLDBOOT_CPU2,
SCM_FLAG_COLDBOOT_CPU3,
};
/* Bring up secondary CPU `cpu`: on first boot, program its start address
 * via SCM and power it on; then release it from the holding pen using the
 * pen_release handshake and wait up to 1s for it to acknowledge.
 * Returns 0 on success or -ENOSYS if the CPU never cleared pen_release. */
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
int ret;
int flag = 0;
unsigned long timeout;
pr_debug("Starting secondary CPU %d\n", cpu);
/* Set preset_lpj to avoid subsequent lpj recalculations */
preset_lpj = loops_per_jiffy;
if (cpu > 0 && cpu < ARRAY_SIZE(cold_boot_flags))
flag = cold_boot_flags[cpu];
else
__WARN();
/* One-time cold boot: hand the entry point to the secure monitor and
 * run the SoC-specific power-up sequence. */
if (per_cpu(cold_boot_done, cpu) == false) {
ret = scm_set_boot_addr((void *)
virt_to_phys(msm_secondary_startup),
flag);
if (ret == 0)
release_secondary(cpu);
else
printk(KERN_DEBUG "Failed to set secondary core boot "
"address\n");
per_cpu(cold_boot_done, cpu) = true;
}
/*
 * set synchronisation state between this boot processor
 * and the secondary one
 */
spin_lock(&boot_lock);
/*
 * The secondary processor is waiting to be released from
 * the holding pen - release it, then wait for it to flag
 * that it has been released by resetting pen_release.
 *
 * Note that "pen_release" is the hardware CPU ID, whereas
 * "cpu" is Linux's internal ID.
 */
pen_release = cpu_logical_map(cpu);
/* Push pen_release to memory so the (cache-cold) secondary sees it. */
__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
/*
 * Send the secondary CPU a soft interrupt, thereby causing
 * the boot monitor to read the system wide flags register,
 * and branch to the address found there.
 */
gic_raise_softirq(cpumask_of(cpu), 1);
timeout = jiffies + (1 * HZ);
/* Poll for the secondary to write -1 into pen_release, invalidating
 * our cached copy each iteration so we observe its store. */
while (time_before(jiffies, timeout)) {
smp_rmb();
if (pen_release == -1)
break;
dmac_inv_range((void *)&pen_release,
(void *)(&pen_release+sizeof(pen_release)));
udelay(10);
}
/*
 * now the secondary core is starting up let it run its
 * calibrations, then wait for it to finish
 */
spin_unlock(&boot_lock);
return pen_release != -1 ? -ENOSYS : 0;
}
/*
* Initialise the CPU possible map early - this describes the CPUs
* which may be present or become present in the system.
*/
/*
 * Build the early cpu_possible map from the hardware core count,
 * clipped to the kernel's nr_cpu_ids limit, and register the GIC
 * softirq as the cross-CPU call mechanism.
 */
void __init smp_init_cpus(void)
{
	unsigned int cpu;
	unsigned int ncores = get_core_count();

	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < ncores; cpu++)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/* No platform-specific SMP preparation is needed on these SoCs; the real
 * work happens per-CPU in boot_secondary(). */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
}
| gpl-2.0 |
ghanashyamprabhu/linux | drivers/staging/rtl8712/recv_linux.c | 229 | 4652 | /******************************************************************************
* recv_linux.c
*
* Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
* Linux device driver for RTL8192SU
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
* Contact information:
* WLAN FAE <wlanfae@realtek.com>.
* Larry Finger <Larry.Finger@lwfinger.net>
*
******************************************************************************/
#define _RECV_OSDEP_C_
#include <linux/usb.h>
#include "osdep_service.h"
#include "drv_types.h"
#include "wifi.h"
#include "recv_osdep.h"
#include "osdep_intf.h"
#include "ethernet.h"
#include <linux/if_arp.h>
#include "usb_ops.h"
/*init os related resource in struct recv_priv*/
/*alloc os related resource in union recv_frame*/
/*
 * Initialize the OS-specific members of a recv_frame: both skb pointers
 * start out NULL. Always succeeds.
 */
int r8712_os_recv_resource_alloc(struct _adapter *padapter,
				 union recv_frame *precvframe)
{
	precvframe->u.hdr.pkt = NULL;
	precvframe->u.hdr.pkt_newalloc = NULL;
	return _SUCCESS;
}
/*alloc os related resource in struct recv_buf*/
/*
 * Allocate the OS-specific resources of a recv_buf: one URB, plus all
 * bookkeeping fields reset to their empty state. Returns _FAIL only when
 * the URB allocation fails (fields are still initialized in that case).
 */
int r8712_os_recvbuf_resource_alloc(struct _adapter *padapter,
				    struct recv_buf *precvbuf)
{
	precvbuf->irp_pending = false;
	precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
	precvbuf->pskb = NULL;
	precvbuf->reuse = false;
	precvbuf->pallocated_buf = NULL;
	precvbuf->pbuf = NULL;
	precvbuf->pdata = NULL;
	precvbuf->phead = NULL;
	precvbuf->ptail = NULL;
	precvbuf->pend = NULL;
	precvbuf->transfer_len = 0;
	precvbuf->len = 0;
	return precvbuf->purb ? _SUCCESS : _FAIL;
}
/*free os related resource in struct recv_buf*/
/*
 * Release the OS-specific resources of a recv_buf: free any attached skb
 * and kill+free the URB. Always returns _SUCCESS.
 */
int r8712_os_recvbuf_resource_free(struct _adapter *padapter,
				   struct recv_buf *precvbuf)
{
	struct urb *purb = precvbuf->purb;

	if (precvbuf->pskb)
		dev_kfree_skb_any(precvbuf->pskb);
	if (purb) {
		usb_kill_urb(purb);	/* cancel any in-flight transfer */
		usb_free_urb(purb);
	}
	return _SUCCESS;
}
/* Report a TKIP Michael MIC failure to userspace (wpa_supplicant) via a
 * wireless event. `bgroup` selects group vs pairwise key failure. */
void r8712_handle_tkip_mic_err(struct _adapter *padapter, u8 bgroup)
{
union iwreq_data wrqu;
struct iw_michaelmicfailure ev;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
memset(&ev, 0x00, sizeof(ev));
if (bgroup)
ev.flags |= IW_MICFAILURE_GROUP;
else
ev.flags |= IW_MICFAILURE_PAIRWISE;
/* The "source" of the failure is the associated BSSID. */
ev.src_addr.sa_family = ARPHRD_ETHER;
ether_addr_copy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0]);
memset(&wrqu, 0x00, sizeof(wrqu));
wrqu.data.length = sizeof(ev);
wireless_send_event(padapter->pnetdev, IWEVMICHAELMICFAILURE, &wrqu,
(char *)&ev);
}
/*
 * Hand a received frame up to the network stack. The skb attached to the
 * frame is configured (data/len/checksum/protocol) and passed to
 * netif_rx(); the recv_frame is then returned to the free queue. If the
 * frame carries no skb it is dropped and rx_drop is incremented.
 */
void r8712_recv_indicatepkt(struct _adapter *padapter,
			    union recv_frame *precv_frame)
{
	struct recv_priv *precvpriv = &padapter->recvpriv;
	struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
	struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
	_pkt *skb = precv_frame->u.hdr.pkt;

	if (skb == NULL)
		goto _recv_indicatepkt_drop;
	skb->data = precv_frame->u.hdr.rx_data;
	skb->len = precv_frame->u.hdr.len;
	skb_set_tail_pointer(skb, skb->len);
	/* Trust the hardware TCP checksum report when it is valid. */
	if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
	skb->dev = padapter->pnetdev;
	skb->protocol = eth_type_trans(skb, padapter->pnetdev);
	netif_rx(skb);
	/* The stack owns the skb now; clear our pointer before freeing
	 * the frame so r8712_free_recvframe() cannot touch it. */
	precv_frame->u.hdr.pkt = NULL;
	r8712_free_recvframe(precv_frame, pfree_recv_queue);
	return;
_recv_indicatepkt_drop:
	/* precv_frame was already dereferenced above, so it cannot be NULL
	 * here; the former "if (precv_frame)" guard was redundant and has
	 * been removed. Return the frame to the free queue. */
	r8712_free_recvframe(precv_frame, pfree_recv_queue);
	precvpriv->rx_drop++;
}
/* Timer callback shim: recover the reorder-control pointer from the timer
 * data word and forward to the real handler. */
static void _r8712_reordering_ctrl_timeout_handler (unsigned long data)
{
struct recv_reorder_ctrl *preorder_ctrl =
(struct recv_reorder_ctrl *)data;
r8712_reordering_ctrl_timeout_handler(preorder_ctrl);
}
/* Arm (but do not start) the rx-reordering timeout timer for one
 * reorder-control instance. */
void r8712_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
{
setup_timer(&preorder_ctrl->reordering_ctrl_timer,
_r8712_reordering_ctrl_timeout_handler,
(unsigned long)preorder_ctrl);
}
| gpl-2.0 |
ailichao/kernel_motorola_msm8x26 | arch/parisc/kernel/process.c | 1509 | 10830 | /*
* PARISC Architecture-dependent parts of process handling
* based on the work for i386
*
* Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
* Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
* Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
* Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
* Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
* Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
* Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
* Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
* Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
* Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
* Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
* Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
* Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdarg.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/rcupdate.h>
#include <asm/io.h>
#include <asm/asm-offsets.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/pgalloc.h>
#include <asm/unwind.h>
#include <asm/sections.h>
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
/* Per-CPU idle loop: spin until work arrives, bracketing the idle period
 * with RCU idle enter/exit so RCU can treat this CPU as quiescent, then
 * reschedule and trim the page-table cache. */
void cpu_idle(void)
{
set_thread_flag(TIF_POLLING_NRFLAG);
/* endless idle loop with no priority at all */
while (1) {
rcu_idle_enter();
while (!need_resched())
barrier();
rcu_idle_exit();
schedule_preempt_disabled();
check_pgt_cache();
}
}
#define COMMAND_GLOBAL F_EXTEND(0xfffe0030)
#define CMD_RESET 5 /* reset any module */
/*
** The Wright Brothers and Gecko systems have a H/W problem
** (Lasi...'nuf said) may cause a broadcast reset to lockup
** the system. An HVERSION dependent PDC call was developed
** to perform a "safe", platform specific broadcast reset instead
** of kludging up all the code.
**
** Older machines which do not implement PDC_BROADCAST_RESET will
** return (with an error) and the regular broadcast reset can be
** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
** the PDC call will not return (the system will be reset).
*/
/* Reboot the machine: update the chassis LEDs, ask PDC firmware to reset,
 * and fall back to a raw broadcast reset; this function never returns. */
void machine_restart(char *cmd)
{
#ifdef FASTBOOT_SELFTEST_SUPPORT
/*
 ** If user has modified the Firmware Selftest Bitmap,
 ** run the tests specified in the bitmap after the
 ** system is rebooted w/PDC_DO_RESET.
 **
 ** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
 **
 ** Using "directed resets" at each processor with the MEM_TOC
 ** vector cleared will also avoid running destructive
 ** memory self tests. (Not implemented yet)
 */
if (ftc_bitmap) {
pdc_do_firm_test_reset(ftc_bitmap);
}
#endif
/* set up a new led state on systems shipped with a LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
/* "Normal" system reset */
pdc_do_reset();
/* Nope...box should reset with just CMD_RESET now */
gsc_writel(CMD_RESET, COMMAND_GLOBAL);
/* Wait for RESET to lay us to rest. */
while (1) ;
}
/* Nothing to do here: halt-time LED/chassis updates happen in led_halt()
 * via the reboot notifier chain. */
void machine_halt(void)
{
/*
 ** The LED/ChassisCodes are updated by the led_halt()
 ** function, called by the reboot notifier chain.
 */
}
void (*chassis_power_off)(void);
/*
* This routine is called from sys_reboot to actually turn off the
* machine
*/
/*
 * This routine is called from sys_reboot to actually turn off the
 * machine. If no chassis power-off hook and no working soft power
 * button exist, it can only tell the user to pull the plug.
 */
void machine_power_off(void)
{
/* If there is a registered power off handler, call it. */
if (chassis_power_off)
chassis_power_off();
/* Put the soft power button back under hardware control.
 * If the user had already pressed the power button, the
 * following call will immediately power off. */
pdc_soft_power_button(0);
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);
/* It seems we have no way to power the system off via
 * software. The user has to press the button himself. */
/* NOTE(review): the printk below lacks a trailing '\n'; the last log
 * line may not be terminated -- consider appending one. */
printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
}
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
/*
* Create a kernel thread
*/
extern pid_t __kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/* Create a kernel thread running fn(arg); thin wrapper around the
 * assembly-level __kernel_thread. Returns the new thread's pid. */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
/*
 * FIXME: Once we are sure we don't need any debug here,
 * kernel_thread can become a #define.
 */
return __kernel_thread(fn, arg, flags);
}
EXPORT_SYMBOL(kernel_thread);
/*
* Free current thread data structures etc..
*/
/* No per-thread teardown is required on parisc. */
void exit_thread(void)
{
}
/* Called on exec; nothing to reset here yet. */
void flush_thread(void)
{
/* Only needs to handle fpu stuff or perf monitors.
 ** REVISIT: several arches implement a "lazy fpu state".
 */
}
/* No per-task resources to release on this architecture. */
void release_thread(struct task_struct *dead_task)
{
}
/*
* Fill in the FPU structure for a core dump.
*/
/*
 * Fill in the FPU register set for a core dump from the given pt_regs.
 * Returns 1 when registers were copied, 0 when regs is NULL.
 */
int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r)
{
	if (!regs)
		return 0;

	memcpy(r, regs->fr, sizeof(*r));
	return 1;
}
/* Copy a (stopped) task's saved FPU registers for a core dump; always
 * reports success. */
int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
{
memcpy(r, tsk->thread.regs.fr, sizeof(*r));
return 1;
}
/* Note that "fork()" is implemented in terms of clone, with
parameters (SIGCHLD, regs->gr[30], regs). */
/* parisc clone() entry point; fork() is implemented as clone(SIGCHLD,
 * regs->gr[30], regs) elsewhere. Arguments arrive in registers per the
 * comment below. */
int
sys_clone(unsigned long clone_flags, unsigned long usp,
struct pt_regs *regs)
{
/* Arugments from userspace are:
 r26 = Clone flags.
 r25 = Child stack.
 r24 = parent_tidptr.
 r23 = Is the TLS storage descriptor
 r22 = child_tidptr
 However, these last 3 args are only examined
 if the proper flags are set. */
int __user *parent_tidptr = (int __user *)regs->gr[24];
int __user *child_tidptr = (int __user *)regs->gr[22];
/* usp must be word aligned. This also prevents users from
 * passing in the value 1 (which is the signal for a special
 * return for a kernel thread) */
usp = ALIGN(usp, 4);
/* A zero value for usp means use the current stack */
if (usp == 0)
usp = regs->gr[30];
return do_fork(clone_flags, usp, regs, 0, parent_tidptr, child_tidptr);
}
/* vfork(): share the address space with the parent and suspend it until
 * the child execs or exits (CLONE_VFORK | CLONE_VM). */
int
sys_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gr[30], regs, 0, NULL, NULL);
}
/* Set up the new task's saved register state for fork/clone. Kernel
 * threads (signalled by usp == 1) get a fresh kernel stack and return via
 * ret_from_kernel_thread; user threads inherit the parent's registers and
 * return via child_return (or hpux_child_return for PER_HPUX tasks). */
int
copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused, /* in ia64 this is "user_stack_size" */
struct task_struct * p, struct pt_regs * pregs)
{
struct pt_regs * cregs = &(p->thread.regs);
void *stack = task_stack_page(p);
/* We have to use void * instead of a function pointer, because
 * function pointers aren't a pointer to the function on 64-bit.
 * Make them const so the compiler knows they live in .text */
extern void * const ret_from_kernel_thread;
extern void * const child_return;
#ifdef CONFIG_HPUX
extern void * const hpux_child_return;
#endif
*cregs = *pregs;
/* Set the return value for the child. Note that this is not
 actually restored by the syscall exit path, but we put it
 here for consistency in case of signals. */
cregs->gr[28] = 0; /* child */
/*
 * We need to differentiate between a user fork and a
 * kernel fork. We can't use user_mode, because the
 * the syscall path doesn't save iaoq. Right now
 * We rely on the fact that kernel_thread passes
 * in zero for usp.
 */
if (usp == 1) {
/* kernel thread */
cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN;
/* Must exit via ret_from_kernel_thread in order
 * to call schedule_tail()
 */
cregs->kpc = (unsigned long) &ret_from_kernel_thread;
/*
 * Copy function and argument to be called from
 * ret_from_kernel_thread.
 */
#ifdef CONFIG_64BIT
cregs->gr[27] = pregs->gr[27];
#endif
cregs->gr[26] = pregs->gr[26];
cregs->gr[25] = pregs->gr[25];
} else {
/* user thread */
/*
 * Note that the fork wrappers are responsible
 * for setting gr[21].
 */
/* Use same stack depth as parent */
cregs->ksp = (unsigned long)stack
+ (pregs->gr[21] & (THREAD_SIZE - 1));
cregs->gr[30] = usp;
if (p->personality == PER_HPUX) {
#ifdef CONFIG_HPUX
cregs->kpc = (unsigned long) &hpux_child_return;
#else
BUG();
#endif
} else {
cregs->kpc = (unsigned long) &child_return;
}
/* Setup thread TLS area from the 4th parameter in clone */
if (clone_flags & CLONE_SETTLS)
cregs->cr27 = pregs->gr[23];
}
return 0;
}
/* Return the saved kernel PC of a (non-running) task. */
unsigned long thread_saved_pc(struct task_struct *t)
{
return t->thread.regs.kpc;
}
/*
* sys_execve() executes a new program.
*/
/*
 * sys_execve() executes a new program. The path, argv and envp pointers
 * arrive in r26/r25/r24 respectively.
 */
asmlinkage int sys_execve(struct pt_regs *regs)
{
int error;
char *filename;
/* Copy the pathname in from userspace; getname returns an ERR_PTR
 * on failure. */
filename = getname((const char __user *) regs->gr[26]);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename,
(const char __user *const __user *) regs->gr[25],
(const char __user *const __user *) regs->gr[24],
regs);
putname(filename);
out:
return error;
}
/* In-kernel exec: delegate to the assembly helper __execve for the
 * current task. */
extern int __execve(const char *filename,
const char *const argv[],
const char *const envp[], struct task_struct *task);
int kernel_execve(const char *filename,
const char *const argv[],
const char *const envp[])
{
return __execve(filename, argv, envp, current);
}
/*
 * get_wchan - find where in the kernel a blocked task is sleeping.
 *
 * Unwinds up to 16 frames of a non-running task's stack and returns the
 * first instruction pointer that lies outside the scheduler; returns 0
 * for NULL/current/runnable tasks or when the unwind fails.
 */
unsigned long
get_wchan(struct task_struct *p)
{
	struct unwind_frame_info info;
	unsigned long ip;
	int count = 0;

	/* A running task (or ourselves) has no meaningful wait channel. */
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * These bracket the sleeping functions..
	 */
	unwind_frame_init_from_blocked_task(&info, p);
	do {
		if (unwind_once(&info) < 0)	/* unwind error - give up */
			return 0;
		ip = info.ip;
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < 16);	/* bound the walk against corrupt stacks */
	return 0;
}
#ifdef CONFIG_64BIT
/*
 * On 64-bit parisc a function pointer points at an Elf64_Fdesc function
 * descriptor rather than at code.  Return the real entry address, or the
 * original pointer unchanged if the descriptor cannot be read safely.
 */
void *dereference_function_descriptor(void *ptr)
{
	Elf64_Fdesc *desc = ptr;
	void *p;

	/* probe_kernel_address() returns 0 when the fault-safe read succeeds */
	if (!probe_kernel_address(&desc->addr, p))
		ptr = p;
	return ptr;
}
#endif
| gpl-2.0 |
JamesAng/lx-std | arch/sh/kernel/cpu/sh4a/clock-sh7343.c | 2789 | 9813 | /*
* arch/sh/kernel/cpu/sh4a/clock-sh7343.c
*
* SH7343 clock framework support
*
* Copyright (C) 2009 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <asm/clock.h>
/* SH7343 registers */
#define FRQCR 0xa4150000
#define VCLKCR 0xa4150004
#define SCLKACR 0xa4150008
#define SCLKBCR 0xa415000c
#define PLLCR 0xa4150024
#define MSTPCR0 0xa4150030
#define MSTPCR1 0xa4150034
#define MSTPCR2 0xa4150038
#define DLLFRQ 0xa4150050
/* Fixed 32 KHz root clock for RTC and Power Management purposes */
static struct clk r_clk = {
	.rate		= 32768,
};

/*
 * Default rate for the root input clock, reset this with clk_set_rate()
 * from the platform code.
 */
struct clk extal_clk = {
	.rate		= 33333333,
};

/* The dll block multiplies the 32khz r_clk, may be used instead of extal */
static unsigned long dll_recalc(struct clk *clk)
{
	unsigned long mult;

	/* The DLLFRQ multiplier only applies while the DLL is selected
	 * (PLLCR bit 12); otherwise the DLL output is dead (rate 0). */
	if (__raw_readl(PLLCR) & 0x1000)
		mult = __raw_readl(DLLFRQ);
	else
		mult = 0;

	return clk->parent->rate * mult;
}

static struct sh_clk_ops dll_clk_ops = {
	.recalc		= dll_recalc,
};

static struct clk dll_clk = {
	.ops		= &dll_clk_ops,
	.parent		= &r_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/* PLL output: parent rate times the FRQCR-programmed multiplier, or 1:1
 * when the PLL is disabled (PLLCR bit 14 clear). */
static unsigned long pll_recalc(struct clk *clk)
{
	unsigned long mult = 1;

	if (__raw_readl(PLLCR) & 0x4000)
		mult = (((__raw_readl(FRQCR) >> 24) & 0x1f) + 1);

	return clk->parent->rate * mult;
}

static struct sh_clk_ops pll_clk_ops = {
	.recalc		= pll_recalc,
};

/* .parent is chosen at runtime in arch_clk_init() (extal vs. dll) */
static struct clk pll_clk = {
	.ops		= &pll_clk_ops,
	.flags		= CLK_ENABLE_ON_INIT,
};

/* Root clocks, registered first by arch_clk_init() */
struct clk *main_clks[] = {
	&r_clk,
	&extal_clk,
	&dll_clk,
	&pll_clk,
};
/* Ratio table for the DIV4 clocks: selector value n yields a ratio of
 * multipliers[n]/divisors[n] relative to the PLL output. */
static int multipliers[] = { 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
static int divisors[] = { 1, 3, 2, 5, 3, 4, 5, 6, 8, 10, 12, 16, 20 };

static struct clk_div_mult_table div4_div_mult_table = {
	.divisors = divisors,
	.nr_divisors = ARRAY_SIZE(divisors),
	.multipliers = multipliers,
	.nr_multipliers = ARRAY_SIZE(multipliers),
};

static struct clk_div4_table div4_table = {
	.div_mult_table = &div4_div_mult_table,
};

/* Indexes of the FRQCR/SCLK[AB]CR-controlled divider clocks */
enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_B3, DIV4_P,
       DIV4_SIUA, DIV4_SIUB, DIV4_NR };

/* All DIV4 clocks divide down from the PLL output */
#define DIV4(_reg, _bit, _mask, _flags) \
	SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags)

struct clk div4_clks[DIV4_NR] = {
	[DIV4_I] = DIV4(FRQCR, 20, 0x1fff, CLK_ENABLE_ON_INIT),
	[DIV4_U] = DIV4(FRQCR, 16, 0x1fff, CLK_ENABLE_ON_INIT),
	[DIV4_SH] = DIV4(FRQCR, 12, 0x1fff, CLK_ENABLE_ON_INIT),
	[DIV4_B] = DIV4(FRQCR, 8, 0x1fff, CLK_ENABLE_ON_INIT),
	[DIV4_B3] = DIV4(FRQCR, 4, 0x1fff, CLK_ENABLE_ON_INIT),
	[DIV4_P] = DIV4(FRQCR, 0, 0x1fff, 0),
	[DIV4_SIUA] = DIV4(SCLKACR, 0, 0x1fff, 0),
	[DIV4_SIUB] = DIV4(SCLKBCR, 0, 0x1fff, 0),
};

/* Single DIV6 clock: the video clock, controlled through VCLKCR */
enum { DIV6_V, DIV6_NR };

struct clk div6_clks[DIV6_NR] = {
	[DIV6_V] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
};

#define MSTP(_parent, _reg, _bit, _flags) \
	SH_CLK_MSTP32(_parent, _reg, _bit, _flags)

/* Module-stop gate clocks; MSTPnXX = bit XX of MSTPCRn */
enum { MSTP031, MSTP030, MSTP029, MSTP028, MSTP026,
       MSTP023, MSTP022, MSTP021, MSTP020, MSTP019, MSTP018, MSTP017, MSTP016,
       MSTP015, MSTP014, MSTP013, MSTP012, MSTP011, MSTP010,
       MSTP007, MSTP006, MSTP005, MSTP004, MSTP003, MSTP002, MSTP001,
       MSTP109, MSTP108, MSTP100,
       MSTP225, MSTP224, MSTP218, MSTP217, MSTP216,
       MSTP214, MSTP213, MSTP212, MSTP211, MSTP208,
       MSTP206, MSTP205, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
       MSTP_NR };

/* Parent of each gate reflects the bus the peripheral sits on
 * (DIV4_I/U/B/P or the 32 KHz r_clk). */
static struct clk mstp_clks[MSTP_NR] = {
	[MSTP031] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT),
	[MSTP030] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT),
	[MSTP029] = MSTP(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT),
	[MSTP028] = MSTP(&div4_clks[DIV4_U], MSTPCR0, 28, CLK_ENABLE_ON_INIT),
	[MSTP026] = MSTP(&div4_clks[DIV4_B], MSTPCR0, 26, CLK_ENABLE_ON_INIT),
	[MSTP023] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 23, 0),
	[MSTP022] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 22, 0),
	[MSTP021] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 21, 0),
	[MSTP020] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 20, 0),
	[MSTP019] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 19, 0),
	[MSTP017] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 17, 0),
	[MSTP015] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 15, 0),
	[MSTP014] = MSTP(&r_clk, MSTPCR0, 14, 0),
	[MSTP013] = MSTP(&r_clk, MSTPCR0, 13, 0),
	[MSTP011] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 11, 0),
	[MSTP010] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 10, 0),
	[MSTP007] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 7, 0),
	[MSTP006] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 6, 0),
	[MSTP005] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 5, 0),
	[MSTP004] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 4, 0),
	[MSTP003] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 3, 0),
	[MSTP002] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 2, 0),
	[MSTP001] = MSTP(&div4_clks[DIV4_P], MSTPCR0, 1, 0),
	[MSTP109] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 9, 0),
	[MSTP108] = MSTP(&div4_clks[DIV4_P], MSTPCR1, 8, 0),
	[MSTP225] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 25, 0),
	[MSTP224] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 24, 0),
	[MSTP218] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 18, 0),
	[MSTP217] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 17, 0),
	[MSTP216] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 16, 0),
	[MSTP214] = MSTP(&r_clk, MSTPCR2, 14, 0),
	[MSTP213] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 13, 0),
	[MSTP212] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 12, 0),
	[MSTP211] = MSTP(&div4_clks[DIV4_P], MSTPCR2, 11, 0),
	[MSTP208] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 8, 0),
	[MSTP206] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 6, CLK_ENABLE_ON_INIT),
	[MSTP205] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 5, 0),
	[MSTP204] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 4, 0),
	[MSTP203] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 3, 0),
	[MSTP202] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 2, CLK_ENABLE_ON_INIT),
	[MSTP201] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 1, CLK_ENABLE_ON_INIT),
	[MSTP200] = MSTP(&div4_clks[DIV4_B], MSTPCR2, 0, 0),
};
/* clkdev name -> struct clk mapping handed to the common clock lookup
 * machinery by arch_clk_init(). */
static struct clk_lookup lookups[] = {
	/* main clocks */
	CLKDEV_CON_ID("rclk", &r_clk),
	CLKDEV_CON_ID("extal", &extal_clk),
	CLKDEV_CON_ID("dll_clk", &dll_clk),
	CLKDEV_CON_ID("pll_clk", &pll_clk),

	/* DIV4 clocks */
	CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]),
	CLKDEV_CON_ID("umem_clk", &div4_clks[DIV4_U]),
	CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]),
	CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]),
	CLKDEV_CON_ID("b3_clk", &div4_clks[DIV4_B3]),
	CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]),
	CLKDEV_CON_ID("siua_clk", &div4_clks[DIV4_SIUA]),
	CLKDEV_CON_ID("siub_clk", &div4_clks[DIV4_SIUB]),

	/* DIV6 clocks */
	CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),

	/* MSTP32 clocks */
	CLKDEV_CON_ID("tlb0", &mstp_clks[MSTP031]),
	CLKDEV_CON_ID("ic0", &mstp_clks[MSTP030]),
	CLKDEV_CON_ID("oc0", &mstp_clks[MSTP029]),
	CLKDEV_CON_ID("uram0", &mstp_clks[MSTP028]),
	CLKDEV_CON_ID("xymem0", &mstp_clks[MSTP026]),
	CLKDEV_CON_ID("intc3", &mstp_clks[MSTP023]),
	CLKDEV_CON_ID("intc0", &mstp_clks[MSTP022]),
	CLKDEV_CON_ID("dmac0", &mstp_clks[MSTP021]),
	CLKDEV_CON_ID("sh0", &mstp_clks[MSTP020]),
	CLKDEV_CON_ID("hudi0", &mstp_clks[MSTP019]),
	CLKDEV_CON_ID("ubc0", &mstp_clks[MSTP017]),
	CLKDEV_CON_ID("tmu_fck", &mstp_clks[MSTP015]),
	CLKDEV_CON_ID("cmt_fck", &mstp_clks[MSTP014]),
	CLKDEV_CON_ID("rwdt0", &mstp_clks[MSTP013]),
	CLKDEV_CON_ID("mfi0", &mstp_clks[MSTP011]),
	CLKDEV_CON_ID("flctl0", &mstp_clks[MSTP010]),

	/* SCIF clocks are matched per-device via the ICK id */
	CLKDEV_ICK_ID("sci_fck", "sh-sci.0", &mstp_clks[MSTP007]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.1", &mstp_clks[MSTP006]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.2", &mstp_clks[MSTP005]),
	CLKDEV_ICK_ID("sci_fck", "sh-sci.3", &mstp_clks[MSTP004]),

	CLKDEV_CON_ID("sio0", &mstp_clks[MSTP003]),
	CLKDEV_CON_ID("siof0", &mstp_clks[MSTP002]),
	CLKDEV_CON_ID("siof1", &mstp_clks[MSTP001]),
	CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP109]),
	CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP108]),
	CLKDEV_CON_ID("tpu0", &mstp_clks[MSTP225]),
	CLKDEV_CON_ID("irda0", &mstp_clks[MSTP224]),
	CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP218]),
	CLKDEV_CON_ID("mmcif0", &mstp_clks[MSTP217]),
	CLKDEV_CON_ID("sim0", &mstp_clks[MSTP216]),
	CLKDEV_CON_ID("keysc0", &mstp_clks[MSTP214]),
	CLKDEV_CON_ID("tsif0", &mstp_clks[MSTP213]),
	CLKDEV_CON_ID("s3d40", &mstp_clks[MSTP212]),
	CLKDEV_CON_ID("usbf0", &mstp_clks[MSTP211]),
	CLKDEV_CON_ID("siu0", &mstp_clks[MSTP208]),
	CLKDEV_CON_ID("jpu0", &mstp_clks[MSTP206]),
	CLKDEV_CON_ID("vou0", &mstp_clks[MSTP205]),
	CLKDEV_CON_ID("beu0", &mstp_clks[MSTP204]),
	CLKDEV_CON_ID("ceu0", &mstp_clks[MSTP203]),
	CLKDEV_CON_ID("veu0", &mstp_clks[MSTP202]),
	CLKDEV_CON_ID("vpu0", &mstp_clks[MSTP201]),
	CLKDEV_CON_ID("lcdc0", &mstp_clks[MSTP200]),
};
/*
 * Register every SH7343 clock: pick the PLL parent (extal vs. DLL),
 * register the root clocks, add the clkdev lookup table and finally the
 * DIV4/DIV6/MSTP groups.  Returns 0 on success.
 */
int __init arch_clk_init(void)
{
	int i, ret = 0;

	/* autodetect extal or dll configuration */
	pll_clk.parent = (__raw_readl(PLLCR) & 0x1000) ?
		&dll_clk : &extal_clk;

	for (i = 0; i < ARRAY_SIZE(main_clks) && !ret; i++)
		ret = clk_register(main_clks[i]);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	if (!ret)
		ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table);
	if (!ret)
		ret = sh_clk_div6_register(div6_clks, DIV6_NR);
	if (!ret)
		ret = sh_clk_mstp_register(mstp_clks, MSTP_NR);

	return ret;
}
| gpl-2.0 |
CyanogenMod/sony-kernel-u8500 | drivers/tty/serial/sa1100.c | 3045 | 23052 | /*
* Driver for SA11x0 serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#if defined(CONFIG_SERIAL_SA1100_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <asm/mach/serial_sa1100.h>
/* We've been assigned a range on the "Low-density serial ports" major */
#define SERIAL_SA1100_MAJOR 204
#define MINOR_START 5
#define NR_PORTS 3
#define SA1100_ISR_PASS_LIMIT 256
/*
* Convert from ignore_status_mask or read_status_mask to UTSR[01]
*/
#define SM_TO_UTSR0(x) ((x) & 0xff)
#define SM_TO_UTSR1(x) ((x) >> 8)
#define UTSR0_TO_SM(x) ((x))
#define UTSR1_TO_SM(x) ((x) << 8)
#define UART_GET_UTCR0(sport) __raw_readl((sport)->port.membase + UTCR0)
#define UART_GET_UTCR1(sport) __raw_readl((sport)->port.membase + UTCR1)
#define UART_GET_UTCR2(sport) __raw_readl((sport)->port.membase + UTCR2)
#define UART_GET_UTCR3(sport) __raw_readl((sport)->port.membase + UTCR3)
#define UART_GET_UTSR0(sport) __raw_readl((sport)->port.membase + UTSR0)
#define UART_GET_UTSR1(sport) __raw_readl((sport)->port.membase + UTSR1)
#define UART_GET_CHAR(sport) __raw_readl((sport)->port.membase + UTDR)
#define UART_PUT_UTCR0(sport,v) __raw_writel((v),(sport)->port.membase + UTCR0)
#define UART_PUT_UTCR1(sport,v) __raw_writel((v),(sport)->port.membase + UTCR1)
#define UART_PUT_UTCR2(sport,v) __raw_writel((v),(sport)->port.membase + UTCR2)
#define UART_PUT_UTCR3(sport,v) __raw_writel((v),(sport)->port.membase + UTCR3)
#define UART_PUT_UTSR0(sport,v) __raw_writel((v),(sport)->port.membase + UTSR0)
#define UART_PUT_UTSR1(sport,v) __raw_writel((v),(sport)->port.membase + UTSR1)
#define UART_PUT_CHAR(sport,v) __raw_writel((v),(sport)->port.membase + UTDR)
/*
* This is the size of our serial port register set.
*/
#define UART_PORT_SIZE 0x24
/*
* This determines how often we check the modem status signals
* for any change. They generally aren't connected to an IRQ
* so we have to poll them. We also check immediately before
* filling the TX fifo incase CTS has been dropped.
*/
#define MCTRL_TIMEOUT (250*HZ/1000)
/* Per-port driver state: the core uart_port plus a timer used to poll
 * the (interrupt-less) modem status lines, and the last polled value. */
struct sa1100_port {
	struct uart_port	port;
	struct timer_list	timer;
	unsigned int		old_status;
};
/*
 * Poll the modem status lines, account any changes since the previous
 * poll, and notify the serial core / sleepers on delta_msr_wait.
 */
static void sa1100_mctrl_check(struct sa1100_port *sport)
{
	unsigned int now, delta;

	now = sport->port.ops->get_mctrl(&sport->port);
	delta = now ^ sport->old_status;
	if (!delta)
		return;

	sport->old_status = now;

	if (delta & TIOCM_RI)
		sport->port.icount.rng++;
	if (delta & TIOCM_DSR)
		sport->port.icount.dsr++;
	if (delta & TIOCM_CAR)
		uart_handle_dcd_change(&sport->port, now & TIOCM_CAR);
	if (delta & TIOCM_CTS)
		uart_handle_cts_change(&sport->port, now & TIOCM_CTS);

	wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
}
/*
 * Per-port poll timer: check the modem status lines and re-arm, but only
 * while the port is open (state non-NULL).
 */
static void sa1100_timeout(unsigned long data)
{
	struct sa1100_port *sport = (struct sa1100_port *)data;
	unsigned long flags;

	if (!sport->port.state)
		return;

	spin_lock_irqsave(&sport->port.lock, flags);
	sa1100_mctrl_check(sport);
	spin_unlock_irqrestore(&sport->port.lock, flags);

	mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT);
}
/*
 * Stop transmitting: mask the transmit interrupt and drop the TX FIFO
 * service bit from the status mask so the ISR ignores it.
 * interrupts disabled on entry.
 */
static void sa1100_stop_tx(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	u32 utcr3;

	utcr3 = UART_GET_UTCR3(sport);
	UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_TIE);
	sport->port.read_status_mask &= ~UTSR0_TO_SM(UTSR0_TFS);
}

/*
 * Start transmitting: watch TFS in the status mask, then enable the
 * transmit interrupt.
 * port locked and interrupts disabled.
 */
static void sa1100_start_tx(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	u32 utcr3;

	utcr3 = UART_GET_UTCR3(sport);
	/* mask bit is set before the interrupt enable hits the hardware */
	sport->port.read_status_mask |= UTSR0_TO_SM(UTSR0_TFS);
	UART_PUT_UTCR3(sport, utcr3 | UTCR3_TIE);
}

/*
 * Stop receiving: mask the receive interrupt.
 * Interrupts enabled.
 */
static void sa1100_stop_rx(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	u32 utcr3;

	utcr3 = UART_GET_UTCR3(sport);
	UART_PUT_UTCR3(sport, utcr3 & ~UTCR3_RIE);
}

/*
 * Set the modem control timer to fire immediately.
 */
static void sa1100_enable_ms(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	mod_timer(&sport->timer, jiffies);
}
/*
 * Drain the RX FIFO into the tty layer.  For each character the current
 * UTSR0/UTSR1 status (folded into one word by the *_TO_SM macros) is
 * consulted for parity/framing/overrun errors before insertion.
 * Called from the ISR with the port lock held.
 */
static void
sa1100_rx_chars(struct sa1100_port *sport)
{
	struct tty_struct *tty = sport->port.state->port.tty;
	unsigned int status, ch, flg;

	status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
		 UTSR0_TO_SM(UART_GET_UTSR0(sport));
	while (status & UTSR1_TO_SM(UTSR1_RNE)) {
		ch = UART_GET_CHAR(sport);

		sport->port.icount.rx++;

		flg = TTY_NORMAL;

		/*
		 * note that the error handling code is
		 * out of the main execution path
		 */
		if (status & UTSR1_TO_SM(UTSR1_PRE | UTSR1_FRE | UTSR1_ROR)) {
			if (status & UTSR1_TO_SM(UTSR1_PRE))
				sport->port.icount.parity++;
			else if (status & UTSR1_TO_SM(UTSR1_FRE))
				sport->port.icount.frame++;
			if (status & UTSR1_TO_SM(UTSR1_ROR))
				sport->port.icount.overrun++;

			/* only report errors the user asked to see */
			status &= sport->port.read_status_mask;

			if (status & UTSR1_TO_SM(UTSR1_PRE))
				flg = TTY_PARITY;
			else if (status & UTSR1_TO_SM(UTSR1_FRE))
				flg = TTY_FRAME;

#ifdef SUPPORT_SYSRQ
			sport->port.sysrq = 0;
#endif
		}

		if (uart_handle_sysrq_char(&sport->port, ch))
			goto ignore_char;

		uart_insert_char(&sport->port, status, UTSR1_TO_SM(UTSR1_ROR), ch, flg);

	ignore_char:
		status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
			 UTSR0_TO_SM(UART_GET_UTSR0(sport));
	}

	tty_flip_buffer_push(tty);
}

/*
 * Feed the TX FIFO from the circular transmit buffer: x_char (flow
 * control) first, then data while the FIFO accepts it.  Stops TX when
 * the buffer is empty or transmission has been halted, and wakes
 * writers once the buffer drains below WAKEUP_CHARS.
 */
static void sa1100_tx_chars(struct sa1100_port *sport)
{
	struct circ_buf *xmit = &sport->port.state->xmit;

	if (sport->port.x_char) {
		UART_PUT_CHAR(sport, sport->port.x_char);
		sport->port.icount.tx++;
		sport->port.x_char = 0;
		return;
	}

	/*
	 * Check the modem control lines before
	 * transmitting anything.
	 */
	sa1100_mctrl_check(sport);

	if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) {
		sa1100_stop_tx(&sport->port);
		return;
	}

	/*
	 * Tried using FIFO (not checking TNF) for fifo fill:
	 * still had the '4 bytes repeated' problem.
	 */
	while (UART_GET_UTSR1(sport) & UTSR1_TNF) {
		UART_PUT_CHAR(sport, xmit->buf[xmit->tail]);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		sport->port.icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&sport->port);

	if (uart_circ_empty(xmit))
		sa1100_stop_tx(&sport->port);
}

/*
 * Top-level interrupt handler: loops while RX/TX service bits remain
 * pending (bounded by SA1100_ISR_PASS_LIMIT to avoid livelock), acking
 * receiver-idle and break bits by writing them back to UTSR0.
 */
static irqreturn_t sa1100_int(int irq, void *dev_id)
{
	struct sa1100_port *sport = dev_id;
	unsigned int status, pass_counter = 0;

	spin_lock(&sport->port.lock);
	status = UART_GET_UTSR0(sport);
	status &= SM_TO_UTSR0(sport->port.read_status_mask) | ~UTSR0_TFS;
	do {
		if (status & (UTSR0_RFS | UTSR0_RID)) {
			/* Clear the receiver idle bit, if set */
			if (status & UTSR0_RID)
				UART_PUT_UTSR0(sport, UTSR0_RID);
			sa1100_rx_chars(sport);
		}

		/* Clear the relevant break bits */
		if (status & (UTSR0_RBB | UTSR0_REB))
			UART_PUT_UTSR0(sport, status & (UTSR0_RBB | UTSR0_REB));

		if (status & UTSR0_RBB)
			sport->port.icount.brk++;

		if (status & UTSR0_REB)
			uart_handle_break(&sport->port);

		if (status & UTSR0_TFS)
			sa1100_tx_chars(sport);
		if (pass_counter++ > SA1100_ISR_PASS_LIMIT)
			break;
		status = UART_GET_UTSR0(sport);
		status &= SM_TO_UTSR0(sport->port.read_status_mask) |
			  ~UTSR0_TFS;
	} while (status & (UTSR0_TFS | UTSR0_RFS | UTSR0_RID));
	spin_unlock(&sport->port.lock);

	return IRQ_HANDLED;
}
/*
 * Return TIOCSER_TEMT when transmitter is not busy.
 */
static unsigned int sa1100_tx_empty(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	return UART_GET_UTSR1(sport) & UTSR1_TBY ? 0 : TIOCSER_TEMT;
}

/* No readable modem status lines by default: report CTS/DSR/DCD always
 * asserted so the core never blocks waiting for carrier.  Platforms can
 * override this via sa1100_register_uart_fns(). */
static unsigned int sa1100_get_mctrl(struct uart_port *port)
{
	return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}

/* No modem control outputs on the base hardware - intentionally empty. */
static void sa1100_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
/*
 * Assert (break_state == -1) or clear the transmit break condition.
 * Interrupts always disabled.
 */
static void sa1100_break_ctl(struct uart_port *port, int break_state)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	unsigned long flags;
	unsigned int reg;

	spin_lock_irqsave(&sport->port.lock, flags);

	reg = UART_GET_UTCR3(sport);
	if (break_state == -1)
		reg |= UTCR3_BRK;
	else
		reg &= ~UTCR3_BRK;
	UART_PUT_UTCR3(sport, reg);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}
/*
 * Open the port: claim the IRQ, clear any stale status, enable the
 * receiver/transmitter with receive interrupts, and kick off modem
 * status polling.  Returns 0 or the request_irq() error.
 */
static int sa1100_startup(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	int retval;

	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(sport->port.irq, sa1100_int, 0,
			     "sa11x0-uart", sport);
	if (retval)
		return retval;

	/*
	 * Finally, clear and enable interrupts
	 */
	UART_PUT_UTSR0(sport, -1);	/* write-1-to-clear all status bits */
	UART_PUT_UTCR3(sport, UTCR3_RXE | UTCR3_TXE | UTCR3_RIE);

	/*
	 * Enable modem status interrupts
	 */
	spin_lock_irq(&sport->port.lock);
	sa1100_enable_ms(&sport->port);
	spin_unlock_irq(&sport->port.lock);

	return 0;
}

/*
 * Close the port: stop the poll timer, release the IRQ, and disable the
 * UART (interrupts, RX/TX enables and break) by zeroing UTCR3.
 */
static void sa1100_shutdown(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	/*
	 * Stop our timer.
	 */
	del_timer_sync(&sport->timer);

	/*
	 * Free the interrupt
	 */
	free_irq(sport->port.irq, sport);

	/*
	 * Disable all interrupts, port and break condition.
	 */
	UART_PUT_UTCR3(sport, 0);
}
/*
 * Apply new termios settings: word size (CS7/CS8 only), stop bits,
 * parity, baud divisor, and the read/ignore status masks.  The UART is
 * drained and fully disabled before UTCR0/1/2 are reprogrammed, then
 * re-enabled with its previous UTCR3 value.
 */
static void
sa1100_set_termios(struct uart_port *port, struct ktermios *termios,
		   struct ktermios *old)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;
	unsigned long flags;
	unsigned int utcr0, old_utcr3, baud, quot;
	unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8;

	/*
	 * We only support CS7 and CS8.
	 */
	while ((termios->c_cflag & CSIZE) != CS7 &&
	       (termios->c_cflag & CSIZE) != CS8) {
		termios->c_cflag &= ~CSIZE;
		termios->c_cflag |= old_csize;
		old_csize = CS8;	/* second pass falls back to CS8 */
	}

	if ((termios->c_cflag & CSIZE) == CS8)
		utcr0 = UTCR0_DSS;
	else
		utcr0 = 0;

	if (termios->c_cflag & CSTOPB)
		utcr0 |= UTCR0_SBS;
	if (termios->c_cflag & PARENB) {
		utcr0 |= UTCR0_PE;
		if (!(termios->c_cflag & PARODD))
			utcr0 |= UTCR0_OES;
	}

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
	quot = uart_get_divisor(port, baud);

	spin_lock_irqsave(&sport->port.lock, flags);

	/* rebuild the status mask: keep TFS, always report overruns */
	sport->port.read_status_mask &= UTSR0_TO_SM(UTSR0_TFS);
	sport->port.read_status_mask |= UTSR1_TO_SM(UTSR1_ROR);
	if (termios->c_iflag & INPCK)
		sport->port.read_status_mask |=
				UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
	if (termios->c_iflag & (BRKINT | PARMRK))
		sport->port.read_status_mask |=
				UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);

	/*
	 * Characters to ignore
	 */
	sport->port.ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		sport->port.ignore_status_mask |=
				UTSR1_TO_SM(UTSR1_FRE | UTSR1_PRE);
	if (termios->c_iflag & IGNBRK) {
		sport->port.ignore_status_mask |=
				UTSR0_TO_SM(UTSR0_RBB | UTSR0_REB);
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			sport->port.ignore_status_mask |=
				UTSR1_TO_SM(UTSR1_ROR);
	}

	del_timer_sync(&sport->timer);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	/*
	 * disable interrupts and drain transmitter
	 */
	old_utcr3 = UART_GET_UTCR3(sport);
	UART_PUT_UTCR3(sport, old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE));

	while (UART_GET_UTSR1(sport) & UTSR1_TBY)
		barrier();

	/* then, disable everything */
	UART_PUT_UTCR3(sport, 0);

	/* set the parity, stop bits and data size */
	UART_PUT_UTCR0(sport, utcr0);

	/* set the baud rate */
	quot -= 1;	/* hardware divides by (quot + 1) */
	UART_PUT_UTCR1(sport, ((quot & 0xf00) >> 8));
	UART_PUT_UTCR2(sport, (quot & 0xff));

	UART_PUT_UTSR0(sport, -1);	/* clear stale status */

	UART_PUT_UTCR3(sport, old_utcr3);

	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
		sa1100_enable_ms(&sport->port);

	spin_unlock_irqrestore(&sport->port.lock, flags);
}
/* Return the port-type name for /proc and TIOCGSERIAL reporting. */
static const char *sa1100_type(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	if (sport->port.type == PORT_SA1100)
		return "SA1100";
	return NULL;
}

/*
 * Release the memory region(s) being used by 'port'.
 */
static void sa1100_release_port(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	release_mem_region(sport->port.mapbase, UART_PORT_SIZE);
}

/*
 * Request the memory region(s) being used by 'port'.
 */
static int sa1100_request_port(struct uart_port *port)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	if (!request_mem_region(sport->port.mapbase, UART_PORT_SIZE,
				"sa11x0-uart"))
		return -EBUSY;
	return 0;
}

/*
 * Configure/autoconfigure the port.
 */
static void sa1100_config_port(struct uart_port *port, int flags)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	if (!(flags & UART_CONFIG_TYPE))
		return;

	if (sa1100_request_port(&sport->port) == 0)
		sport->port.type = PORT_SA1100;
}
/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 * The only change we allow are to the flags and type, and
 * even then only between PORT_SA1100 and PORT_UNKNOWN.
 */
static int
sa1100_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	if (ser->type != PORT_UNKNOWN && ser->type != PORT_SA1100)
		return -EINVAL;
	if (sport->port.irq != ser->irq)
		return -EINVAL;
	if (ser->io_type != SERIAL_IO_MEM)
		return -EINVAL;
	if (sport->port.uartclk / 16 != ser->baud_base)
		return -EINVAL;
	if ((void *)sport->port.mapbase != ser->iomem_base)
		return -EINVAL;
	if (sport->port.iobase != ser->port)
		return -EINVAL;
	if (ser->hub6 != 0)
		return -EINVAL;
	return 0;
}
/* uart_ops handed to the serial core.  Not const: get_mctrl/set_mctrl
 * and pm/set_wake may be overridden by sa1100_register_uart_fns(). */
static struct uart_ops sa1100_pops = {
	.tx_empty	= sa1100_tx_empty,
	.set_mctrl	= sa1100_set_mctrl,
	.get_mctrl	= sa1100_get_mctrl,
	.stop_tx	= sa1100_stop_tx,
	.start_tx	= sa1100_start_tx,
	.stop_rx	= sa1100_stop_rx,
	.enable_ms	= sa1100_enable_ms,
	.break_ctl	= sa1100_break_ctl,
	.startup	= sa1100_startup,
	.shutdown	= sa1100_shutdown,
	.set_termios	= sa1100_set_termios,
	.type		= sa1100_type,
	.release_port	= sa1100_release_port,
	.request_port	= sa1100_request_port,
	.config_port	= sa1100_config_port,
	.verify_port	= sa1100_verify_port,
};

/* One entry per hardware UART; filled in by sa1100_init_ports() and
 * sa1100_register_uart(). */
static struct sa1100_port sa1100_ports[NR_PORTS];
/*
 * Setup the SA1100 serial ports.  Note that we don't include the IrDA
 * port here since we have our own SIR/FIR driver (see drivers/net/irda)
 *
 * Note also that we support "console=ttySAx" where "x" is either 0 or 1.
 * Which serial port this ends up being depends on the machine you're
 * running this kernel on.  I'm not convinced that this is a good idea,
 * but that's the way it traditionally works.
 *
 * Note that NanoEngine UART3 becomes UART2, and UART2 is no longer
 * used here.
 */
static void __init sa1100_init_ports(void)
{
	static int first = 1;	/* idempotence guard - run once only */
	int i;

	if (!first)
		return;
	first = 0;

	for (i = 0; i < NR_PORTS; i++) {
		sa1100_ports[i].port.uartclk   = 3686400;
		sa1100_ports[i].port.ops       = &sa1100_pops;
		sa1100_ports[i].port.fifosize  = 8;
		sa1100_ports[i].port.line      = i;
		sa1100_ports[i].port.iotype    = UPIO_MEM;
		init_timer(&sa1100_ports[i].timer);
		sa1100_ports[i].timer.function = sa1100_timeout;
		sa1100_ports[i].timer.data     = (unsigned long)&sa1100_ports[i];
	}

	/*
	 * make transmit lines outputs, so that when the port
	 * is closed, the output is in the MARK state.
	 */
	PPDR |= PPC_TXD1 | PPC_TXD3;
	PPSR |= PPC_TXD1 | PPC_TXD3;
}
/*
 * Let board code override the modem-control and power-management hooks
 * in the shared uart_ops table.  get_mctrl/set_mctrl keep their defaults
 * when the board supplies NULL; pm/set_wake are taken unconditionally.
 */
void __devinit sa1100_register_uart_fns(struct sa1100_port_fns *fns)
{
	if (fns->get_mctrl != NULL)
		sa1100_pops.get_mctrl = fns->get_mctrl;
	if (fns->set_mctrl != NULL)
		sa1100_pops.set_mctrl = fns->set_mctrl;

	sa1100_pops.pm       = fns->pm;
	sa1100_pops.set_wake = fns->set_wake;
}
/*
 * Bind logical port 'idx' (the ttySAx number) to hardware UART 'port'
 * (1-3): fills in the register base, physical address, IRQ and flags.
 * Invalid indexes/ports are logged and ignored.
 */
void __init sa1100_register_uart(int idx, int port)
{
	if (idx >= NR_PORTS) {
		printk(KERN_ERR "%s: bad index number %d\n", __func__, idx);
		return;
	}

	switch (port) {
	case 1:
		sa1100_ports[idx].port.membase = (void __iomem *)&Ser1UTCR0;
		sa1100_ports[idx].port.mapbase = _Ser1UTCR0;
		sa1100_ports[idx].port.irq     = IRQ_Ser1UART;
		sa1100_ports[idx].port.flags   = UPF_BOOT_AUTOCONF;
		break;

	case 2:
		sa1100_ports[idx].port.membase = (void __iomem *)&Ser2UTCR0;
		sa1100_ports[idx].port.mapbase = _Ser2UTCR0;
		sa1100_ports[idx].port.irq     = IRQ_Ser2ICP;
		sa1100_ports[idx].port.flags   = UPF_BOOT_AUTOCONF;
		break;

	case 3:
		sa1100_ports[idx].port.membase = (void __iomem *)&Ser3UTCR0;
		sa1100_ports[idx].port.mapbase = _Ser3UTCR0;
		sa1100_ports[idx].port.irq     = IRQ_Ser3UART;
		sa1100_ports[idx].port.flags   = UPF_BOOT_AUTOCONF;
		break;

	default:
		printk(KERN_ERR "%s: bad port number %d\n", __func__, port);
	}
}
#ifdef CONFIG_SERIAL_SA1100_CONSOLE
/* Busy-wait for TX FIFO space, then queue one character. */
static void sa1100_console_putchar(struct uart_port *port, int ch)
{
	struct sa1100_port *sport = (struct sa1100_port *)port;

	do {
		barrier();
	} while (!(UART_GET_UTSR1(sport) & UTSR1_TNF));
	UART_PUT_CHAR(sport, ch);
}
/*
 * Console write: temporarily force the transmitter on with RX/TX
 * interrupts masked, emit the string, wait for the transmitter to go
 * idle, then restore the original UTCR3.
 * Interrupts are disabled on entering.
 */
static void
sa1100_console_write(struct console *co, const char *s, unsigned int count)
{
	struct sa1100_port *sport = &sa1100_ports[co->index];
	unsigned int old_utcr3, status;

	/*
	 * First, save UTCR3 and then disable interrupts
	 */
	old_utcr3 = UART_GET_UTCR3(sport);
	UART_PUT_UTCR3(sport, (old_utcr3 & ~(UTCR3_RIE | UTCR3_TIE)) |
				UTCR3_TXE);

	uart_console_write(&sport->port, s, count, sa1100_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore UTCR3
	 */
	do {
		status = UART_GET_UTSR1(sport);
	} while (status & UTSR1_TBY);
	UART_PUT_UTCR3(sport, old_utcr3);
}
/*
 * If the port was already initialised (eg, by a boot loader),
 * try to determine the current setup.
 */
static void __init
sa1100_console_get_options(struct sa1100_port *sport, int *baud,
			   int *parity, int *bits)
{
	unsigned int utcr3;

	utcr3 = UART_GET_UTCR3(sport) & (UTCR3_RXE | UTCR3_TXE);
	if (utcr3 == (UTCR3_RXE | UTCR3_TXE)) {
		/* ok, the port was enabled */
		unsigned int utcr0, quot;

		utcr0 = UART_GET_UTCR0(sport);

		*parity = 'n';
		if (utcr0 & UTCR0_PE) {
			/* even when OES set, otherwise odd */
			if (utcr0 & UTCR0_OES)
				*parity = 'e';
			else
				*parity = 'o';
		}

		if (utcr0 & UTCR0_DSS)
			*bits = 8;
		else
			*bits = 7;

		/* reassemble the 12-bit divisor from UTCR1/UTCR2 */
		quot = UART_GET_UTCR2(sport) | UART_GET_UTCR1(sport) << 8;
		quot &= 0xfff;
		*baud = sport->port.uartclk / (16 * (quot + 1));
	}
}

/*
 * Console setup: parse "console=ttySAx,options" or fall back to the
 * settings left in the hardware by the boot loader.
 */
static int __init
sa1100_console_setup(struct console *co, char *options)
{
	struct sa1100_port *sport;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index == -1 || co->index >= NR_PORTS)
		co->index = 0;
	sport = &sa1100_ports[co->index];

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);
	else
		sa1100_console_get_options(sport, &baud, &parity, &bits);

	return uart_set_options(&sport->port, co, baud, parity, bits, flow);
}
/* Forward declaration: the console needs the driver, defined below. */
static struct uart_driver sa1100_reg;
static struct console sa1100_console = {
	.name		= "ttySA",
	.write		= sa1100_console_write,
	.device		= uart_console_device,
	.setup		= sa1100_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,	/* -1: pick first available port */
	.data		= &sa1100_reg,
};

static int __init sa1100_rs_console_init(void)
{
	sa1100_init_ports();
	register_console(&sa1100_console);
	return 0;
}
console_initcall(sa1100_rs_console_init);

#define SA1100_CONSOLE	&sa1100_console
#else
#define SA1100_CONSOLE	NULL
#endif

static struct uart_driver sa1100_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttySA",
	.dev_name		= "ttySA",
	.major			= SERIAL_SA1100_MAJOR,
	.minor			= MINOR_START,
	.nr			= NR_PORTS,
	.cons			= SA1100_CONSOLE,
};
static int sa1100_serial_suspend(struct platform_device *dev, pm_message_t state)
{
struct sa1100_port *sport = platform_get_drvdata(dev);
if (sport)
uart_suspend_port(&sa1100_reg, &sport->port);
return 0;
}
static int sa1100_serial_resume(struct platform_device *dev)
{
struct sa1100_port *sport = platform_get_drvdata(dev);
if (sport)
uart_resume_port(&sa1100_reg, &sport->port);
return 0;
}
/*
 * Probe: match the platform device's first memory resource against the
 * mapbase set up by sa1100_register_uart(), then register that port
 * with the serial core.
 */
static int sa1100_serial_probe(struct platform_device *dev)
{
	struct resource *res = dev->resource;
	int i;

	/* locate the first MEM resource of the device */
	for (i = 0; i < dev->num_resources; i++, res++)
		if (res->flags & IORESOURCE_MEM)
			break;

	if (i < dev->num_resources) {
		for (i = 0; i < NR_PORTS; i++) {
			if (sa1100_ports[i].port.mapbase != res->start)
				continue;

			sa1100_ports[i].port.dev = &dev->dev;
			/* NOTE(review): uart_add_one_port() result is
			 * ignored and probe always returns 0 - confirm
			 * whether failures should be propagated. */
			uart_add_one_port(&sa1100_reg, &sa1100_ports[i].port);
			platform_set_drvdata(dev, &sa1100_ports[i]);
			break;
		}
	}

	return 0;
}

/* Remove: detach the port registered in probe, if one was bound. */
static int sa1100_serial_remove(struct platform_device *pdev)
{
	struct sa1100_port *sport = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	if (sport)
		uart_remove_one_port(&sa1100_reg, &sport->port);

	return 0;
}
/* Platform glue: binds by name to the "sa11x0-uart" platform devices. */
static struct platform_driver sa11x0_serial_driver = {
	.probe		= sa1100_serial_probe,
	.remove		= sa1100_serial_remove,
	.suspend	= sa1100_serial_suspend,
	.resume		= sa1100_serial_resume,
	.driver		= {
		.name	= "sa11x0-uart",
		.owner	= THIS_MODULE,
	},
};
/*
 * Module init: set up the static port table, register the uart driver,
 * then the platform driver; unwind the uart driver if the latter fails.
 */
static int __init sa1100_serial_init(void)
{
	int ret;

	printk(KERN_INFO "Serial: SA11x0 driver\n");

	sa1100_init_ports();

	ret = uart_register_driver(&sa1100_reg);
	if (ret)
		return ret;

	ret = platform_driver_register(&sa11x0_serial_driver);
	if (ret)
		uart_unregister_driver(&sa1100_reg);

	return ret;
}

/* Module exit: tear down in reverse registration order. */
static void __exit sa1100_serial_exit(void)
{
	platform_driver_unregister(&sa11x0_serial_driver);
	uart_unregister_driver(&sa1100_reg);
}
module_init(sa1100_serial_init);
module_exit(sa1100_serial_exit);
MODULE_AUTHOR("Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("SA1100 generic serial port driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(SERIAL_SA1100_MAJOR);
MODULE_ALIAS("platform:sa11x0-uart");
| gpl-2.0 |
vuanhduy/odroidxu-3.4.y | drivers/media/video/mt9v011.c | 5093 | 16884 | /*
* mt9v011 -Micron 1/4-Inch VGA Digital Image Sensor
*
* Copyright (c) 2009 Mauro Carvalho Chehab (mchehab@redhat.com)
* This code is placed under the terms of the GNU General Public License v2
*/
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/div64.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/mt9v011.h>
MODULE_DESCRIPTION("Micron mt9v011 sensor driver");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
MODULE_LICENSE("GPL");
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
#define R00_MT9V011_CHIP_VERSION 0x00
#define R01_MT9V011_ROWSTART 0x01
#define R02_MT9V011_COLSTART 0x02
#define R03_MT9V011_HEIGHT 0x03
#define R04_MT9V011_WIDTH 0x04
#define R05_MT9V011_HBLANK 0x05
#define R06_MT9V011_VBLANK 0x06
#define R07_MT9V011_OUT_CTRL 0x07
#define R09_MT9V011_SHUTTER_WIDTH 0x09
#define R0A_MT9V011_CLK_SPEED 0x0a
#define R0B_MT9V011_RESTART 0x0b
#define R0C_MT9V011_SHUTTER_DELAY 0x0c
#define R0D_MT9V011_RESET 0x0d
#define R1E_MT9V011_DIGITAL_ZOOM 0x1e
#define R20_MT9V011_READ_MODE 0x20
#define R2B_MT9V011_GREEN_1_GAIN 0x2b
#define R2C_MT9V011_BLUE_GAIN 0x2c
#define R2D_MT9V011_RED_GAIN 0x2d
#define R2E_MT9V011_GREEN_2_GAIN 0x2e
#define R35_MT9V011_GLOBAL_GAIN 0x35
#define RF1_MT9V011_CHIP_ENABLE 0xf1
#define MT9V011_VERSION 0x8232
#define MT9V011_REV_B_VERSION 0x8243
/* supported controls */
static struct v4l2_queryctrl mt9v011_qctrl[] = {
{
.id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Gain",
.minimum = 0,
.maximum = (1 << 12) - 1 - 0x0020,
.step = 1,
.default_value = 0x0020,
.flags = 0,
}, {
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Exposure",
.minimum = 0,
.maximum = 2047,
.step = 1,
.default_value = 0x01fc,
.flags = 0,
}, {
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Red Balance",
.minimum = -1 << 9,
.maximum = (1 << 9) - 1,
.step = 1,
.default_value = 0,
.flags = 0,
}, {
.id = V4L2_CID_BLUE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Blue Balance",
.minimum = -1 << 9,
.maximum = (1 << 9) - 1,
.step = 1,
.default_value = 0,
.flags = 0,
}, {
.id = V4L2_CID_HFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Mirror",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 0,
.flags = 0,
}, {
.id = V4L2_CID_VFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Vflip",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 0,
.flags = 0,
}, {
}
};
struct mt9v011 {
struct v4l2_subdev sd;
unsigned width, height;
unsigned xtal;
unsigned hflip:1;
unsigned vflip:1;
u16 global_gain, exposure;
s16 red_bal, blue_bal;
};
static inline struct mt9v011 *to_mt9v011(struct v4l2_subdev *sd)
{
return container_of(sd, struct mt9v011, sd);
}
/*
 * mt9v011_read - read a 16-bit big-endian register from the sensor.
 *
 * Sends the one-byte register address, then reads back two bytes and
 * converts from the chip's big-endian order.  I2C errors are only logged
 * (at debug level 0); the possibly-stale buffer contents are still
 * converted and returned, so callers cannot distinguish failures.
 */
static int mt9v011_read(struct v4l2_subdev *sd, unsigned char addr)
{
	struct i2c_client *c = v4l2_get_subdevdata(sd);
	__be16 buffer;
	int rc, val;

	rc = i2c_master_send(c, &addr, 1);
	if (rc != 1)
		v4l2_dbg(0, debug, sd,
			 "i2c i/o error: rc == %d (should be 1)\n", rc);
	/* Delay between address write and data read; the required interval
	 * is not documented here — presumably a chip timing requirement. */
	msleep(10);
	rc = i2c_master_recv(c, (char *)&buffer, 2);
	if (rc != 2)
		v4l2_dbg(0, debug, sd,
			 "i2c i/o error: rc == %d (should be 2)\n", rc);
	val = be16_to_cpu(buffer);
	v4l2_dbg(2, debug, sd, "mt9v011: read 0x%02x = 0x%04x\n", addr, val);
	return val;
}
/*
 * mt9v011_write - write a 16-bit value to a sensor register.
 *
 * Transfers register address and value (MSB first) in a single 3-byte
 * I2C message.  Errors are only logged; no status is returned to the
 * caller.
 */
static void mt9v011_write(struct v4l2_subdev *sd, unsigned char addr,
			  u16 value)
{
	struct i2c_client *c = v4l2_get_subdevdata(sd);
	unsigned char buffer[3];
	int rc;

	/* Pack address + big-endian value into one transfer. */
	buffer[0] = addr;
	buffer[1] = value >> 8;
	buffer[2] = value & 0xff;
	v4l2_dbg(2, debug, sd,
		 "mt9v011: writing 0x%02x 0x%04x\n", buffer[0], value);
	rc = i2c_master_send(c, buffer, 3);
	if (rc != 3)
		v4l2_dbg(0, debug, sd,
			 "i2c i/o error: rc == %d (should be 3)\n", rc);
}
struct i2c_reg_value {
unsigned char reg;
u16 value;
};
/*
* Values used at the original driver
* Some values are marked as Reserved at the datasheet
*/
static const struct i2c_reg_value mt9v011_init_default[] = {
{ R0D_MT9V011_RESET, 0x0001 },
{ R0D_MT9V011_RESET, 0x0000 },
{ R0C_MT9V011_SHUTTER_DELAY, 0x0000 },
{ R09_MT9V011_SHUTTER_WIDTH, 0x1fc },
{ R0A_MT9V011_CLK_SPEED, 0x0000 },
{ R1E_MT9V011_DIGITAL_ZOOM, 0x0000 },
{ R07_MT9V011_OUT_CTRL, 0x0002 }, /* chip enable */
};
/*
 * calc_mt9v011_gain - translate a linear gain request into the sensor's
 * packed gain-register encoding.
 *
 * The register packs an initial analog gain (bits 0-6), an analog
 * multiplier (bits 7-8) and a digital gain (bits 9-10).  Larger linear
 * gains are mapped onto progressively coarser analog steps plus the
 * multiplier/digital stages.
 */
static u16 calc_mt9v011_gain(s16 lineargain)
{
	u16 dg, am, init;
	s16 g = lineargain;

	/* Clamp to [0x20, 2047] after adding the recommended minimum. */
	if (g < 0)
		g = 0;
	g += 0x0020;
	if (g > 2047)
		g = 2047;

	if (g > 1023) {
		dg = 3;
		am = 3;
		init = g / 16;
	} else if (g > 511) {
		dg = 1;
		am = 3;
		init = g / 8;
	} else if (g > 255) {
		dg = 0;
		am = 3;
		init = g / 4;
	} else if (g > 127) {
		dg = 0;
		am = 1;
		init = g / 2;
	} else {
		dg = 0;
		am = 0;
		init = g;
	}

	return init + (am << 7) + (dg << 9);
}
/*
 * set_balance - program per-channel gains and exposure from cached state.
 *
 * Both green channels get the plain global gain.  Red and blue start from
 * the global gain and are offset by the signed balance controls, scaled
 * as bal * global_gain / 128, so each balance step acts as a 1/128
 * fraction of the current gain.  The cached exposure value is written to
 * the shutter-width register.
 */
static void set_balance(struct v4l2_subdev *sd)
{
	struct mt9v011 *core = to_mt9v011(sd);
	u16 green_gain, blue_gain, red_gain;
	u16 exposure;
	s16 bal;

	exposure = core->exposure;

	green_gain = calc_mt9v011_gain(core->global_gain);

	bal = core->global_gain;
	bal += (core->blue_bal * core->global_gain / (1 << 7));
	blue_gain = calc_mt9v011_gain(bal);

	bal = core->global_gain;
	bal += (core->red_bal * core->global_gain / (1 << 7));
	red_gain = calc_mt9v011_gain(bal);

	mt9v011_write(sd, R2B_MT9V011_GREEN_1_GAIN, green_gain);
	mt9v011_write(sd, R2E_MT9V011_GREEN_2_GAIN, green_gain);
	mt9v011_write(sd, R2C_MT9V011_BLUE_GAIN, blue_gain);
	mt9v011_write(sd, R2D_MT9V011_RED_GAIN, red_gain);
	mt9v011_write(sd, R09_MT9V011_SHUTTER_WIDTH, exposure);
}
/*
 * calc_fps - compute the frame rate currently programmed into the chip.
 *
 * Reads geometry and clock-divider registers back from the sensor and
 * derives frames per millisecond from the crystal frequency.  When
 * @numerator/@denominator are both non-NULL, the result is also returned
 * as a time-per-frame fraction (1000 / frames_per_ms).
 *
 * Fix: the xtal * 1000 product was computed as `core->xtal * 1000l`,
 * which overflows a 32-bit long on 32-bit architectures (27 MHz * 1000 =
 * 2.7e10 > 2^32).  Promote to u64 before multiplying.
 */
static void calc_fps(struct v4l2_subdev *sd, u32 *numerator, u32 *denominator)
{
	struct mt9v011 *core = to_mt9v011(sd);
	unsigned height, width, hblank, vblank, speed;
	unsigned row_time, t_time;
	u64 frames_per_ms;
	unsigned tmp;

	height = mt9v011_read(sd, R03_MT9V011_HEIGHT);
	width = mt9v011_read(sd, R04_MT9V011_WIDTH);
	hblank = mt9v011_read(sd, R05_MT9V011_HBLANK);
	vblank = mt9v011_read(sd, R06_MT9V011_VBLANK);
	speed = mt9v011_read(sd, R0A_MT9V011_CLK_SPEED);

	/* Pixel clocks per row and per frame (datasheet formula). */
	row_time = (width + 113 + hblank) * (speed + 2);
	t_time = row_time * (height + vblank + 1);

	/* Promote to 64 bits: xtal * 1000 exceeds 32 bits for xtal > ~4 MHz. */
	frames_per_ms = (u64)core->xtal * 1000;
	do_div(frames_per_ms, t_time);

	tmp = frames_per_ms;
	v4l2_dbg(1, debug, sd, "Programmed to %u.%03u fps (%d pixel clcks)\n",
		tmp / 1000, tmp % 1000, t_time);

	if (numerator && denominator) {
		*numerator = 1000;
		*denominator = (u32)frames_per_ms;
	}
}
/*
 * calc_speed - convert a time-per-frame fraction into a CLK_SPEED divider.
 *
 * Solves the calc_fps() relation for the clock-speed register:
 * speed = xtal * (numerator / denominator) / (row_time * line_time) - 2,
 * rounded to the nearest integer and clamped to the register's 0..15
 * range.  A zero numerator or denominator returns 0 (fastest) rather
 * than dividing by zero.
 */
static u16 calc_speed(struct v4l2_subdev *sd, u32 numerator, u32 denominator)
{
	struct mt9v011 *core = to_mt9v011(sd);
	unsigned height, width, hblank, vblank;
	unsigned row_time, line_time;
	u64 t_time, speed;

	/* Avoid bogus calculus */
	if (!numerator || !denominator)
		return 0;

	height = mt9v011_read(sd, R03_MT9V011_HEIGHT);
	width = mt9v011_read(sd, R04_MT9V011_WIDTH);
	hblank = mt9v011_read(sd, R05_MT9V011_HBLANK);
	vblank = mt9v011_read(sd, R06_MT9V011_VBLANK);

	row_time = width + 113 + hblank;
	line_time = height + vblank + 1;

	/* 64-bit product: xtal * numerator can exceed 32 bits. */
	t_time = core->xtal * ((u64)numerator);
	/* round to the closest value */
	t_time += denominator / 2;
	do_div(t_time, denominator);

	speed = t_time;
	do_div(speed, row_time * line_time);

	/* Avoid having a negative value for speed */
	if (speed < 2)
		speed = 0;
	else
		speed -= 2;

	/* Avoid speed overflow */
	if (speed > 15)
		return 15;

	return (u16)speed;
}
/*
 * set_res - centre-crop the sensor to the cached width/height and keep
 * the 30 fps line timing by compensating with the blanking registers.
 *
 * Fix: removed a stray semicolon after the function's closing brace.
 */
static void set_res(struct v4l2_subdev *sd)
{
	struct mt9v011 *core = to_mt9v011(sd);
	unsigned vstart, hstart;

	/*
	 * The mt9v011 doesn't have scaling. So, in order to select the desired
	 * resolution, we're cropping at the middle of the sensor.
	 * hblank and vblank should be adjusted, in order to warrant that
	 * we'll preserve the line timings for 30 fps, no matter what resolution
	 * is selected.
	 * NOTE: datasheet says that width (and height) should be filled with
	 * width-1. However, this doesn't work, since one pixel per line will
	 * be missing.
	 */
	hstart = 20 + (640 - core->width) / 2;
	mt9v011_write(sd, R02_MT9V011_COLSTART, hstart);
	mt9v011_write(sd, R04_MT9V011_WIDTH, core->width);
	mt9v011_write(sd, R05_MT9V011_HBLANK, 771 - core->width);

	vstart = 8 + (480 - core->height) / 2;
	mt9v011_write(sd, R01_MT9V011_ROWSTART, vstart);
	mt9v011_write(sd, R03_MT9V011_HEIGHT, core->height);
	mt9v011_write(sd, R06_MT9V011_VBLANK, 508 - core->height);

	/* Log the frame rate that results from the new geometry. */
	calc_fps(sd, NULL, NULL);
}
/*
 * set_read_mode - program the read-mode register from the cached
 * mirror/flip flags.  Bit 12 is always set; bits 14/15 select
 * horizontal/vertical flip respectively.
 */
static void set_read_mode(struct v4l2_subdev *sd)
{
	struct mt9v011 *core = to_mt9v011(sd);
	unsigned mode = 0x1000
		      | (core->hflip ? 0x4000 : 0)
		      | (core->vflip ? 0x8000 : 0);

	mt9v011_write(sd, R20_MT9V011_READ_MODE, mode);
}
/*
 * mt9v011_reset - reset the chip and restore the cached configuration.
 *
 * Replays the power-on register defaults from mt9v011_init_default[],
 * then reapplies the cached gain/exposure/balance, crop geometry and
 * mirror settings.  Always returns 0.
 *
 * Fix: removed a stray semicolon after the function's closing brace.
 */
static int mt9v011_reset(struct v4l2_subdev *sd, u32 val)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mt9v011_init_default); i++)
		mt9v011_write(sd, mt9v011_init_default[i].reg,
			      mt9v011_init_default[i].value);

	set_balance(sd);
	set_res(sd);
	set_read_mode(sd);

	return 0;
}
/*
 * mt9v011_g_ctrl - return the current value of a supported control.
 *
 * Values come from the driver's cached state (struct mt9v011), not from
 * a register read-back.  Unknown control ids yield -EINVAL.
 */
static int mt9v011_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	struct mt9v011 *core = to_mt9v011(sd);

	v4l2_dbg(1, debug, sd, "g_ctrl called\n");

	switch (ctrl->id) {
	case V4L2_CID_GAIN:
		ctrl->value = core->global_gain;
		return 0;
	case V4L2_CID_EXPOSURE:
		ctrl->value = core->exposure;
		return 0;
	case V4L2_CID_RED_BALANCE:
		ctrl->value = core->red_bal;
		return 0;
	case V4L2_CID_BLUE_BALANCE:
		ctrl->value = core->blue_bal;
		return 0;
	case V4L2_CID_HFLIP:
		ctrl->value = core->hflip ? 1 : 0;
		return 0;
	case V4L2_CID_VFLIP:
		ctrl->value = core->vflip ? 1 : 0;
		return 0;
	}
	return -EINVAL;
}
/*
 * mt9v011_queryctrl - fill in the description of a supported control.
 * Unknown (or zero) ids yield -EINVAL.
 */
static int mt9v011_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
{
	int i;

	v4l2_dbg(1, debug, sd, "queryctrl called\n");

	/* Look the requested id up in the supported-controls table. */
	for (i = 0; i < ARRAY_SIZE(mt9v011_qctrl); i++) {
		if (qc->id && qc->id == mt9v011_qctrl[i].id) {
			*qc = mt9v011_qctrl[i];
			return 0;
		}
	}

	return -EINVAL;
}
/*
 * mt9v011_s_ctrl - set the value of a supported control.
 *
 * The loop only range-checks ctrl->value against the matching table
 * entry; an id missing from the table leaves the loop unchecked and is
 * then rejected by the switch's default arm.  HFLIP/VFLIP take effect
 * immediately via set_read_mode(); all other controls are cached and
 * written out together by set_balance().
 */
static int mt9v011_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	struct mt9v011 *core = to_mt9v011(sd);
	u8 i, n;

	n = ARRAY_SIZE(mt9v011_qctrl);
	for (i = 0; i < n; i++) {
		if (ctrl->id != mt9v011_qctrl[i].id)
			continue;
		if (ctrl->value < mt9v011_qctrl[i].minimum ||
		    ctrl->value > mt9v011_qctrl[i].maximum)
			return -ERANGE;
		v4l2_dbg(1, debug, sd, "s_ctrl: id=%d, value=%d\n",
			 ctrl->id, ctrl->value);
		break;
	}

	switch (ctrl->id) {
	case V4L2_CID_GAIN:
		core->global_gain = ctrl->value;
		break;
	case V4L2_CID_EXPOSURE:
		core->exposure = ctrl->value;
		break;
	case V4L2_CID_RED_BALANCE:
		core->red_bal = ctrl->value;
		break;
	case V4L2_CID_BLUE_BALANCE:
		core->blue_bal = ctrl->value;
		break;
	case V4L2_CID_HFLIP:
		core->hflip = ctrl->value;
		set_read_mode(sd);
		return 0;
	case V4L2_CID_VFLIP:
		core->vflip = ctrl->value;
		set_read_mode(sd);
		return 0;
	default:
		return -EINVAL;
	}

	/* Flush gain/exposure/balance changes to the sensor. */
	set_balance(sd);

	return 0;
}
static int mt9v011_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned index,
enum v4l2_mbus_pixelcode *code)
{
if (index > 0)
return -EINVAL;
*code = V4L2_MBUS_FMT_SGRBG8_1X8;
return 0;
}
static int mt9v011_try_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt)
{
if (fmt->code != V4L2_MBUS_FMT_SGRBG8_1X8)
return -EINVAL;
v4l_bound_align_image(&fmt->width, 48, 639, 1,
&fmt->height, 32, 480, 1, 0);
fmt->field = V4L2_FIELD_NONE;
fmt->colorspace = V4L2_COLORSPACE_SRGB;
return 0;
}
static int mt9v011_g_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
{
struct v4l2_captureparm *cp = &parms->parm.capture;
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
memset(cp, 0, sizeof(struct v4l2_captureparm));
cp->capability = V4L2_CAP_TIMEPERFRAME;
calc_fps(sd,
&cp->timeperframe.numerator,
&cp->timeperframe.denominator);
return 0;
}
static int mt9v011_s_parm(struct v4l2_subdev *sd, struct v4l2_streamparm *parms)
{
struct v4l2_captureparm *cp = &parms->parm.capture;
struct v4l2_fract *tpf = &cp->timeperframe;
u16 speed;
if (parms->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (cp->extendedmode != 0)
return -EINVAL;
speed = calc_speed(sd, tpf->numerator, tpf->denominator);
mt9v011_write(sd, R0A_MT9V011_CLK_SPEED, speed);
v4l2_dbg(1, debug, sd, "Setting speed to %d\n", speed);
/* Recalculate and update fps info */
calc_fps(sd, &tpf->numerator, &tpf->denominator);
return 0;
}
/*
 * mt9v011_s_mbus_fmt - validate and apply a media-bus format.
 *
 * Runs the requested format through mt9v011_try_mbus_fmt() (which clamps
 * width/height and checks the pixel code), caches the resulting geometry
 * and reprograms the sensor crop via set_res().
 *
 * Fix: propagate the error code from try_mbus_fmt instead of flattening
 * it to -EINVAL (behavior-compatible today, but keeps future error codes
 * intact).
 */
static int mt9v011_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt)
{
	struct mt9v011 *core = to_mt9v011(sd);
	int rc;

	rc = mt9v011_try_mbus_fmt(sd, fmt);
	if (rc < 0)
		return rc;

	core->width = fmt->width;
	core->height = fmt->height;

	set_res(sd);

	return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int mt9v011_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
if (!v4l2_chip_match_i2c_client(client, ®->match))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
reg->val = mt9v011_read(sd, reg->reg & 0xff);
reg->size = 2;
return 0;
}
static int mt9v011_s_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
if (!v4l2_chip_match_i2c_client(client, ®->match))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mt9v011_write(sd, reg->reg & 0xff, reg->val & 0xffff);
return 0;
}
#endif
static int mt9v011_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
{
u16 version;
struct i2c_client *client = v4l2_get_subdevdata(sd);
version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_MT9V011,
version);
}
static const struct v4l2_subdev_core_ops mt9v011_core_ops = {
.queryctrl = mt9v011_queryctrl,
.g_ctrl = mt9v011_g_ctrl,
.s_ctrl = mt9v011_s_ctrl,
.reset = mt9v011_reset,
.g_chip_ident = mt9v011_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9v011_g_register,
.s_register = mt9v011_s_register,
#endif
};
static const struct v4l2_subdev_video_ops mt9v011_video_ops = {
.enum_mbus_fmt = mt9v011_enum_mbus_fmt,
.try_mbus_fmt = mt9v011_try_mbus_fmt,
.s_mbus_fmt = mt9v011_s_mbus_fmt,
.g_parm = mt9v011_g_parm,
.s_parm = mt9v011_s_parm,
};
static const struct v4l2_subdev_ops mt9v011_ops = {
.core = &mt9v011_core_ops,
.video = &mt9v011_video_ops,
};
/****************************************************************************
I2C Client & Driver
****************************************************************************/
/*
 * mt9v011_probe - detect an MT9V011 sensor and register the v4l2 subdev.
 *
 * Verifies the chip-version register against the two known IDs, seeds the
 * cached control state with defaults, and lets platform data override the
 * crystal frequency.  An unknown chip frees the core struct and fails
 * with -EINVAL.
 */
static int mt9v011_probe(struct i2c_client *c,
			 const struct i2c_device_id *id)
{
	u16 version;
	struct mt9v011 *core;
	struct v4l2_subdev *sd;

	/*
	 * Check if the adapter supports the needed features.
	 * NOTE(review): the register helpers use i2c_master_send/recv,
	 * which require plain I2C_FUNC_I2C rather than these SMBus flags —
	 * confirm this functionality mask is the intended one.
	 */
	if (!i2c_check_functionality(c->adapter,
	     I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
		return -EIO;

	core = kzalloc(sizeof(struct mt9v011), GFP_KERNEL);
	if (!core)
		return -ENOMEM;

	sd = &core->sd;
	v4l2_i2c_subdev_init(sd, c, &mt9v011_ops);

	/* Check if the sensor is really a MT9V011 */
	version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
	if ((version != MT9V011_VERSION) &&
	    (version != MT9V011_REV_B_VERSION)) {
		v4l2_info(sd, "*** unknown micron chip detected (0x%04x).\n",
			  version);
		kfree(core);
		return -EINVAL;
	}

	/* Default control state; xtal may be overridden by platform data. */
	core->global_gain = 0x0024;
	core->exposure = 0x01fc;
	core->width = 640;
	core->height = 480;
	core->xtal = 27000000;	/* Hz */

	if (c->dev.platform_data) {
		struct mt9v011_platform_data *pdata = c->dev.platform_data;

		core->xtal = pdata->xtal;
		v4l2_dbg(1, debug, sd, "xtal set to %d.%03d MHz\n",
			 core->xtal / 1000000, (core->xtal / 1000) % 1000);
	}

	v4l_info(c, "chip found @ 0x%02x (%s - chip version 0x%04x)\n",
		 c->addr << 1, c->adapter->name, version);

	return 0;
}
static int mt9v011_remove(struct i2c_client *c)
{
struct v4l2_subdev *sd = i2c_get_clientdata(c);
v4l2_dbg(1, debug, sd,
"mt9v011.c: removing mt9v011 adapter on address 0x%x\n",
c->addr << 1);
v4l2_device_unregister_subdev(sd);
kfree(to_mt9v011(sd));
return 0;
}
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id mt9v011_id[] = {
{ "mt9v011", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, mt9v011_id);
static struct i2c_driver mt9v011_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "mt9v011",
},
.probe = mt9v011_probe,
.remove = mt9v011_remove,
.id_table = mt9v011_id,
};
module_i2c_driver(mt9v011_driver);
| gpl-2.0 |
TheNameIsNigel/kernel_common | net/sched/sch_ingress.c | 10981 | 3274 | /* net/sched/sch_ingress.c - Ingress qdisc
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Authors: Jamal Hadi Salim 1999
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
struct ingress_qdisc_data {
struct tcf_proto *filter_list;
};
/* ------------------------- Class/flow operations ------------------------- */
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
return NULL;
}
static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
{
return TC_H_MIN(classid) + 1;
}
static unsigned long ingress_bind_filter(struct Qdisc *sch,
unsigned long parent, u32 classid)
{
return ingress_get(sch, classid);
}
static void ingress_put(struct Qdisc *sch, unsigned long cl)
{
}
static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}
static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl)
{
struct ingress_qdisc_data *p = qdisc_priv(sch);
return &p->filter_list;
}
/* --------------------------- Qdisc operations ---------------------------- */
/*
 * ingress_enqueue - classify an incoming packet and map the classifier
 * verdict onto the action returned to the caller.
 *
 * The ingress qdisc never queues anything: the packet is run through the
 * attached filter chain, byte/packet stats are updated regardless of the
 * verdict, and the (possibly remapped) action is returned.
 */
static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct ingress_qdisc_data *p = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	result = tc_classify(skb, p->filter_list, &res);

	/* Account the packet whatever the verdict turns out to be. */
	qdisc_bstats_update(sch, skb);
	switch (result) {
	case TC_ACT_SHOT:
		result = TC_ACT_SHOT;
		sch->qstats.drops++;
		break;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		result = TC_ACT_STOLEN;
		break;
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
		skb->tc_index = TC_H_MIN(res.classid);
		/* fall through */
	default:
		result = TC_ACT_OK;
		break;
	}
	return result;
}
/* ------------------------------------------------------------- */
static void ingress_destroy(struct Qdisc *sch)
{
struct ingress_qdisc_data *p = qdisc_priv(sch);
tcf_destroy_chain(&p->filter_list);
}
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct nlattr *nest;
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
nla_nest_end(skb, nest);
return skb->len;
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
}
static const struct Qdisc_class_ops ingress_class_ops = {
.leaf = ingress_leaf,
.get = ingress_get,
.put = ingress_put,
.walk = ingress_walk,
.tcf_chain = ingress_find_tcf,
.bind_tcf = ingress_bind_filter,
.unbind_tcf = ingress_put,
};
static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
.cl_ops = &ingress_class_ops,
.id = "ingress",
.priv_size = sizeof(struct ingress_qdisc_data),
.enqueue = ingress_enqueue,
.destroy = ingress_destroy,
.dump = ingress_dump,
.owner = THIS_MODULE,
};
static int __init ingress_module_init(void)
{
return register_qdisc(&ingress_qdisc_ops);
}
static void __exit ingress_module_exit(void)
{
unregister_qdisc(&ingress_qdisc_ops);
}
module_init(ingress_module_init)
module_exit(ingress_module_exit)
MODULE_LICENSE("GPL");
| gpl-2.0 |
Gulyuk/alcatel_move_kernel | net/bridge/netfilter/ebt_mark.c | 13797 | 2697 | /*
* ebt_mark
*
* Authors:
* Bart De Schuymer <bdschuym@pandora.be>
*
* July, 2002
*
*/
/* The mark target can be used in any chain,
* I believe adding a mangle table just for marking is total overkill.
* Marking a frame doesn't really change anything in the frame anyway.
*/
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_mark_t.h>
/*
 * ebt_mark_tg - apply the configured mark operation to the skb.
 *
 * The upper bits of info->target select the operation (set/or/and/xor);
 * the low bits carry the ebtables verdict, which is returned unchanged.
 */
static unsigned int
ebt_mark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct ebt_mark_t_info *info = par->targinfo;

	switch (info->target & -16) {
	case MARK_SET_VALUE:
		skb->mark = info->mark;
		break;
	case MARK_OR_VALUE:
		skb->mark |= info->mark;
		break;
	case MARK_AND_VALUE:
		skb->mark &= info->mark;
		break;
	default:	/* MARK_XOR_VALUE — only remaining valid action */
		skb->mark ^= info->mark;
		break;
	}

	return info->target | ~EBT_VERDICT_BITS;
}
/*
 * ebt_mark_tg_check - validate the userspace-supplied target info.
 *
 * The low bits of info->target encode the ebtables verdict and the high
 * bits the mark operation.  The verdict must be a valid standard target
 * (and not EBT_RETURN when the target sits in a base chain); the
 * operation must be one of the four supported mark actions.
 */
static int ebt_mark_tg_check(const struct xt_tgchk_param *par)
{
	const struct ebt_mark_t_info *info = par->targinfo;
	int tmp;

	/* Verdict part: must be a valid standard target. */
	tmp = info->target | ~EBT_VERDICT_BITS;
	if (BASE_CHAIN && tmp == EBT_RETURN)
		return -EINVAL;
	if (tmp < -NUM_STANDARD_TARGETS || tmp >= 0)
		return -EINVAL;
	/* Action part: one of the four known mark operations. */
	tmp = info->target & ~EBT_VERDICT_BITS;
	if (tmp != MARK_SET_VALUE && tmp != MARK_OR_VALUE &&
	    tmp != MARK_AND_VALUE && tmp != MARK_XOR_VALUE)
		return -EINVAL;
	return 0;
}
#ifdef CONFIG_COMPAT
struct compat_ebt_mark_t_info {
compat_ulong_t mark;
compat_uint_t target;
};
static void mark_tg_compat_from_user(void *dst, const void *src)
{
const struct compat_ebt_mark_t_info *user = src;
struct ebt_mark_t_info *kern = dst;
kern->mark = user->mark;
kern->target = user->target;
}
static int mark_tg_compat_to_user(void __user *dst, const void *src)
{
struct compat_ebt_mark_t_info __user *user = dst;
const struct ebt_mark_t_info *kern = src;
if (put_user(kern->mark, &user->mark) ||
put_user(kern->target, &user->target))
return -EFAULT;
return 0;
}
#endif
static struct xt_target ebt_mark_tg_reg __read_mostly = {
.name = "mark",
.revision = 0,
.family = NFPROTO_BRIDGE,
.target = ebt_mark_tg,
.checkentry = ebt_mark_tg_check,
.targetsize = sizeof(struct ebt_mark_t_info),
#ifdef CONFIG_COMPAT
.compatsize = sizeof(struct compat_ebt_mark_t_info),
.compat_from_user = mark_tg_compat_from_user,
.compat_to_user = mark_tg_compat_to_user,
#endif
.me = THIS_MODULE,
};
static int __init ebt_mark_init(void)
{
return xt_register_target(&ebt_mark_tg_reg);
}
static void __exit ebt_mark_fini(void)
{
xt_unregister_target(&ebt_mark_tg_reg);
}
module_init(ebt_mark_init);
module_exit(ebt_mark_fini);
MODULE_DESCRIPTION("Ebtables: Packet mark modification");
MODULE_LICENSE("GPL");
| gpl-2.0 |
baldo/freifunk-hamburg | target/linux/mcs814x/files-3.3/drivers/usb/host/ohci-mcs814x.c | 230 | 4866 | /*
* OHCI HCD (Host Controller Driver) for USB.
*
* (C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
* (C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
* (C) Copyright 2002 Hewlett-Packard Company
*
* Bus Glue for Moschip MCS814x.
*
* Written by Christopher Hoover <ch@hpl.hp.com>
* Based on fragments of previous driver by Russell King et al.
*
* Modified for LH7A404 from ohci-sa1111.c
* by Durgesh Pattamatta <pattamattad@sharpsec.com>
*
* Modified for pxa27x from ohci-lh7a404.c
* by Nick Bane <nick@cecomputing.co.uk> 26-8-2004
*
* Modified for mcs814x from ohci-mcs814x.c
* by Lennert Buytenhek <buytenh@wantstofly.org> 28-2-2006
* Based on an earlier driver by Ray Lehtiniemi
*
* This file is licenced under the GPL.
*/
#include <linux/device.h>
#include <linux/signal.h>
#include <linux/platform_device.h>
#include <linux/of.h>
static int usb_hcd_mcs814x_probe(const struct hc_driver *driver,
struct platform_device *pdev)
{
int retval;
struct usb_hcd *hcd;
if (pdev->resource[1].flags != IORESOURCE_IRQ) {
pr_debug("resource[1] is not IORESOURCE_IRQ");
return -ENOMEM;
}
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
hcd = usb_create_hcd(driver, &pdev->dev, "mcs814x");
if (hcd == NULL)
return -ENOMEM;
hcd->rsrc_start = pdev->resource[0].start;
hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
usb_put_hcd(hcd);
retval = -EBUSY;
goto err1;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (hcd->regs == NULL) {
pr_debug("ioremap failed");
retval = -ENOMEM;
goto err2;
}
ohci_hcd_init(hcd_to_ohci(hcd));
retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED);
if (retval == 0)
return retval;
iounmap(hcd->regs);
err2:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
usb_put_hcd(hcd);
return retval;
}
static void usb_hcd_mcs814x_remove(struct usb_hcd *hcd,
struct platform_device *pdev)
{
usb_remove_hcd(hcd);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
}
static int __devinit ohci_mcs814x_start(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int ret;
ret = ohci_init(ohci);
if (ret < 0)
return ret;
ret = ohci_run(ohci);
if (ret < 0) {
ohci_err(ohci, "can't start %s", hcd->self.bus_name);
ohci_stop(hcd);
return ret;
}
return 0;
}
static struct hc_driver ohci_mcs814x_hc_driver = {
.description = hcd_name,
.product_desc = "MCS814X OHCI",
.hcd_priv_size = sizeof(struct ohci_hcd),
.irq = ohci_irq,
.flags = HCD_USB11 | HCD_MEMORY,
.start = ohci_mcs814x_start,
.stop = ohci_stop,
.shutdown = ohci_shutdown,
.urb_enqueue = ohci_urb_enqueue,
.urb_dequeue = ohci_urb_dequeue,
.endpoint_disable = ohci_endpoint_disable,
.get_frame_number = ohci_get_frame,
.hub_status_data = ohci_hub_status_data,
.hub_control = ohci_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ohci_bus_suspend,
.bus_resume = ohci_bus_resume,
#endif
.start_port_reset = ohci_start_port_reset,
};
extern int usb_disabled(void);
static int ohci_hcd_mcs814x_drv_probe(struct platform_device *pdev)
{
	/* USB may be globally disabled on the kernel command line. */
	if (usb_disabled())
		return -ENODEV;

	return usb_hcd_mcs814x_probe(&ohci_mcs814x_hc_driver, pdev);
}
static int ohci_hcd_mcs814x_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_hcd_mcs814x_remove(hcd, pdev);
return 0;
}
#ifdef CONFIG_PM
static int ohci_hcd_mcs814x_drv_suspend(struct platform_device *pdev, pm_message_t state)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
if (time_before(jiffies, ohci->next_statechange))
msleep(5);
ohci->next_statechange = jiffies;
hcd->state = HC_STATE_SUSPENDED;
return 0;
}
/*
 * ohci_hcd_mcs814x_drv_resume - platform resume hook.
 *
 * Honours the minimum interval between root-hub state changes, then lets
 * the OHCI core finish the controller resume.
 *
 * Fix: removed the unused local `int status` (compiler warning, never
 * assigned or read).
 */
static int ohci_hcd_mcs814x_drv_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct ohci_hcd *ohci = hcd_to_ohci(hcd);

	if (time_before(jiffies, ohci->next_statechange))
		msleep(5);
	ohci->next_statechange = jiffies;

	ohci_finish_controller_resume(hcd);
	return 0;
}
#endif
static const struct of_device_id mcs814x_ohci_id[] = {
{ .compatible = "moschip,mcs814x-ohci" },
{ .compatible = "ohci-le" },
{ /* sentinel */ },
};
static struct platform_driver ohci_hcd_mcs814x_driver = {
.probe = ohci_hcd_mcs814x_drv_probe,
.remove = ohci_hcd_mcs814x_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
#ifdef CONFIG_PM
.suspend = ohci_hcd_mcs814x_drv_suspend,
.resume = ohci_hcd_mcs814x_drv_resume,
#endif
.driver = {
.name = "mcs814x-ohci",
.owner = THIS_MODULE,
.of_match_table = mcs814x_ohci_id,
},
};
MODULE_ALIAS("platform:mcs814x-ohci");
| gpl-2.0 |
broadcomCM/android_kernel_samsung_bcm21553-common | arch/ia64/kernel/crash_dump.c | 1766 | 1608 | /*
* kernel/crash_dump.c - Memory preserving reboot related code.
*
* Created by: Simon Horman <horms@verge.net.au>
* Original code moved from kernel/crash.c
* Original code comment copied from the i386 version of this file
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/crash_dump.h>
#include <asm/page.h>
#include <asm/uaccess.h>
/* Stores the physical address of elf header of crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
* @buf: target memory address for the copy; this can be in kernel address
* space or user address space (see @userbuf)
* @csize: number of bytes to copy
* @offset: offset in bytes into the page (based on pfn) to begin the copy
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
* otherwise @buf is in kernel address space, use memcpy().
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*
* Calling copy_to_user() in atomic context is not desirable. Hence first
* copying the data to a pre-allocated kernel page and then copying to user
* space in non-atomic context.
*/
ssize_t
copy_oldmem_page(unsigned long pfn, char *buf,
		size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	/* The old kernel's memory is identity-mapped; no pte games needed. */
	vaddr = __va(pfn << PAGE_SHIFT);

	if (userbuf) {
		if (copy_to_user(buf, vaddr + offset, csize))
			return -EFAULT;
	} else {
		memcpy(buf, vaddr + offset, csize);
	}

	return csize;
}
| gpl-2.0 |
AndyLavr/Aspire-SW5-012_Kernel_4.8 | drivers/hwmon/asc7621.c | 1766 | 35691 | /*
* asc7621.c - Part of lm_sensors, Linux kernel modules for hardware monitoring
* Copyright (c) 2007, 2010 George Joseph <george.joseph@fairview5.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = {
0x2c, 0x2d, 0x2e, I2C_CLIENT_END
};
enum asc7621_type {
asc7621,
asc7621a
};
#define INTERVAL_HIGH (HZ + HZ / 2)
#define INTERVAL_LOW (1 * 60 * HZ)
#define PRI_NONE 0
#define PRI_LOW 1
#define PRI_HIGH 2
#define FIRST_CHIP asc7621
#define LAST_CHIP asc7621a
struct asc7621_chip {
char *name;
enum asc7621_type chip_type;
u8 company_reg;
u8 company_id;
u8 verstep_reg;
u8 verstep_id;
const unsigned short *addresses;
};
static struct asc7621_chip asc7621_chips[] = {
{
.name = "asc7621",
.chip_type = asc7621,
.company_reg = 0x3e,
.company_id = 0x61,
.verstep_reg = 0x3f,
.verstep_id = 0x6c,
.addresses = normal_i2c,
},
{
.name = "asc7621a",
.chip_type = asc7621a,
.company_reg = 0x3e,
.company_id = 0x61,
.verstep_reg = 0x3f,
.verstep_id = 0x6d,
.addresses = normal_i2c,
},
};
/*
* Defines the highest register to be used, not the count.
* The actual count will probably be smaller because of gaps
* in the implementation (unused register locations).
* This define will safely set the array size of both the parameter
* and data arrays.
* This comes from the data sheet register description table.
*/
#define LAST_REGISTER 0xff
struct asc7621_data {
struct i2c_client client;
struct device *class_dev;
struct mutex update_lock;
int valid; /* !=0 if following fields are valid */
unsigned long last_high_reading; /* In jiffies */
unsigned long last_low_reading; /* In jiffies */
/*
* Registers we care about occupy the corresponding index
* in the array. Registers we don't care about are left
* at 0.
*/
u8 reg[LAST_REGISTER + 1];
};
/*
* Macro to get the parent asc7621_param structure
* from a sensor_device_attribute passed into the
* show/store functions.
*/
#define to_asc7621_param(_sda) \
container_of(_sda, struct asc7621_param, sda)
/*
* Each parameter to be retrieved needs an asc7621_param structure
* allocated. It contains the sensor_device_attribute structure
* and the control info needed to retrieve the value from the register map.
*/
struct asc7621_param {
	struct sensor_device_attribute sda;	/* sysfs attribute + index */
	u8 priority;	/* PRI_HIGH/PRI_LOW polling class for its registers */
	u8 msb[3];	/* register address(es) holding the MSB */
	u8 lsb[3];	/* register address(es) holding the LSB, 0 if unused */
	u8 mask[3];	/* bit-field mask(s), applied after shifting */
	u8 shift[3];	/* bit-field shift(s) */
};
/*
* This is the map that ultimately indicates whether we'll be
* retrieving a register value or not, and at what frequency.
*/
static u8 asc7621_register_priorities[255];
static struct asc7621_data *asc7621_update_device(struct device *dev);
/*
 * Read one register over SMBus.
 * On failure, log the register and return 0 so callers always get a byte.
 */
static inline u8 read_byte(struct i2c_client *client, u8 reg)
{
	int status = i2c_smbus_read_byte_data(client, reg);

	if (status >= 0)
		return status & 0xff;

	dev_err(&client->dev,
		"Unable to read from register 0x%02x.\n", reg);
	return 0;
}
/*
 * Write one register over SMBus.
 * Returns the i2c_smbus_write_byte_data() status (negative errno on
 * failure); failures are logged but otherwise left to the caller.
 */
static inline int write_byte(struct i2c_client *client, u8 reg, u8 data)
{
	int status = i2c_smbus_write_byte_data(client, reg, data);

	if (status >= 0)
		return status;

	dev_err(&client->dev,
		"Unable to write value 0x%02x to register 0x%02x.\n",
		data, reg);
	return status;
}
/*
* Data Handlers
* Each function handles the formatting, storage
* and retrieval of like parameters.
*/
#define SETUP_SHOW_DATA_PARAM(d, a) \
struct sensor_device_attribute *sda = to_sensor_dev_attr(a); \
struct asc7621_data *data = asc7621_update_device(d); \
struct asc7621_param *param = to_asc7621_param(sda)
#define SETUP_STORE_DATA_PARAM(d, a) \
struct sensor_device_attribute *sda = to_sensor_dev_attr(a); \
struct i2c_client *client = to_i2c_client(d); \
struct asc7621_data *data = i2c_get_clientdata(client); \
struct asc7621_param *param = to_asc7621_param(sda)
/*
* u8 is just what it sounds like...an unsigned byte with no
* special formatting.
*/
/* Show a raw 8-bit register value in decimal. */
static ssize_t show_u8(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);

	return sprintf(buf, "%u\n", data->reg[param->msb[0]]);
}
/* Store a raw 8-bit register value; input is clamped to 0..255. */
static ssize_t store_u8(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	SETUP_STORE_DATA_PARAM(dev, attr);
	long reqval;

	if (kstrtol(buf, 10, &reqval))
		return -EINVAL;

	reqval = clamp_val(reqval, 0, 255);

	mutex_lock(&data->update_lock);
	data->reg[param->msb[0]] = reqval;	/* keep cache in sync */
	write_byte(client, param->msb[0], reqval);
	mutex_unlock(&data->update_lock);
	return count;
}
/*
* Many of the config values occupy only a few bits of a register.
*/
/* Show a bit field: shift the register right, then mask. */
static ssize_t show_bitmask(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);

	return sprintf(buf, "%u\n",
		       (data->reg[param->msb[0]] >> param->
			shift[0]) & param->mask[0]);
}
/*
 * Store a bit field: clamp the input to the field's range, then
 * read-modify-write the containing register under the update lock.
 */
static ssize_t store_bitmask(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	SETUP_STORE_DATA_PARAM(dev, attr);
	long val;
	u8 old, merged;

	if (kstrtol(buf, 10, &val))
		return -EINVAL;

	/* Position the (clamped) value within its field. */
	val = clamp_val(val, 0, param->mask[0]);
	merged = (val & param->mask[0]) << param->shift[0];

	mutex_lock(&data->update_lock);
	old = read_byte(client, param->msb[0]);
	merged |= old & ~(param->mask[0] << param->shift[0]);
	data->reg[param->msb[0]] = merged;
	write_byte(client, param->msb[0], merged);
	mutex_unlock(&data->update_lock);
	return count;
}
/*
* 16 bit fan rpm values
* reported by the device as the number of 11.111us periods (90khz)
* between full fan rotations. Therefore...
* RPM = (90000 * 60) / register value
*/
/* Show fan speed in RPM: 5400000 = 90 kHz tach clock * 60 s. */
static ssize_t show_fan16(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);
	u16 regval;

	mutex_lock(&data->update_lock);
	regval = (data->reg[param->msb[0]] << 8) | data->reg[param->lsb[0]];
	mutex_unlock(&data->update_lock);

	/*
	 * 0 means no tach pulses were counted; 0xffff is the "fan
	 * deliberately stopped" sentinel (see store_fan16) and reads as
	 * 0 RPM.  NOTE(review): the -1 for regval==0 is printed with
	 * "%u", so userspace sees UINT_MAX -- presumably the intended
	 * fault encoding, but worth confirming.
	 */
	return sprintf(buf, "%u\n",
		       (regval == 0 ? -1 : (regval) ==
			0xffff ? 0 : 5400000 / regval));
}
/* Store a fan RPM limit, converting RPM back to tach-period counts. */
static ssize_t store_fan16(struct device *dev,
			   struct device_attribute *attr, const char *buf,
			   size_t count)
{
	SETUP_STORE_DATA_PARAM(dev, attr);
	long reqval;

	if (kstrtol(buf, 10, &reqval))
		return -EINVAL;

	/*
	 * If a minimum RPM of zero is requested, then we set the register to
	 * 0xffff. This value allows the fan to be stopped completely without
	 * generating an alarm.
	 */
	reqval =
	    (reqval <= 0 ? 0xffff : clamp_val(5400000 / reqval, 0, 0xfffe));

	mutex_lock(&data->update_lock);
	/* Write MSB first, mirroring the cached copies. */
	data->reg[param->msb[0]] = (reqval >> 8) & 0xff;
	data->reg[param->lsb[0]] = reqval & 0xff;
	write_byte(client, param->msb[0], data->reg[param->msb[0]]);
	write_byte(client, param->lsb[0], data->reg[param->lsb[0]]);
	mutex_unlock(&data->update_lock);

	return count;
}
/*
* Voltages are scaled in the device so that the nominal voltage
* is 3/4ths of the 0-255 range (i.e. 192).
* If all voltages are 'normal' then all voltage registers will
* read 0xC0.
*
* The data sheet provides us with the 3/4 scale value for each voltage
* which is stored in in_scaling. The sda->index parameter value provides
* the index into in_scaling.
*
* NOTE: The chip expects the first 2 inputs be 2.5 and 2.25 volts
* respectively. That doesn't mean that's what the motherboard provides. :)
*/
/* 3/4-scale full-range voltage per input, in mV, indexed by sda->index. */
static const int asc7621_in_scaling[] = {
	2500, 2250, 3300, 5000, 12000
};
/* Show a 10-bit voltage reading, scaled to mV via asc7621_in_scaling[]. */
static ssize_t show_in10(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);
	u16 regval;
	u8 nr = sda->index;

	mutex_lock(&data->update_lock);
	regval = (data->reg[param->msb[0]] << 8) | (data->reg[param->lsb[0]]);
	mutex_unlock(&data->update_lock);

	/* The LSB value is a 2-bit scaling of the MSB's LSbit value. */
	regval = (regval >> 6) * asc7621_in_scaling[nr] / (0xc0 << 2);

	return sprintf(buf, "%u\n", regval);
}
/* 8 bit voltage values (the mins and maxs) */
/* Show an 8-bit voltage limit in mV (0xc0 is the nominal 3/4-scale point). */
static ssize_t show_in8(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);
	u8 nr = sda->index;

	return sprintf(buf, "%u\n",
		       ((data->reg[param->msb[0]] *
			 asc7621_in_scaling[nr]) / 0xc0));
}
/* Store an 8-bit voltage limit, converting mV back to register scale. */
static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	SETUP_STORE_DATA_PARAM(dev, attr);
	long reqval;
	u8 nr = sda->index;

	if (kstrtol(buf, 10, &reqval))
		return -EINVAL;

	/* Clamp first in mV, then again in register units after scaling. */
	reqval = clamp_val(reqval, 0, 0xffff);
	reqval = reqval * 0xc0 / asc7621_in_scaling[nr];
	reqval = clamp_val(reqval, 0, 0xff);

	mutex_lock(&data->update_lock);
	data->reg[param->msb[0]] = reqval;
	write_byte(client, param->msb[0], reqval);
	mutex_unlock(&data->update_lock);

	return count;
}
/* Show a signed 8-bit temperature in millidegrees C. */
static ssize_t show_temp8(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);

	return sprintf(buf, "%d\n", ((s8) data->reg[param->msb[0]]) * 1000);
}
/* Store a signed 8-bit temperature; input in millidegrees C, clamped to +/-127 C. */
static ssize_t store_temp8(struct device *dev,
			   struct device_attribute *attr, const char *buf,
			   size_t count)
{
	SETUP_STORE_DATA_PARAM(dev, attr);
	long reqval;
	s8 temp;

	if (kstrtol(buf, 10, &reqval))
		return -EINVAL;

	reqval = clamp_val(reqval, -127000, 127000);

	temp = reqval / 1000;

	mutex_lock(&data->update_lock);
	data->reg[param->msb[0]] = temp;
	write_byte(client, param->msb[0], temp);
	mutex_unlock(&data->update_lock);
	return count;
}
/*
* Temperatures that occupy 2 bytes always have the whole
* number of degrees in the MSB with some part of the LSB
* indicating fractional degrees.
*/
/* mmmmmmmm.llxxxxxx */
/*
 * Show a 10-bit temperature: signed whole degrees in the MSB, two
 * fractional bits (0.25 degC steps) in the top of the LSB register.
 */
static ssize_t show_temp10(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);
	u8 msb, lsb;
	int temp;

	mutex_lock(&data->update_lock);
	msb = data->reg[param->msb[0]];
	lsb = (data->reg[param->lsb[0]] >> 6) & 0x03;
	temp = (((s8) msb) * 1000) + (lsb * 250);
	mutex_unlock(&data->update_lock);

	return sprintf(buf, "%d\n", temp);
}
/* mmmmmm.ll */
/* Show a 6.2 fixed-point temperature offset (low two bits = 0.25 degC). */
static ssize_t show_temp62(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);
	u8 regval = data->reg[param->msb[0]];
	int temp = ((s8) (regval & 0xfc) * 1000) + ((regval & 0x03) * 250);

	return sprintf(buf, "%d\n", temp);
}
/* Store a 6.2 fixed-point temperature offset, input in millidegrees C. */
static ssize_t store_temp62(struct device *dev,
			    struct device_attribute *attr, const char *buf,
			    size_t count)
{
	SETUP_STORE_DATA_PARAM(dev, attr);
	long reqval, i, f;
	s8 temp;

	if (kstrtol(buf, 10, &reqval))
		return -EINVAL;

	reqval = clamp_val(reqval, -32000, 31750);
	/*
	 * Split into whole degrees (i) and a fraction (f), then pack as
	 * 6.2 fixed point.
	 * NOTE(review): for negative inputs both i and f truncate toward
	 * zero, so `i << 2` left-shifts a negative value and f/250 is
	 * <= 0; the packed result depends on implementation-defined
	 * behavior -- verify against the datasheet's negative-offset
	 * encoding.
	 */
	i = reqval / 1000;
	f = reqval - (i * 1000);
	temp = i << 2;
	temp |= f / 250;

	mutex_lock(&data->update_lock);
	data->reg[param->msb[0]] = temp;
	write_byte(client, param->msb[0], temp);
	mutex_unlock(&data->update_lock);
	return count;
}
/*
* The aSC7621 doesn't provide an "auto_point2". Instead, you
* specify the auto_point1 and a range. To keep with the sysfs
* hwmon specs, we synthesize the auto_point_2 from them.
*/
/* auto_point range choices in millidegrees C, indexed by the 4-bit register field. */
static const u32 asc7621_range_map[] = {
	2000, 2500, 3330, 4000, 5000, 6670, 8000, 10000,
	13330, 16000, 20000, 26670, 32000, 40000, 53330, 80000,
};
/* Synthesize auto_point2 = auto_point1 (signed, msb[1]) + range (msb[0] field). */
static ssize_t show_ap2_temp(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);
	long auto_point1;
	u8 regval;
	int temp;

	mutex_lock(&data->update_lock);
	auto_point1 = ((s8) data->reg[param->msb[1]]) * 1000;
	regval =
	    ((data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0]);
	temp = auto_point1 + asc7621_range_map[clamp_val(regval, 0, 15)];
	mutex_unlock(&data->update_lock);

	return sprintf(buf, "%d\n", temp);
}
/*
 * Store auto_point2 by picking the largest range value that keeps
 * auto_point1 + range <= the requested temperature, then writing that
 * range index into its bit field.
 */
static ssize_t store_ap2_temp(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	SETUP_STORE_DATA_PARAM(dev, attr);
	long reqval, auto_point1;
	int i;
	u8 currval, newval = 0;

	if (kstrtol(buf, 10, &reqval))
		return -EINVAL;

	mutex_lock(&data->update_lock);
	/*
	 * Treat the auto_point1 register as signed, exactly as
	 * show_ap2_temp() does; without the (s8) cast a negative
	 * auto_point1 (e.g. 0xf6 == -10 degC) was read as +246 degC and
	 * the clamp below computed a bogus window.
	 */
	auto_point1 = ((s8) data->reg[param->msb[1]]) * 1000;
	reqval = clamp_val(reqval, auto_point1 + 2000, auto_point1 + 80000);

	/* Largest range that still fits below the request; 0 if none. */
	for (i = ARRAY_SIZE(asc7621_range_map) - 1; i >= 0; i--) {
		if (reqval >= auto_point1 + asc7621_range_map[i]) {
			newval = i;
			break;
		}
	}

	/* Read-modify-write the range field into its register. */
	newval = (newval & param->mask[0]) << param->shift[0];
	currval = read_byte(client, param->msb[0]);
	newval |= (currval & ~(param->mask[0] << param->shift[0]));
	data->reg[param->msb[0]] = newval;
	write_byte(client, param->msb[0], newval);
	mutex_unlock(&data->update_lock);
	return count;
}
/*
 * Show pwm auto_channels: combine the 3-bit zone code (msb[0] field)
 * and the "alternate" bit (msb[1] field) into a 4-bit index, then map
 * it to the sysfs channel bitmap.  0x1f marks reserved/unused codes.
 */
static ssize_t show_pwm_ac(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);
	u8 config, altbit, regval;
	const u8 map[] = {
		0x01, 0x02, 0x04, 0x1f, 0x00, 0x06, 0x07, 0x10,
		0x08, 0x0f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f
	};

	mutex_lock(&data->update_lock);
	config = (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
	altbit = (data->reg[param->msb[1]] >> param->shift[1]) & param->mask[1];
	regval = config | (altbit << 3);
	mutex_unlock(&data->update_lock);

	return sprintf(buf, "%u\n", map[clamp_val(regval, 0, 15)]);
}
/*
 * Store pwm auto_channels: reverse lookup of show_pwm_ac()'s map.
 * map[] is indexed by the requested channel bitmap (0..31) and yields
 * the 4-bit altbit|config code; 0xff marks unsupported combinations.
 */
static ssize_t store_pwm_ac(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	SETUP_STORE_DATA_PARAM(dev, attr);
	unsigned long reqval;
	u8 currval, config, altbit, newval;
	const u16 map[] = {
		0x04, 0x00, 0x01, 0xff, 0x02, 0xff, 0x05, 0x06,
		0x08, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f,
		0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
	};

	if (kstrtoul(buf, 10, &reqval))
		return -EINVAL;

	if (reqval > 31)
		return -EINVAL;

	reqval = map[reqval];
	if (reqval == 0xff)
		return -EINVAL;

	/* Split the code back into its two register fields. */
	config = reqval & 0x07;
	altbit = (reqval >> 3) & 0x01;

	config = (config & param->mask[0]) << param->shift[0];
	altbit = (altbit & param->mask[1]) << param->shift[1];

	mutex_lock(&data->update_lock);
	/* Both fields live in the same register (msb[0] == msb[1]). */
	currval = read_byte(client, param->msb[0]);
	newval = config | (currval & ~(param->mask[0] << param->shift[0]));
	newval = altbit | (newval & ~(param->mask[1] << param->shift[1]));
	data->reg[param->msb[0]] = newval;
	write_byte(client, param->msb[0], newval);
	mutex_unlock(&data->update_lock);
	return count;
}
/*
 * Show pwm enable mode.  The chip's zone code (config) plus the
 * alternate bit form a 4-bit value that is translated to the sysfs
 * convention: 255 = full on / invalid, 0 = off, 1 = manual,
 * 2 = auto with min-off, 3 = auto.
 */
static ssize_t show_pwm_enable(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);
	u8 mode, alt, min_off, combined, result;

	mutex_lock(&data->update_lock);
	mode = (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
	alt = (data->reg[param->msb[1]] >> param->shift[1]) & param->mask[1];
	min_off = (data->reg[param->msb[2]] >> param->shift[2]) & param->mask[2];
	mutex_unlock(&data->update_lock);

	combined = mode | (alt << 3);
	if (combined == 3 || combined >= 10)
		result = 255;
	else if (combined == 4)
		result = 0;
	else if (combined == 7)
		result = 1;
	else
		result = (min_off == 1) ? 2 : 3;

	return sprintf(buf, "%u\n", result);
}
/*
 * Store pwm enable mode (inverse of show_pwm_enable): 0 = off,
 * 1 = manual, 2 = auto with min-off, 3 = auto, 255 = full on.
 * Updates the zone-code register and, for modes 2/3, the separate
 * min-off register as well.
 */
static ssize_t store_pwm_enable(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	SETUP_STORE_DATA_PARAM(dev, attr);
	long reqval;
	/* minoff == 255 means "leave the min-off bit alone" */
	u8 currval, config, altbit, newval, minoff = 255;

	if (kstrtol(buf, 10, &reqval))
		return -EINVAL;

	switch (reqval) {
	case 0:
		newval = 0x04;
		break;
	case 1:
		newval = 0x07;
		break;
	case 2:
		newval = 0x00;
		minoff = 1;
		break;
	case 3:
		newval = 0x00;
		minoff = 0;
		break;
	case 255:
		newval = 0x03;
		break;
	default:
		return -EINVAL;
	}

	/* Split into the 3-bit zone code and the alternate bit. */
	config = newval & 0x07;
	altbit = (newval >> 3) & 0x01;

	mutex_lock(&data->update_lock);
	config = (config & param->mask[0]) << param->shift[0];
	altbit = (altbit & param->mask[1]) << param->shift[1];
	/* Both fields share one register (msb[0] == msb[1]). */
	currval = read_byte(client, param->msb[0]);
	newval = config | (currval & ~(param->mask[0] << param->shift[0]));
	newval = altbit | (newval & ~(param->mask[1] << param->shift[1]));
	data->reg[param->msb[0]] = newval;
	write_byte(client, param->msb[0], newval);

	/* Min-off lives in a different register (msb[2]). */
	if (minoff < 255) {
		minoff = (minoff & param->mask[2]) << param->shift[2];
		currval = read_byte(client, param->msb[2]);
		newval =
		    minoff | (currval & ~(param->mask[2] << param->shift[2]));
		data->reg[param->msb[2]] = newval;
		write_byte(client, param->msb[2], newval);
	}
	mutex_unlock(&data->update_lock);

	return count;
}
/* Available PWM frequencies in Hz, indexed by the 4-bit register field. */
static const u32 asc7621_pwm_freq_map[] = {
	10, 15, 23, 30, 38, 47, 62, 94,
	23000, 24000, 25000, 26000, 27000, 28000, 29000, 30000
};
/* Show the current PWM frequency in Hz via asc7621_pwm_freq_map[]. */
static ssize_t show_pwm_freq(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);
	u8 regval =
	    (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];

	regval = clamp_val(regval, 0, 15);

	return sprintf(buf, "%u\n", asc7621_pwm_freq_map[regval]);
}
/*
 * Store a PWM frequency.  Only exact values from
 * asc7621_pwm_freq_map[] are accepted; the matching index is written
 * into the register's frequency field.
 */
static ssize_t store_pwm_freq(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	SETUP_STORE_DATA_PARAM(dev, attr);
	unsigned long freq;
	u8 oldbits, idx = 255;	/* 255 = sentinel for "not found" */
	int pos;

	if (kstrtoul(buf, 10, &freq))
		return -EINVAL;

	/* Map the requested frequency back to its register index. */
	for (pos = 0; pos < ARRAY_SIZE(asc7621_pwm_freq_map); pos++) {
		if (asc7621_pwm_freq_map[pos] == freq) {
			idx = pos;
			break;
		}
	}
	if (idx == 255)
		return -EINVAL;

	idx = (idx & param->mask[0]) << param->shift[0];

	mutex_lock(&data->update_lock);
	oldbits = read_byte(client, param->msb[0]);
	idx |= (oldbits & ~(param->mask[0] << param->shift[0]));
	data->reg[param->msb[0]] = idx;
	write_byte(client, param->msb[0], idx);
	mutex_unlock(&data->update_lock);
	return count;
}
/* PWM auto spin-up times in ms, indexed by the 3-bit register field. */
static const u32 asc7621_pwm_auto_spinup_map[] = {
	0, 100, 250, 400, 700, 1000, 2000, 4000
};
/* Show the PWM auto spin-up time in ms. */
static ssize_t show_pwm_ast(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);
	u8 regval =
	    (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];

	regval = clamp_val(regval, 0, 7);

	return sprintf(buf, "%u\n", asc7621_pwm_auto_spinup_map[regval]);
}
/* Store a PWM auto spin-up time; only exact map values are accepted. */
static ssize_t store_pwm_ast(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	SETUP_STORE_DATA_PARAM(dev, attr);
	long reqval;
	u8 currval, newval = 255;	/* 255 = "no match found" sentinel */
	u32 i;

	if (kstrtol(buf, 10, &reqval))
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(asc7621_pwm_auto_spinup_map); i++) {
		if (reqval == asc7621_pwm_auto_spinup_map[i]) {
			newval = i;
			break;
		}
	}

	if (newval == 255)
		return -EINVAL;

	newval = (newval & param->mask[0]) << param->shift[0];

	mutex_lock(&data->update_lock);
	currval = read_byte(client, param->msb[0]);
	newval |= (currval & ~(param->mask[0] << param->shift[0]));
	data->reg[param->msb[0]] = newval;
	write_byte(client, param->msb[0], newval);
	mutex_unlock(&data->update_lock);
	return count;
}
/* Temperature smoothing time constants in ms, indexed by the 3-bit field. */
static const u32 asc7621_temp_smoothing_time_map[] = {
	35000, 17600, 11800, 7000, 4400, 3000, 1600, 800
};
/* Show the temperature smoothing time in ms. */
static ssize_t show_temp_st(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	SETUP_SHOW_DATA_PARAM(dev, attr);
	u8 regval =
	    (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];

	regval = clamp_val(regval, 0, 7);

	return sprintf(buf, "%u\n", asc7621_temp_smoothing_time_map[regval]);
}
/* Store a temperature smoothing time; only exact map values are accepted. */
static ssize_t store_temp_st(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	SETUP_STORE_DATA_PARAM(dev, attr);
	long reqval;
	u8 currval, newval = 255;	/* 255 = "no match found" sentinel */
	u32 i;

	if (kstrtol(buf, 10, &reqval))
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(asc7621_temp_smoothing_time_map); i++) {
		if (reqval == asc7621_temp_smoothing_time_map[i]) {
			newval = i;
			break;
		}
	}

	if (newval == 255)
		return -EINVAL;

	newval = (newval & param->mask[0]) << param->shift[0];

	mutex_lock(&data->update_lock);
	currval = read_byte(client, param->msb[0]);
	newval |= (currval & ~(param->mask[0] << param->shift[0]));
	data->reg[param->msb[0]] = newval;
	write_byte(client, param->msb[0], newval);
	mutex_unlock(&data->update_lock);
	return count;
}
/*
* End of data handlers
*
* These defines do nothing more than make the table easier
* to read when wrapped at column 80.
*/
/*
* Creates a variable length array inititalizer.
* VAA(1,3,5,7) would produce {1,3,5,7}
*/
#define VAA(args...) {args}
#define PREAD(name, n, pri, rm, rl, m, s, r) \
{.sda = SENSOR_ATTR(name, S_IRUGO, show_##r, NULL, n), \
.priority = pri, .msb[0] = rm, .lsb[0] = rl, .mask[0] = m, \
.shift[0] = s,}
#define PWRITE(name, n, pri, rm, rl, m, s, r) \
{.sda = SENSOR_ATTR(name, S_IRUGO | S_IWUSR, show_##r, store_##r, n), \
.priority = pri, .msb[0] = rm, .lsb[0] = rl, .mask[0] = m, \
.shift[0] = s,}
/*
* PWRITEM assumes that the initializers for the .msb, .lsb, .mask and .shift
* were created using the VAA macro.
*/
#define PWRITEM(name, n, pri, rm, rl, m, s, r) \
{.sda = SENSOR_ATTR(name, S_IRUGO | S_IWUSR, show_##r, store_##r, n), \
.priority = pri, .msb = rm, .lsb = rl, .mask = m, .shift = s,}
/* The complete sysfs parameter map: one entry per attribute. */
static struct asc7621_param asc7621_params[] = {
	/* Voltage inputs, limits and alarms */
	PREAD(in0_input, 0, PRI_HIGH, 0x20, 0x13, 0, 0, in10),
	PREAD(in1_input, 1, PRI_HIGH, 0x21, 0x18, 0, 0, in10),
	PREAD(in2_input, 2, PRI_HIGH, 0x22, 0x11, 0, 0, in10),
	PREAD(in3_input, 3, PRI_HIGH, 0x23, 0x12, 0, 0, in10),
	PREAD(in4_input, 4, PRI_HIGH, 0x24, 0x14, 0, 0, in10),

	PWRITE(in0_min, 0, PRI_LOW, 0x44, 0, 0, 0, in8),
	PWRITE(in1_min, 1, PRI_LOW, 0x46, 0, 0, 0, in8),
	PWRITE(in2_min, 2, PRI_LOW, 0x48, 0, 0, 0, in8),
	PWRITE(in3_min, 3, PRI_LOW, 0x4a, 0, 0, 0, in8),
	PWRITE(in4_min, 4, PRI_LOW, 0x4c, 0, 0, 0, in8),

	PWRITE(in0_max, 0, PRI_LOW, 0x45, 0, 0, 0, in8),
	PWRITE(in1_max, 1, PRI_LOW, 0x47, 0, 0, 0, in8),
	PWRITE(in2_max, 2, PRI_LOW, 0x49, 0, 0, 0, in8),
	PWRITE(in3_max, 3, PRI_LOW, 0x4b, 0, 0, 0, in8),
	PWRITE(in4_max, 4, PRI_LOW, 0x4d, 0, 0, 0, in8),

	PREAD(in0_alarm, 0, PRI_HIGH, 0x41, 0, 0x01, 0, bitmask),
	PREAD(in1_alarm, 1, PRI_HIGH, 0x41, 0, 0x01, 1, bitmask),
	PREAD(in2_alarm, 2, PRI_HIGH, 0x41, 0, 0x01, 2, bitmask),
	PREAD(in3_alarm, 3, PRI_HIGH, 0x41, 0, 0x01, 3, bitmask),
	PREAD(in4_alarm, 4, PRI_HIGH, 0x42, 0, 0x01, 0, bitmask),

	/* Fan tach inputs, limits and alarms */
	PREAD(fan1_input, 0, PRI_HIGH, 0x29, 0x28, 0, 0, fan16),
	PREAD(fan2_input, 1, PRI_HIGH, 0x2b, 0x2a, 0, 0, fan16),
	PREAD(fan3_input, 2, PRI_HIGH, 0x2d, 0x2c, 0, 0, fan16),
	PREAD(fan4_input, 3, PRI_HIGH, 0x2f, 0x2e, 0, 0, fan16),

	PWRITE(fan1_min, 0, PRI_LOW, 0x55, 0x54, 0, 0, fan16),
	PWRITE(fan2_min, 1, PRI_LOW, 0x57, 0x56, 0, 0, fan16),
	PWRITE(fan3_min, 2, PRI_LOW, 0x59, 0x58, 0, 0, fan16),
	PWRITE(fan4_min, 3, PRI_LOW, 0x5b, 0x5a, 0, 0, fan16),

	PREAD(fan1_alarm, 0, PRI_HIGH, 0x42, 0, 0x01, 2, bitmask),
	PREAD(fan2_alarm, 1, PRI_HIGH, 0x42, 0, 0x01, 3, bitmask),
	PREAD(fan3_alarm, 2, PRI_HIGH, 0x42, 0, 0x01, 4, bitmask),
	PREAD(fan4_alarm, 3, PRI_HIGH, 0x42, 0, 0x01, 5, bitmask),

	/* Temperature sensors, limits, alarms and configuration */
	PREAD(temp1_input, 0, PRI_HIGH, 0x25, 0x10, 0, 0, temp10),
	PREAD(temp2_input, 1, PRI_HIGH, 0x26, 0x15, 0, 0, temp10),
	PREAD(temp3_input, 2, PRI_HIGH, 0x27, 0x16, 0, 0, temp10),
	PREAD(temp4_input, 3, PRI_HIGH, 0x33, 0x17, 0, 0, temp10),
	PREAD(temp5_input, 4, PRI_HIGH, 0xf7, 0xf6, 0, 0, temp10),
	PREAD(temp6_input, 5, PRI_HIGH, 0xf9, 0xf8, 0, 0, temp10),
	PREAD(temp7_input, 6, PRI_HIGH, 0xfb, 0xfa, 0, 0, temp10),
	PREAD(temp8_input, 7, PRI_HIGH, 0xfd, 0xfc, 0, 0, temp10),

	PWRITE(temp1_min, 0, PRI_LOW, 0x4e, 0, 0, 0, temp8),
	PWRITE(temp2_min, 1, PRI_LOW, 0x50, 0, 0, 0, temp8),
	PWRITE(temp3_min, 2, PRI_LOW, 0x52, 0, 0, 0, temp8),
	PWRITE(temp4_min, 3, PRI_LOW, 0x34, 0, 0, 0, temp8),

	PWRITE(temp1_max, 0, PRI_LOW, 0x4f, 0, 0, 0, temp8),
	PWRITE(temp2_max, 1, PRI_LOW, 0x51, 0, 0, 0, temp8),
	PWRITE(temp3_max, 2, PRI_LOW, 0x53, 0, 0, 0, temp8),
	PWRITE(temp4_max, 3, PRI_LOW, 0x35, 0, 0, 0, temp8),

	PREAD(temp1_alarm, 0, PRI_HIGH, 0x41, 0, 0x01, 4, bitmask),
	PREAD(temp2_alarm, 1, PRI_HIGH, 0x41, 0, 0x01, 5, bitmask),
	PREAD(temp3_alarm, 2, PRI_HIGH, 0x41, 0, 0x01, 6, bitmask),
	PREAD(temp4_alarm, 3, PRI_HIGH, 0x43, 0, 0x01, 0, bitmask),

	PWRITE(temp1_source, 0, PRI_LOW, 0x02, 0, 0x07, 4, bitmask),
	PWRITE(temp2_source, 1, PRI_LOW, 0x02, 0, 0x07, 0, bitmask),
	PWRITE(temp3_source, 2, PRI_LOW, 0x03, 0, 0x07, 4, bitmask),
	PWRITE(temp4_source, 3, PRI_LOW, 0x03, 0, 0x07, 0, bitmask),

	PWRITE(temp1_smoothing_enable, 0, PRI_LOW, 0x62, 0, 0x01, 3, bitmask),
	PWRITE(temp2_smoothing_enable, 1, PRI_LOW, 0x63, 0, 0x01, 7, bitmask),
	PWRITE(temp3_smoothing_enable, 2, PRI_LOW, 0x63, 0, 0x01, 3, bitmask),
	PWRITE(temp4_smoothing_enable, 3, PRI_LOW, 0x3c, 0, 0x01, 3, bitmask),

	PWRITE(temp1_smoothing_time, 0, PRI_LOW, 0x62, 0, 0x07, 0, temp_st),
	PWRITE(temp2_smoothing_time, 1, PRI_LOW, 0x63, 0, 0x07, 4, temp_st),
	PWRITE(temp3_smoothing_time, 2, PRI_LOW, 0x63, 0, 0x07, 0, temp_st),
	PWRITE(temp4_smoothing_time, 3, PRI_LOW, 0x3c, 0, 0x07, 0, temp_st),

	/* Automatic fan control points (auto_point2 is synthesized) */
	PWRITE(temp1_auto_point1_temp_hyst, 0, PRI_LOW, 0x6d, 0, 0x0f, 4,
	       bitmask),
	PWRITE(temp2_auto_point1_temp_hyst, 1, PRI_LOW, 0x6d, 0, 0x0f, 0,
	       bitmask),
	PWRITE(temp3_auto_point1_temp_hyst, 2, PRI_LOW, 0x6e, 0, 0x0f, 4,
	       bitmask),
	PWRITE(temp4_auto_point1_temp_hyst, 3, PRI_LOW, 0x6e, 0, 0x0f, 0,
	       bitmask),

	PREAD(temp1_auto_point2_temp_hyst, 0, PRI_LOW, 0x6d, 0, 0x0f, 4,
	      bitmask),
	PREAD(temp2_auto_point2_temp_hyst, 1, PRI_LOW, 0x6d, 0, 0x0f, 0,
	      bitmask),
	PREAD(temp3_auto_point2_temp_hyst, 2, PRI_LOW, 0x6e, 0, 0x0f, 4,
	      bitmask),
	PREAD(temp4_auto_point2_temp_hyst, 3, PRI_LOW, 0x6e, 0, 0x0f, 0,
	      bitmask),

	PWRITE(temp1_auto_point1_temp, 0, PRI_LOW, 0x67, 0, 0, 0, temp8),
	PWRITE(temp2_auto_point1_temp, 1, PRI_LOW, 0x68, 0, 0, 0, temp8),
	PWRITE(temp3_auto_point1_temp, 2, PRI_LOW, 0x69, 0, 0, 0, temp8),
	PWRITE(temp4_auto_point1_temp, 3, PRI_LOW, 0x3b, 0, 0, 0, temp8),

	PWRITEM(temp1_auto_point2_temp, 0, PRI_LOW, VAA(0x5f, 0x67), VAA(0),
		VAA(0x0f), VAA(4), ap2_temp),
	PWRITEM(temp2_auto_point2_temp, 1, PRI_LOW, VAA(0x60, 0x68), VAA(0),
		VAA(0x0f), VAA(4), ap2_temp),
	PWRITEM(temp3_auto_point2_temp, 2, PRI_LOW, VAA(0x61, 0x69), VAA(0),
		VAA(0x0f), VAA(4), ap2_temp),
	PWRITEM(temp4_auto_point2_temp, 3, PRI_LOW, VAA(0x3c, 0x3b), VAA(0),
		VAA(0x0f), VAA(4), ap2_temp),

	PWRITE(temp1_crit, 0, PRI_LOW, 0x6a, 0, 0, 0, temp8),
	PWRITE(temp2_crit, 1, PRI_LOW, 0x6b, 0, 0, 0, temp8),
	PWRITE(temp3_crit, 2, PRI_LOW, 0x6c, 0, 0, 0, temp8),
	PWRITE(temp4_crit, 3, PRI_LOW, 0x3d, 0, 0, 0, temp8),

	PWRITE(temp5_enable, 4, PRI_LOW, 0x0e, 0, 0x01, 0, bitmask),
	PWRITE(temp6_enable, 5, PRI_LOW, 0x0e, 0, 0x01, 1, bitmask),
	PWRITE(temp7_enable, 6, PRI_LOW, 0x0e, 0, 0x01, 2, bitmask),
	PWRITE(temp8_enable, 7, PRI_LOW, 0x0e, 0, 0x01, 3, bitmask),

	PWRITE(remote1_offset, 0, PRI_LOW, 0x1c, 0, 0, 0, temp62),
	PWRITE(remote2_offset, 1, PRI_LOW, 0x1d, 0, 0, 0, temp62),

	/* PWM outputs and their configuration */
	PWRITE(pwm1, 0, PRI_HIGH, 0x30, 0, 0, 0, u8),
	PWRITE(pwm2, 1, PRI_HIGH, 0x31, 0, 0, 0, u8),
	PWRITE(pwm3, 2, PRI_HIGH, 0x32, 0, 0, 0, u8),

	PWRITE(pwm1_invert, 0, PRI_LOW, 0x5c, 0, 0x01, 4, bitmask),
	PWRITE(pwm2_invert, 1, PRI_LOW, 0x5d, 0, 0x01, 4, bitmask),
	PWRITE(pwm3_invert, 2, PRI_LOW, 0x5e, 0, 0x01, 4, bitmask),

	PWRITEM(pwm1_enable, 0, PRI_LOW, VAA(0x5c, 0x5c, 0x62), VAA(0, 0, 0),
		VAA(0x07, 0x01, 0x01), VAA(5, 3, 5), pwm_enable),
	PWRITEM(pwm2_enable, 1, PRI_LOW, VAA(0x5d, 0x5d, 0x62), VAA(0, 0, 0),
		VAA(0x07, 0x01, 0x01), VAA(5, 3, 6), pwm_enable),
	PWRITEM(pwm3_enable, 2, PRI_LOW, VAA(0x5e, 0x5e, 0x62), VAA(0, 0, 0),
		VAA(0x07, 0x01, 0x01), VAA(5, 3, 7), pwm_enable),

	PWRITEM(pwm1_auto_channels, 0, PRI_LOW, VAA(0x5c, 0x5c), VAA(0, 0),
		VAA(0x07, 0x01), VAA(5, 3), pwm_ac),
	PWRITEM(pwm2_auto_channels, 1, PRI_LOW, VAA(0x5d, 0x5d), VAA(0, 0),
		VAA(0x07, 0x01), VAA(5, 3), pwm_ac),
	PWRITEM(pwm3_auto_channels, 2, PRI_LOW, VAA(0x5e, 0x5e), VAA(0, 0),
		VAA(0x07, 0x01), VAA(5, 3), pwm_ac),

	PWRITE(pwm1_auto_point1_pwm, 0, PRI_LOW, 0x64, 0, 0, 0, u8),
	PWRITE(pwm2_auto_point1_pwm, 1, PRI_LOW, 0x65, 0, 0, 0, u8),
	PWRITE(pwm3_auto_point1_pwm, 2, PRI_LOW, 0x66, 0, 0, 0, u8),

	PWRITE(pwm1_auto_point2_pwm, 0, PRI_LOW, 0x38, 0, 0, 0, u8),
	PWRITE(pwm2_auto_point2_pwm, 1, PRI_LOW, 0x39, 0, 0, 0, u8),
	PWRITE(pwm3_auto_point2_pwm, 2, PRI_LOW, 0x3a, 0, 0, 0, u8),

	PWRITE(pwm1_freq, 0, PRI_LOW, 0x5f, 0, 0x0f, 0, pwm_freq),
	PWRITE(pwm2_freq, 1, PRI_LOW, 0x60, 0, 0x0f, 0, pwm_freq),
	PWRITE(pwm3_freq, 2, PRI_LOW, 0x61, 0, 0x0f, 0, pwm_freq),

	PREAD(pwm1_auto_zone_assigned, 0, PRI_LOW, 0, 0, 0x03, 2, bitmask),
	PREAD(pwm2_auto_zone_assigned, 1, PRI_LOW, 0, 0, 0x03, 4, bitmask),
	PREAD(pwm3_auto_zone_assigned, 2, PRI_LOW, 0, 0, 0x03, 6, bitmask),

	PWRITE(pwm1_auto_spinup_time, 0, PRI_LOW, 0x5c, 0, 0x07, 0, pwm_ast),
	PWRITE(pwm2_auto_spinup_time, 1, PRI_LOW, 0x5d, 0, 0x07, 0, pwm_ast),
	PWRITE(pwm3_auto_spinup_time, 2, PRI_LOW, 0x5e, 0, 0x07, 0, pwm_ast),

	/* PECI configuration */
	PWRITE(peci_enable, 0, PRI_LOW, 0x40, 0, 0x01, 4, bitmask),
	PWRITE(peci_avg, 0, PRI_LOW, 0x36, 0, 0x07, 0, bitmask),
	PWRITE(peci_domain, 0, PRI_LOW, 0x36, 0, 0x01, 3, bitmask),
	PWRITE(peci_legacy, 0, PRI_LOW, 0x36, 0, 0x01, 4, bitmask),
	PWRITE(peci_diode, 0, PRI_LOW, 0x0e, 0, 0x07, 4, bitmask),
	PWRITE(peci_4domain, 0, PRI_LOW, 0x0e, 0, 0x01, 4, bitmask),
};
/*
 * Refresh the register cache: high-priority registers at most every
 * INTERVAL_HIGH, low-priority ones every INTERVAL_LOW.  Returns the
 * per-device data with reg[] updated.
 */
static struct asc7621_data *asc7621_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct asc7621_data *data = i2c_get_clientdata(client);
	int i;

	/*
	 * The asc7621 chips guarantee consistent reads of multi-byte values
	 * regardless of the order of the reads. No special logic is needed
	 * so we can just read the registers in whatever order they appear
	 * in the asc7621_params array.
	 */

	mutex_lock(&data->update_lock);

	/* Read all the high priority registers */
	if (!data->valid ||
	    time_after(jiffies, data->last_high_reading + INTERVAL_HIGH)) {
		for (i = 0; i < ARRAY_SIZE(asc7621_register_priorities); i++) {
			if (asc7621_register_priorities[i] == PRI_HIGH) {
				data->reg[i] =
				    i2c_smbus_read_byte_data(client, i) & 0xff;
			}
		}
		data->last_high_reading = jiffies;
	}			/* last_reading */

	/* Read all the low priority registers. */
	if (!data->valid ||
	    time_after(jiffies, data->last_low_reading + INTERVAL_LOW)) {
		/*
		 * Fix: bound the loop by the register-priority table, as
		 * the high-priority loop does.  The index here is a
		 * *register address*; bounding it by
		 * ARRAY_SIZE(asc7621_params) tied the set of polled
		 * registers to the number of sysfs attributes and would
		 * silently skip any register address >= that count.
		 */
		for (i = 0; i < ARRAY_SIZE(asc7621_register_priorities); i++) {
			if (asc7621_register_priorities[i] == PRI_LOW) {
				data->reg[i] =
				    i2c_smbus_read_byte_data(client, i) & 0xff;
			}
		}
		data->last_low_reading = jiffies;
	}			/* last_reading */

	data->valid = 1;

	mutex_unlock(&data->update_lock);

	return data;
}
/*
* Standard detection and initialization below
*
* Helper function that checks if an address is valid
* for a particular chip.
*/
/*
 * Return 1 if @address appears in the given chip variant's address
 * list (terminated by I2C_CLIENT_END), 0 otherwise.
 */
static inline int valid_address_for_chip(int chip_type, int address)
{
	const unsigned short *addr = asc7621_chips[chip_type].addresses;

	for (; *addr != I2C_CLIENT_END; addr++) {
		if (*addr == address)
			return 1;
	}
	return 0;
}
/*
 * Sanity-check configuration register 0x40 (warn if LOCKed or not
 * READY), then clear LOCK and set START to begin monitoring.
 */
static void asc7621_init_client(struct i2c_client *client)
{
	int value;

	/* Warn if part was not "READY" */
	value = read_byte(client, 0x40);

	if (value & 0x02) {	/* LOCK bit */
		dev_err(&client->dev,
			"Client (%d,0x%02x) config is locked.\n",
			i2c_adapter_id(client->adapter), client->addr);
	}
	if (!(value & 0x04)) {	/* READY bit */
		dev_err(&client->dev, "Client (%d,0x%02x) is not ready.\n",
			i2c_adapter_id(client->adapter), client->addr);
	}

	/*
	 * Start monitoring
	 *
	 * Try to clear LOCK, Set START, save everything else
	 */
	value = (value & ~0x02) | 0x01;
	write_byte(client, 0x40, value & 0xff);
}
/*
 * Bind the driver: allocate per-device state, start the chip, create
 * all sysfs attributes from asc7621_params[], and register with the
 * hwmon class.  On error, all attribute files are removed (removing a
 * file that was never created is tolerated) and the devm allocation is
 * freed automatically.
 */
static int
asc7621_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct asc7621_data *data;
	int i, err;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -EIO;

	data = devm_kzalloc(&client->dev, sizeof(struct asc7621_data),
			    GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	i2c_set_clientdata(client, data);
	mutex_init(&data->update_lock);

	/* Initialize the asc7621 chip */
	asc7621_init_client(client);

	/* Create the sysfs entries */
	for (i = 0; i < ARRAY_SIZE(asc7621_params); i++) {
		err =
		    device_create_file(&client->dev,
				       &(asc7621_params[i].sda.dev_attr));
		if (err)
			goto exit_remove;
	}

	data->class_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->class_dev)) {
		err = PTR_ERR(data->class_dev);
		goto exit_remove;
	}

	return 0;

exit_remove:
	/* Unwind: remove every attribute, created or not. */
	for (i = 0; i < ARRAY_SIZE(asc7621_params); i++) {
		device_remove_file(&client->dev,
				   &(asc7621_params[i].sda.dev_attr));
	}
	return err;
}
/*
 * i2c-core detection callback: match the chip by company ID and
 * version/step registers against each entry in asc7621_chips[].
 * Fills info->type with the matched name; returns -ENODEV otherwise.
 */
static int asc7621_detect(struct i2c_client *client,
			  struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;
	int company, verstep, chip_index;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;

	for (chip_index = FIRST_CHIP; chip_index <= LAST_CHIP; chip_index++) {

		if (!valid_address_for_chip(chip_index, client->addr))
			continue;

		company = read_byte(client,
			asc7621_chips[chip_index].company_reg);
		verstep = read_byte(client,
			asc7621_chips[chip_index].verstep_reg);

		if (company == asc7621_chips[chip_index].company_id &&
		    verstep == asc7621_chips[chip_index].verstep_id) {
			strlcpy(info->type, asc7621_chips[chip_index].name,
				I2C_NAME_SIZE);

			dev_info(&adapter->dev, "Matched %s at 0x%02x\n",
				 asc7621_chips[chip_index].name, client->addr);
			return 0;
		}
	}

	return -ENODEV;
}
/* Unbind: unregister from hwmon and remove every sysfs attribute. */
static int asc7621_remove(struct i2c_client *client)
{
	struct asc7621_data *data = i2c_get_clientdata(client);
	int i;

	hwmon_device_unregister(data->class_dev);

	for (i = 0; i < ARRAY_SIZE(asc7621_params); i++) {
		device_remove_file(&client->dev,
				   &(asc7621_params[i].sda.dev_attr));
	}

	return 0;
}
/* i2c device-id table; the driver_data is the enum asc7621_type. */
static const struct i2c_device_id asc7621_id[] = {
	{"asc7621", asc7621},
	{"asc7621a", asc7621a},
	{},
};

MODULE_DEVICE_TABLE(i2c, asc7621_id);
/* Driver registration: auto-detection limited to hwmon-class adapters. */
static struct i2c_driver asc7621_driver = {
	.class = I2C_CLASS_HWMON,
	.driver = {
		.name = "asc7621",
	},
	.probe = asc7621_probe,
	.remove = asc7621_remove,
	.id_table = asc7621_id,
	.detect = asc7621_detect,
	.address_list = normal_i2c,
};
/*
 * Module init: build the register-priority table from the parameter
 * map (both MSB and LSB register addresses inherit the parameter's
 * priority), then register the i2c driver.
 */
static int __init sm_asc7621_init(void)
{
	int i, j;
	/*
	 * Collect all the registers needed into a single array.
	 * This way, if a register isn't actually used for anything,
	 * we don't retrieve it.
	 */

	for (i = 0; i < ARRAY_SIZE(asc7621_params); i++) {
		for (j = 0; j < ARRAY_SIZE(asc7621_params[i].msb); j++)
			asc7621_register_priorities[asc7621_params[i].msb[j]] =
			    asc7621_params[i].priority;
		for (j = 0; j < ARRAY_SIZE(asc7621_params[i].lsb); j++)
			asc7621_register_priorities[asc7621_params[i].lsb[j]] =
			    asc7621_params[i].priority;
	}
	return i2c_add_driver(&asc7621_driver);
}
/* Module exit: unregister the i2c driver. */
static void __exit sm_asc7621_exit(void)
{
	i2c_del_driver(&asc7621_driver);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("George Joseph");
MODULE_DESCRIPTION("Andigilog aSC7621 and aSC7621a driver");
module_init(sm_asc7621_init);
module_exit(sm_asc7621_exit);
| gpl-2.0 |
maxfu/legacy_android_kernel_exynos4210 | arch/arm/mach-imx/mach-pcm043.c | 2278 | 10808 | /*
* Copyright (C) 2009 Sascha Hauer, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/plat-ram.h>
#include <linux/memory.h>
#include <linux/gpio.h>
#include <linux/smc911x.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/iomux-mx35.h>
#include <mach/ulpi.h>
#include <mach/audmux.h>
#include "devices-imx35.h"
/*
 * Display modes available for the mx3fb framebuffer.  The mode actually
 * selected by default is named in mx3fb_pdata ("Sharp-LQ035Q7").
 */
static const struct fb_videomode fb_modedb[] = {
	{
		/* 240x320 @ 60 Hz */
		.name		= "Sharp-LQ035Q7",
		.refresh	= 60,
		.xres		= 240,
		.yres		= 320,
		.pixclock	= 185925,
		.left_margin	= 9,
		.right_margin	= 16,
		.upper_margin	= 7,
		.lower_margin	= 9,
		.hsync_len	= 1,
		.vsync_len	= 1,
		.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE | FB_SYNC_CLK_INVERT | FB_SYNC_CLK_IDLE_EN,
		.vmode		= FB_VMODE_NONINTERLACED,
		.flag		= 0,
	}, {
		/* 240x320 @ 60 Hz */
		.name		= "TX090",
		.refresh	= 60,
		.xres		= 240,
		.yres		= 320,
		.pixclock	= 38255,
		.left_margin	= 144,
		.right_margin	= 0,
		.upper_margin	= 7,
		.lower_margin	= 40,
		.hsync_len	= 96,
		.vsync_len	= 1,
		.sync		= FB_SYNC_VERT_HIGH_ACT | FB_SYNC_OE_ACT_HIGH,
		.vmode		= FB_VMODE_NONINTERLACED,
		.flag		= 0,
	},
};
/* IPU (image processing unit) platform data: only the IRQ base is set. */
static const struct ipu_platform_data mx3_ipu_data __initconst = {
	.irq_base = MXC_IPU_IRQ_START,
};

/* Framebuffer platform data; default mode is looked up by name in fb_modedb. */
static struct mx3fb_platform_data mx3fb_pdata __initdata = {
	.name		= "Sharp-LQ035Q7",
	.mode		= fb_modedb,
	.num_modes	= ARRAY_SIZE(fb_modedb),
};
/* NOR flash: 16-bit wide, memory-mapped at 0xa0000000..0xa1ffffff (32 MiB). */
static struct physmap_flash_data pcm043_flash_data = {
	.width  = 2,
};

static struct resource pcm043_flash_resource = {
	.start	= 0xa0000000,
	.end	= 0xa1ffffff,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device pcm043_flash = {
	.name	= "physmap-flash",
	.id	= 0,
	.dev	= {
		.platform_data	= &pcm043_flash_data,
	},
	.resource = &pcm043_flash_resource,
	.num_resources = 1,
};
/* Both UARTs are wired with RTS/CTS hardware flow control. */
static const struct imxuart_platform_data uart_pdata __initconst = {
	.flags = IMXUART_HAVE_RTSCTS,
};

/* I2C bus 0 runs at 50 kHz. */
static const struct imxi2c_platform_data pcm043_i2c0_data __initconst = {
	.bitrate = 50000,
};

/* On-board 4 KiB EEPROM (16-bit register addressing, 32-byte pages). */
static struct at24_platform_data board_eeprom = {
	.byte_len = 4096,
	.page_size = 32,
	.flags = AT24_FLAG_ADDR16,
};

/* Devices on I2C bus 0: the EEPROM and a PCF8563 RTC. */
static struct i2c_board_info pcm043_i2c_devices[] = {
	{
		I2C_BOARD_INFO("at24", 0x52), /* E0=0, E1=1, E2=0 */
		.platform_data = &board_eeprom,
	}, {
		I2C_BOARD_INFO("pcf8563", 0x51),
	},
};
/* Platform devices registered verbatim by pcm043_init(). */
static struct platform_device *devices[] __initdata = {
	&pcm043_flash,
};

/*
 * IOMUX pad setup for every on-board peripheral; applied in one shot by
 * pcm043_init() before any device is registered.
 */
static iomux_v3_cfg_t pcm043_pads[] = {
	/* UART1 */
	MX35_PAD_CTS1__UART1_CTS,
	MX35_PAD_RTS1__UART1_RTS,
	MX35_PAD_TXD1__UART1_TXD_MUX,
	MX35_PAD_RXD1__UART1_RXD_MUX,
	/* UART2 */
	MX35_PAD_CTS2__UART2_CTS,
	MX35_PAD_RTS2__UART2_RTS,
	MX35_PAD_TXD2__UART2_TXD_MUX,
	MX35_PAD_RXD2__UART2_RXD_MUX,
	/* FEC */
	MX35_PAD_FEC_TX_CLK__FEC_TX_CLK,
	MX35_PAD_FEC_RX_CLK__FEC_RX_CLK,
	MX35_PAD_FEC_RX_DV__FEC_RX_DV,
	MX35_PAD_FEC_COL__FEC_COL,
	MX35_PAD_FEC_RDATA0__FEC_RDATA_0,
	MX35_PAD_FEC_TDATA0__FEC_TDATA_0,
	MX35_PAD_FEC_TX_EN__FEC_TX_EN,
	MX35_PAD_FEC_MDC__FEC_MDC,
	MX35_PAD_FEC_MDIO__FEC_MDIO,
	MX35_PAD_FEC_TX_ERR__FEC_TX_ERR,
	MX35_PAD_FEC_RX_ERR__FEC_RX_ERR,
	MX35_PAD_FEC_CRS__FEC_CRS,
	MX35_PAD_FEC_RDATA1__FEC_RDATA_1,
	MX35_PAD_FEC_TDATA1__FEC_TDATA_1,
	MX35_PAD_FEC_RDATA2__FEC_RDATA_2,
	MX35_PAD_FEC_TDATA2__FEC_TDATA_2,
	MX35_PAD_FEC_RDATA3__FEC_RDATA_3,
	MX35_PAD_FEC_TDATA3__FEC_TDATA_3,
	/* I2C1 */
	MX35_PAD_I2C1_CLK__I2C1_SCL,
	MX35_PAD_I2C1_DAT__I2C1_SDA,
	/* Display */
	MX35_PAD_LD0__IPU_DISPB_DAT_0,
	MX35_PAD_LD1__IPU_DISPB_DAT_1,
	MX35_PAD_LD2__IPU_DISPB_DAT_2,
	MX35_PAD_LD3__IPU_DISPB_DAT_3,
	MX35_PAD_LD4__IPU_DISPB_DAT_4,
	MX35_PAD_LD5__IPU_DISPB_DAT_5,
	MX35_PAD_LD6__IPU_DISPB_DAT_6,
	MX35_PAD_LD7__IPU_DISPB_DAT_7,
	MX35_PAD_LD8__IPU_DISPB_DAT_8,
	MX35_PAD_LD9__IPU_DISPB_DAT_9,
	MX35_PAD_LD10__IPU_DISPB_DAT_10,
	MX35_PAD_LD11__IPU_DISPB_DAT_11,
	MX35_PAD_LD12__IPU_DISPB_DAT_12,
	MX35_PAD_LD13__IPU_DISPB_DAT_13,
	MX35_PAD_LD14__IPU_DISPB_DAT_14,
	MX35_PAD_LD15__IPU_DISPB_DAT_15,
	MX35_PAD_LD16__IPU_DISPB_DAT_16,
	MX35_PAD_LD17__IPU_DISPB_DAT_17,
	MX35_PAD_D3_HSYNC__IPU_DISPB_D3_HSYNC,
	MX35_PAD_D3_FPSHIFT__IPU_DISPB_D3_CLK,
	MX35_PAD_D3_DRDY__IPU_DISPB_D3_DRDY,
	MX35_PAD_CONTRAST__IPU_DISPB_CONTR,
	MX35_PAD_D3_VSYNC__IPU_DISPB_D3_VSYNC,
	MX35_PAD_D3_REV__IPU_DISPB_D3_REV,
	MX35_PAD_D3_CLS__IPU_DISPB_D3_CLS,
	/* gpio */
	MX35_PAD_ATA_CS0__GPIO2_6,
	/* USB host */
	MX35_PAD_I2C2_CLK__USB_TOP_USBH2_PWR,
	MX35_PAD_I2C2_DAT__USB_TOP_USBH2_OC,
	/* SSI */
	MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS,
	MX35_PAD_STXD4__AUDMUX_AUD4_TXD,
	MX35_PAD_SRXD4__AUDMUX_AUD4_RXD,
	MX35_PAD_SCK4__AUDMUX_AUD4_TXC,
	/* CAN2 */
	MX35_PAD_TX5_RX0__CAN2_TXCAN,
	MX35_PAD_TX4_RX1__CAN2_RXCAN,
	/* esdhc */
	MX35_PAD_SD1_CMD__ESDHC1_CMD,
	MX35_PAD_SD1_CLK__ESDHC1_CLK,
	MX35_PAD_SD1_DATA0__ESDHC1_DAT0,
	MX35_PAD_SD1_DATA1__ESDHC1_DAT1,
	MX35_PAD_SD1_DATA2__ESDHC1_DAT2,
	MX35_PAD_SD1_DATA3__ESDHC1_DAT3,
	MX35_PAD_ATA_DATA10__GPIO2_23, /* WriteProtect */
	MX35_PAD_ATA_DATA11__GPIO2_24, /* CardDetect */
};

/* GPIOs used for the AC97 reset sequences and the SD card slot signals. */
#define AC97_GPIO_TXFS	IMX_GPIO_NR(2, 31)
#define AC97_GPIO_TXD	IMX_GPIO_NR(2, 28)
#define AC97_GPIO_RESET	IMX_GPIO_NR(2, 0)
#define SD1_GPIO_WP	IMX_GPIO_NR(2, 23)
#define SD1_GPIO_CD	IMX_GPIO_NR(2, 24)
/*
 * AC97 warm reset: temporarily take over the TXFS pad as a GPIO, pulse it
 * high then low, then hand the pad back to the AUDMUX.  The order of the
 * pad-mux and GPIO operations is deliberate — do not reorder.
 */
static void pcm043_ac97_warm_reset(struct snd_ac97 *ac97)
{
	iomux_v3_cfg_t txfs_gpio = MX35_PAD_STXFS4__GPIO2_31;
	iomux_v3_cfg_t txfs = MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS;
	int ret;

	ret = gpio_request(AC97_GPIO_TXFS, "SSI");
	if (ret) {
		/* NOTE(review): printk without a KERN_* level */
		printk("failed to get GPIO_TXFS: %d\n", ret);
		return;
	}

	/* reroute the TXFS pad to GPIO mode while we drive it by hand */
	mxc_iomux_v3_setup_pad(txfs_gpio);

	/* warm reset: drive TXFS high for ~2us, then low again */
	gpio_direction_output(AC97_GPIO_TXFS, 1);
	udelay(2);
	gpio_set_value(AC97_GPIO_TXFS, 0);
	gpio_free(AC97_GPIO_TXFS);

	/* give the pad back to the AUDMUX */
	mxc_iomux_v3_setup_pad(txfs);
}
/*
 * AC97 cold reset: take TXFS, TXD and the reset line over as GPIOs, hold
 * TXFS/TXD low while pulsing reset low for ~10us, then restore the AUDMUX
 * pad functions.
 *
 * The error labels deliberately fall through: the success path reaches
 * err3 as well, so all successfully requested GPIOs are freed exactly
 * once regardless of where we bail out.
 */
static void pcm043_ac97_cold_reset(struct snd_ac97 *ac97)
{
	iomux_v3_cfg_t txfs_gpio = MX35_PAD_STXFS4__GPIO2_31;
	iomux_v3_cfg_t txfs = MX35_PAD_STXFS4__AUDMUX_AUD4_TXFS;
	iomux_v3_cfg_t txd_gpio = MX35_PAD_STXD4__GPIO2_28;
	iomux_v3_cfg_t txd = MX35_PAD_STXD4__AUDMUX_AUD4_TXD;
	iomux_v3_cfg_t reset_gpio = MX35_PAD_SD2_CMD__GPIO2_0;
	int ret;

	ret = gpio_request(AC97_GPIO_TXFS, "SSI");
	if (ret)
		goto err1;

	ret = gpio_request(AC97_GPIO_TXD, "SSI");
	if (ret)
		goto err2;

	ret = gpio_request(AC97_GPIO_RESET, "SSI");
	if (ret)
		goto err3;

	/* switch all three pads to GPIO mode */
	mxc_iomux_v3_setup_pad(txfs_gpio);
	mxc_iomux_v3_setup_pad(txd_gpio);
	mxc_iomux_v3_setup_pad(reset_gpio);

	gpio_direction_output(AC97_GPIO_TXFS, 0);
	gpio_direction_output(AC97_GPIO_TXD, 0);

	/* cold reset: pull the reset line low for ~10us, then release */
	gpio_direction_output(AC97_GPIO_RESET, 0);
	udelay(10);
	gpio_direction_output(AC97_GPIO_RESET, 1);

	/* hand TXD/TXFS back to the AUDMUX */
	mxc_iomux_v3_setup_pad(txd);
	mxc_iomux_v3_setup_pad(txfs);

	gpio_free(AC97_GPIO_RESET);
err3:
	gpio_free(AC97_GPIO_TXD);
err2:
	gpio_free(AC97_GPIO_TXFS);
err1:
	if (ret)
		/* NOTE(review): printk without a KERN_* level */
		printk("%s failed with %d\n", __func__, ret);
	mdelay(1);
}
/* SSI runs in AC97 mode and uses the GPIO-based reset helpers above. */
static const struct imx_ssi_platform_data pcm043_ssi_pdata __initconst = {
	.ac97_reset	= pcm043_ac97_cold_reset,
	.ac97_warm_reset = pcm043_ac97_warm_reset,
	.flags		= IMX_SSI_USE_AC97,
};

/* 8-bit NAND with hardware ECC.  (Name kept from the pcm037 copy-paste.) */
static const struct mxc_nand_platform_data
pcm037_nand_board_info __initconst = {
	.width = 1,
	.hw_ecc = 1,
};
/* Per-port USB PHY setup for the OTG port (differential/unidirectional). */
static int pcm043_otg_init(struct platform_device *pdev)
{
	return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_DIFF_UNI);
}

/*
 * OTG host platform data.  .otg is filled in at runtime by pcm043_init()
 * when otg_mode=host, hence not const/__initconst.
 */
static struct mxc_usbh_platform_data otg_pdata __initdata = {
	.init	= pcm043_otg_init,
	.portsc	= MXC_EHCI_MODE_UTMI,
};

/* Per-port PHY setup for host port 1 (internal PHY, serial interface). */
static int pcm043_usbh1_init(struct platform_device *pdev)
{
	return mx35_initialize_usb_hw(pdev->id, MXC_EHCI_INTERFACE_SINGLE_UNI |
			MXC_EHCI_INTERNAL_PHY | MXC_EHCI_IPPUE_DOWN);
}

static const struct mxc_usbh_platform_data usbh1_pdata __initconst = {
	.init	= pcm043_usbh1_init,
	.portsc	= MXC_EHCI_MODE_SERIAL,
};

/* Gadget-mode platform data, used when the OTG port is not in host mode. */
static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
	.operating_mode	= FSL_USB2_DR_DEVICE,
	.phy_mode	= FSL_USB2_PHY_UTMI,
};
/* 1 = OTG port acts as host, 0 = device (the default). */
static int otg_mode_host;

/*
 * Parse the "otg_mode=" kernel command line option ("host" or "device").
 *
 * Return 1 to mark the option as handled: an obsolete __setup() handler
 * that returns 0 causes the (to init unknown) option to be passed on to
 * the init process / its environment.
 */
static int __init pcm043_otg_mode(char *options)
{
	if (!strcmp(options, "host"))
		otg_mode_host = 1;
	else if (!strcmp(options, "device"))
		otg_mode_host = 0;
	else
		pr_info("otg_mode neither \"host\" nor \"device\". "
			"Defaulting to device\n");
	return 1;
}
__setup("otg_mode=", pcm043_otg_mode);
/* SD/MMC slot 1: write-protect and card-detect are plain GPIOs. */
static struct esdhc_platform_data sd1_pdata = {
	.wp_gpio = SD1_GPIO_WP,
	.cd_gpio = SD1_GPIO_CD,
};
/*
 * Board specific initialization.
 *
 * Pads are muxed and the AUDMUX is configured first; devices are then
 * registered.  The OTG port is registered either as an EHCI host (with a
 * ULPI transceiver) or as a gadget controller, depending on otg_mode_host.
 */
static void __init pcm043_init(void)
{
	mxc_iomux_v3_setup_multiple_pads(pcm043_pads, ARRAY_SIZE(pcm043_pads));

	/* route SSI audio through the AUDMUX: port 3 <-> port 0 */
	mxc_audmux_v2_configure_port(3,
			MXC_AUDMUX_V2_PTCR_SYN | /* 4wire mode */
			MXC_AUDMUX_V2_PTCR_TFSEL(0) |
			MXC_AUDMUX_V2_PTCR_TFSDIR,
			MXC_AUDMUX_V2_PDCR_RXDSEL(0));

	mxc_audmux_v2_configure_port(0,
			MXC_AUDMUX_V2_PTCR_SYN | /* 4wire mode */
			MXC_AUDMUX_V2_PTCR_TCSEL(3) |
			MXC_AUDMUX_V2_PTCR_TCLKDIR, /* clock is output */
			MXC_AUDMUX_V2_PDCR_RXDSEL(3));

	imx35_add_fec(NULL);
	platform_add_devices(devices, ARRAY_SIZE(devices));
	imx35_add_imx2_wdt(NULL);

	imx35_add_imx_uart0(&uart_pdata);
	imx35_add_mxc_nand(&pcm037_nand_board_info);
	imx35_add_imx_ssi(0, &pcm043_ssi_pdata);

	imx35_add_imx_uart1(&uart_pdata);

	/* board info must be registered before the adapter is added */
	i2c_register_board_info(0, pcm043_i2c_devices,
			ARRAY_SIZE(pcm043_i2c_devices));

	imx35_add_imx_i2c0(&pcm043_i2c0_data);

	imx35_add_ipu_core(&mx3_ipu_data);
	imx35_add_mx3_sdc_fb(&mx3fb_pdata);

	/* OTG port: host (via ULPI transceiver) or gadget, never both */
	if (otg_mode_host) {
		otg_pdata.otg = imx_otg_ulpi_create(ULPI_OTG_DRVVBUS |
				ULPI_OTG_DRVVBUS_EXT);
		if (otg_pdata.otg)
			imx35_add_mxc_ehci_otg(&otg_pdata);
	}
	imx35_add_mxc_ehci_hs(&usbh1_pdata);

	if (!otg_mode_host)
		imx35_add_fsl_usb2_udc(&otg_device_pdata);

	imx35_add_flexcan1(NULL);
	imx35_add_sdhci_esdhc_imx(0, &sd1_pdata);
}
/* System timer init: just bring up the i.MX35 clock tree. */
static void __init pcm043_timer_init(void)
{
	mx35_clocks_init();
}

struct sys_timer pcm043_timer = {
	.init	= pcm043_timer_init,
};
/* Machine descriptor for the Phytec phyCORE-i.MX35 (pcm043) board. */
MACHINE_START(PCM043, "Phytec Phycore pcm043")
	/* Maintainer: Pengutronix */
	.boot_params = MX3x_PHYS_OFFSET + 0x100,
	.map_io = mx35_map_io,
	.init_early = imx35_init_early,
	.init_irq = mx35_init_irq,
	.timer = &pcm043_timer,
	.init_machine = pcm043_init,
MACHINE_END
| gpl-2.0 |
TeamHackYU/SKernel_Yu | drivers/gpu/drm/r128/r128_ioc32.c | 2534 | 6987 | /**
* \file r128_ioc32.c
*
* 32-bit ioctl compatibility routines for the R128 DRM.
*
* \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
*
* Copyright (C) Paul Mackerras 2005
* Copyright (C) Egbert Eich 2003,2004
* Copyright (C) Dave Airlie 2005
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
#include <drm/drmP.h>
#include <drm/r128_drm.h>
/*
 * 32-bit layout of drm_r128_init_t as seen by 32-bit userspace.  This is
 * kernel/user ABI: field order and types must not be changed.
 */
typedef struct drm_r128_init32 {
	int func;
	unsigned int sarea_priv_offset;
	int is_pci;
	int cce_mode;
	int cce_secure;
	int ring_size;
	int usec_timeout;

	unsigned int fb_bpp;
	unsigned int front_offset, front_pitch;
	unsigned int back_offset, back_pitch;
	unsigned int depth_bpp;
	unsigned int depth_offset, depth_pitch;
	unsigned int span_offset;

	unsigned int fb_offset;
	unsigned int mmio_offset;
	unsigned int ring_offset;
	unsigned int ring_rptr_offset;
	unsigned int buffers_offset;
	unsigned int agp_textures_offset;
} drm_r128_init32_t;
/*
 * Translate a 32-bit DRM_R128_INIT ioctl: copy the 32-bit struct in,
 * build a native drm_r128_init_t on the compat user stack field by field,
 * then forward to the regular ioctl path.
 */
static int compat_r128_init(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	drm_r128_init32_t init32;
	drm_r128_init_t __user *init;

	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
		return -EFAULT;

	/* scratch area in the 32-bit process's address space */
	init = compat_alloc_user_space(sizeof(*init));
	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
	    || __put_user(init32.func, &init->func)
	    || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
	    || __put_user(init32.is_pci, &init->is_pci)
	    || __put_user(init32.cce_mode, &init->cce_mode)
	    || __put_user(init32.cce_secure, &init->cce_secure)
	    || __put_user(init32.ring_size, &init->ring_size)
	    || __put_user(init32.usec_timeout, &init->usec_timeout)
	    || __put_user(init32.fb_bpp, &init->fb_bpp)
	    || __put_user(init32.front_offset, &init->front_offset)
	    || __put_user(init32.front_pitch, &init->front_pitch)
	    || __put_user(init32.back_offset, &init->back_offset)
	    || __put_user(init32.back_pitch, &init->back_pitch)
	    || __put_user(init32.depth_bpp, &init->depth_bpp)
	    || __put_user(init32.depth_offset, &init->depth_offset)
	    || __put_user(init32.depth_pitch, &init->depth_pitch)
	    || __put_user(init32.span_offset, &init->span_offset)
	    || __put_user(init32.fb_offset, &init->fb_offset)
	    || __put_user(init32.mmio_offset, &init->mmio_offset)
	    || __put_user(init32.ring_offset, &init->ring_offset)
	    || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
	    || __put_user(init32.buffers_offset, &init->buffers_offset)
	    || __put_user(init32.agp_textures_offset,
			  &init->agp_textures_offset))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init);
}
/* 32-bit ABI layout of drm_r128_depth_t; pointers travel as u32. */
typedef struct drm_r128_depth32 {
	int func;
	int n;
	u32 x;
	u32 y;
	u32 buffer;
	u32 mask;
} drm_r128_depth32_t;

/*
 * Translate a 32-bit DRM_R128_DEPTH ioctl.  The u32 fields carry 32-bit
 * user-space addresses; they are widened to native user pointers before
 * being stored into the 64-bit struct.
 */
static int compat_r128_depth(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	drm_r128_depth32_t depth32;
	drm_r128_depth_t __user *depth;

	if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32)))
		return -EFAULT;

	depth = compat_alloc_user_space(sizeof(*depth));
	if (!access_ok(VERIFY_WRITE, depth, sizeof(*depth))
	    || __put_user(depth32.func, &depth->func)
	    || __put_user(depth32.n, &depth->n)
	    || __put_user((int __user *)(unsigned long)depth32.x, &depth->x)
	    || __put_user((int __user *)(unsigned long)depth32.y, &depth->y)
	    || __put_user((unsigned int __user *)(unsigned long)depth32.buffer,
			  &depth->buffer)
	    || __put_user((unsigned char __user *)(unsigned long)depth32.mask,
			  &depth->mask))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
}
/* 32-bit ABI layout of drm_r128_stipple_t; the pattern pointer as u32. */
typedef struct drm_r128_stipple32 {
	u32 mask;
} drm_r128_stipple32_t;

/* Translate a 32-bit DRM_R128_STIPPLE ioctl (single user pointer field). */
static int compat_r128_stipple(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	drm_r128_stipple32_t stipple32;
	drm_r128_stipple_t __user *stipple;

	if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32)))
		return -EFAULT;

	stipple = compat_alloc_user_space(sizeof(*stipple));
	if (!access_ok(VERIFY_WRITE, stipple, sizeof(*stipple))
	    || __put_user((unsigned int __user *)(unsigned long)stipple32.mask,
			  &stipple->mask))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
}
/* 32-bit ABI layout of drm_r128_getparam_t; result pointer as u32. */
typedef struct drm_r128_getparam32 {
	int param;
	u32 value;
} drm_r128_getparam32_t;

/* Translate a 32-bit DRM_R128_GETPARAM ioctl. */
static int compat_r128_getparam(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	drm_r128_getparam32_t getparam32;
	drm_r128_getparam_t __user *getparam;

	if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
		return -EFAULT;

	getparam = compat_alloc_user_space(sizeof(*getparam));
	if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
	    || __put_user(getparam32.param, &getparam->param)
	    || __put_user((void __user *)(unsigned long)getparam32.value,
			  &getparam->value))
		return -EFAULT;

	return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
}
/*
 * Translation handlers, indexed by driver-relative ioctl number.  Ioctls
 * without an entry here need no translation and go straight to drm_ioctl().
 */
drm_ioctl_compat_t *r128_compat_ioctls[] = {
	[DRM_R128_INIT] = compat_r128_init,
	[DRM_R128_DEPTH] = compat_r128_depth,
	[DRM_R128_STIPPLE] = compat_r128_stipple,
	[DRM_R128_GETPARAM] = compat_r128_getparam,
};
/**
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/dri/card<n>.
 *
 * Core DRM ioctls (below DRM_COMMAND_BASE) go through the generic compat
 * layer; driver ioctls with a translation handler use it, all others are
 * forwarded to drm_ioctl() unchanged.
 *
 * \param filp file pointer.
 * \param cmd command.
 * \param arg user argument.
 * \return zero on success or negative number on failure.
 */
long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	unsigned int nr = DRM_IOCTL_NR(cmd);

	if (nr < DRM_COMMAND_BASE)
		return drm_compat_ioctl(filp, cmd, arg);

	if (nr - DRM_COMMAND_BASE < DRM_ARRAY_SIZE(r128_compat_ioctls)) {
		drm_ioctl_compat_t *fn =
			r128_compat_ioctls[nr - DRM_COMMAND_BASE];

		if (fn != NULL)
			return (*fn) (filp, cmd, arg);
	}

	return drm_ioctl(filp, cmd, arg);
}
| gpl-2.0 |
EPDCenterSpain/kernel_odys_genio | drivers/media/rc/keymaps/rc-lme2510.c | 2790 | 2730 | /* LME2510 remote control
*
*
* Copyright (C) 2010 Malcolm Priestley (tvboxspy@gmail.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <media/rc-map.h>
/*
 * Scancode -> keycode table.  Three generations of LME2510 remotes are
 * supported, distinguishable by their scancode width/prefix.
 */
static struct rc_map_table lme2510_rc[] = {
	/* Type 1 - 26 buttons */
	{ 0x10ed45, KEY_0 },
	{ 0x10ed5f, KEY_1 },
	{ 0x10ed50, KEY_2 },
	{ 0x10ed5d, KEY_3 },
	{ 0x10ed41, KEY_4 },
	{ 0x10ed0a, KEY_5 },
	{ 0x10ed42, KEY_6 },
	{ 0x10ed47, KEY_7 },
	{ 0x10ed49, KEY_8 },
	{ 0x10ed05, KEY_9 },
	{ 0x10ed43, KEY_POWER },
	{ 0x10ed46, KEY_SUBTITLE },
	{ 0x10ed06, KEY_PAUSE },
	{ 0x10ed03, KEY_MEDIA_REPEAT},
	{ 0x10ed02, KEY_PAUSE },
	{ 0x10ed5e, KEY_VOLUMEUP },
	{ 0x10ed5c, KEY_VOLUMEDOWN },
	{ 0x10ed09, KEY_CHANNELUP },
	{ 0x10ed1a, KEY_CHANNELDOWN },
	{ 0x10ed1e, KEY_PLAY },
	{ 0x10ed1b, KEY_ZOOM },
	{ 0x10ed59, KEY_MUTE },
	{ 0x10ed5a, KEY_TV },
	{ 0x10ed18, KEY_RECORD },
	{ 0x10ed07, KEY_EPG },
	{ 0x10ed01, KEY_STOP },
	/* Type 2 - 20 buttons */
	{ 0xbf15, KEY_0 },
	{ 0xbf08, KEY_1 },
	{ 0xbf09, KEY_2 },
	{ 0xbf0a, KEY_3 },
	{ 0xbf0c, KEY_4 },
	{ 0xbf0d, KEY_5 },
	{ 0xbf0e, KEY_6 },
	{ 0xbf10, KEY_7 },
	{ 0xbf11, KEY_8 },
	{ 0xbf12, KEY_9 },
	{ 0xbf00, KEY_POWER },
	{ 0xbf04, KEY_MEDIA_REPEAT}, /* Recall */
	{ 0xbf1a, KEY_PAUSE }, /* Timeshift */
	{ 0xbf02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
	{ 0xbf06, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
	{ 0xbf01, KEY_CHANNELUP },
	{ 0xbf05, KEY_CHANNELDOWN },
	{ 0xbf14, KEY_ZOOM },
	{ 0xbf18, KEY_RECORD },
	{ 0xbf16, KEY_STOP },
	/* Type 3 - 20 buttons */
	{ 0x1c, KEY_0 },
	{ 0x07, KEY_1 },
	{ 0x15, KEY_2 },
	{ 0x09, KEY_3 },
	{ 0x16, KEY_4 },
	{ 0x19, KEY_5 },
	{ 0x0d, KEY_6 },
	{ 0x0c, KEY_7 },
	{ 0x18, KEY_8 },
	{ 0x5e, KEY_9 },
	{ 0x45, KEY_POWER },
	{ 0x44, KEY_MEDIA_REPEAT}, /* Recall */
	{ 0x4a, KEY_PAUSE }, /* Timeshift */
	{ 0x47, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
	{ 0x43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
	{ 0x46, KEY_CHANNELUP },
	{ 0x40, KEY_CHANNELDOWN },
	{ 0x08, KEY_ZOOM },
	{ 0x42, KEY_RECORD },
	{ 0x5a, KEY_STOP },
};
/* rc-core registration wrapper around the NEC-protocol table above. */
static struct rc_map_list lme2510_map = {
	.map = {
		.scan    = lme2510_rc,
		.size    = ARRAY_SIZE(lme2510_rc),
		.rc_type = RC_TYPE_NEC,
		.name    = RC_MAP_LME2510,
	}
};
/* Register the keymap with rc-core on module load. */
static int __init init_rc_lme2510_map(void)
{
	return rc_map_register(&lme2510_map);
}

/* Unregister the keymap on module unload. */
static void __exit exit_rc_lme2510_map(void)
{
	rc_map_unregister(&lme2510_map);
}
module_init(init_rc_lme2510_map)
module_exit(exit_rc_lme2510_map)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
| gpl-2.0 |
PhSchmitt/mptcp-nexus-a444 | drivers/mtd/maps/dc21285.c | 4838 | 5533 | /*
* MTD map driver for flash on the DC21285 (the StrongARM-110 companion chip)
*
* (C) 2000 Nicolas Pitre <nico@fluxnic.net>
*
* This code is GPL
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <asm/hardware/dec21285.h>
#include <asm/mach-types.h>
/* MTD device created by init_dc21285(), torn down in cleanup_dc21285(). */
static struct mtd_info *dc21285_mtd;

#ifdef CONFIG_ARCH_NETWINDER
/*
 * This is really ugly, but it seems to be the only
 * reliable way to do it, as the CPLD state machine
 * is unpredictable.  So we have a 25us penalty per
 * write access.
 */
static void nw_en_write(void)
{
	unsigned long flags;

	/*
	 * we want to write a bit pattern XXX1 to Xilinx to enable
	 * the write gate, which will be open for about the next 2ms.
	 */
	spin_lock_irqsave(&nw_gpio_lock, flags);
	nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
	spin_unlock_irqrestore(&nw_gpio_lock, flags);

	/*
	 * let the ISA bus to catch on...
	 */
	udelay(25);
}
#else
#define nw_en_write() do { } while (0)
#endif
/* Read one byte from the memory-mapped flash window. */
static map_word dc21285_read8(struct map_info *map, unsigned long ofs)
{
	const uint8_t *addr = (uint8_t *)(map->virt + ofs);
	map_word val;

	val.x[0] = *addr;
	return val;
}
/* Read one 16-bit word from the memory-mapped flash window. */
static map_word dc21285_read16(struct map_info *map, unsigned long ofs)
{
	const uint16_t *addr = (uint16_t *)(map->virt + ofs);
	map_word val;

	val.x[0] = *addr;
	return val;
}
/* Read one 32-bit word from the memory-mapped flash window. */
static map_word dc21285_read32(struct map_info *map, unsigned long ofs)
{
	const uint32_t *addr = (uint32_t *)(map->virt + ofs);
	map_word val;

	val.x[0] = *addr;
	return val;
}
/* Bulk read: the flash is directly mapped, so a plain memcpy suffices. */
static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
	const void *src = (void *)(map->virt + from);

	memcpy(to, src, len);
}
/*
 * Write one byte.  The byte lane (adr & 3) is latched into the DC21285
 * ROM write register and the access itself is issued word-aligned —
 * presumably how the chip routes sub-word flash writes; keep the
 * ROMWRITEREG store before the data store.
 */
static void dc21285_write8(struct map_info *map, const map_word d, unsigned long adr)
{
	if (machine_is_netwinder())
		nw_en_write();	/* open the CPLD write gate first */
	*CSR_ROMWRITEREG = adr & 3;
	adr &= ~3;
	*(uint8_t*)(map->virt + adr) = d.x[0];
}
/*
 * Write one 16-bit word; same ROMWRITEREG byte-lane dance as
 * dc21285_write8() for the sub-word access.
 */
static void dc21285_write16(struct map_info *map, const map_word d, unsigned long adr)
{
	if (machine_is_netwinder())
		nw_en_write();	/* open the CPLD write gate first */
	*CSR_ROMWRITEREG = adr & 3;
	adr &= ~3;
	*(uint16_t*)(map->virt + adr) = d.x[0];
}
/* Write one full 32-bit word; no ROMWRITEREG setup needed for full words. */
static void dc21285_write32(struct map_info *map, const map_word d, unsigned long adr)
{
	if (machine_is_netwinder())
		nw_en_write();	/* open the CPLD write gate first */
	*(uint32_t*)(map->virt + adr) = d.x[0];
}
/* Bulk write in 32-bit units; each word goes through dc21285_write32(). */
static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	for (; len > 0; len -= 4, to += 4, from += 4) {
		map_word d;

		d.x[0] = *(const uint32_t *)from;
		dc21285_write32(map, d, to);
	}
}
/* Bulk write in 16-bit units; each word goes through dc21285_write16(). */
static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	for (; len > 0; len -= 2, to += 2, from += 2) {
		map_word d;

		d.x[0] = *(const uint16_t *)from;
		dc21285_write16(map, d, to);
	}
}
/*
 * Bulk write in byte units; each byte goes through dc21285_write8().
 *
 * Bug fix: the original body copied exactly one byte and ignored @len —
 * unlike its _16/_32 siblings it had no loop, so any copy_to longer than
 * one byte silently wrote only the first byte.  Loop over @len like the
 * other variants do.
 */
static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
	while (len > 0) {
		map_word d;
		d.x[0] = *((uint8_t*)from);
		dc21285_write8(map, d, to);
		from++;
		to++;
		len--;
	}
}
/*
 * Map descriptor; bankwidth and the read/write/copy_to hooks are filled
 * in at init time once the bus width has been probed.
 */
static struct map_info dc21285_map = {
	.name = "DC21285 flash",
	.phys = NO_XIP,
	.size = 16*1024*1024,
	.copy_from = dc21285_copy_from,
};

/* Partition stuff */
static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
/*
 * Probe and register the flash: read the ROM width from CSR_SA110_CNTL,
 * install matching accessors, map the 16 MiB window, run the chip probe
 * (CFI on EBSA285, JEDEC elsewhere) and register partitions.  Returns 0
 * on success or a negative errno.
 */
static int __init init_dc21285(void)
{
	/* Determine bankwidth */
	switch (*CSR_SA110_CNTL & (3<<14)) {
	case SA110_CNTL_ROMWIDTH_8:
		dc21285_map.bankwidth = 1;
		dc21285_map.read = dc21285_read8;
		dc21285_map.write = dc21285_write8;
		dc21285_map.copy_to = dc21285_copy_to_8;
		break;
	case SA110_CNTL_ROMWIDTH_16:
		dc21285_map.bankwidth = 2;
		dc21285_map.read = dc21285_read16;
		dc21285_map.write = dc21285_write16;
		dc21285_map.copy_to = dc21285_copy_to_16;
		break;
	case SA110_CNTL_ROMWIDTH_32:
		dc21285_map.bankwidth = 4;
		dc21285_map.read = dc21285_read32;
		dc21285_map.write = dc21285_write32;
		dc21285_map.copy_to = dc21285_copy_to_32;
		break;
	default:
		printk (KERN_ERR "DC21285 flash: undefined bankwidth\n");
		return -ENXIO;
	}
	printk (KERN_NOTICE "DC21285 flash support (%d-bit bankwidth)\n",
		dc21285_map.bankwidth*8);

	/* Let's map the flash area */
	dc21285_map.virt = ioremap(DC21285_FLASH, 16*1024*1024);
	if (!dc21285_map.virt) {
		printk("Failed to ioremap\n");
		return -EIO;
	}

	if (machine_is_ebsa285()) {
		dc21285_mtd = do_map_probe("cfi_probe", &dc21285_map);
	} else {
		dc21285_mtd = do_map_probe("jedec_probe", &dc21285_map);
	}

	if (!dc21285_mtd) {
		iounmap(dc21285_map.virt);
		return -ENXIO;
	}

	dc21285_mtd->owner = THIS_MODULE;
	mtd_device_parse_register(dc21285_mtd, probes, NULL, NULL, 0);

	if(machine_is_ebsa285()) {
		/*
		 * Flash timing is determined with bits 19-16 of the
		 * CSR_SA110_CNTL. The value is the number of wait cycles, or
		 * 0 for 16 cycles (the default). Cycles are 20 ns.
		 * Here we use 7 for 140 ns flash chips.
		 */
		/* access time */
		*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x000f0000) | (7 << 16));
		/* burst time */
		*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x00f00000) | (7 << 20));
		/* tristate time */
		*CSR_SA110_CNTL = ((*CSR_SA110_CNTL & ~0x0f000000) | (7 << 24));
	}

	return 0;
}
/* Teardown in reverse order of init: unregister, destroy, then unmap. */
static void __exit cleanup_dc21285(void)
{
	mtd_device_unregister(dc21285_mtd);
	map_destroy(dc21285_mtd);
	iounmap(dc21285_map.virt);
}
module_init(init_dc21285);
module_exit(cleanup_dc21285);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
MODULE_DESCRIPTION("MTD map driver for DC21285 boards");
| gpl-2.0 |
NaughtyMonkey75/kernel_htc_msm8974 | arch/arm/mach-omap2/clkt_dpll.c | 4838 | 10585 | /*
* OMAP2/3/4 DPLL clock functions
*
* Copyright (C) 2005-2008 Texas Instruments, Inc.
* Copyright (C) 2004-2010 Nokia Corporation
*
* Contacts:
* Richard Woodruff <r-woodruff2@ti.com>
* Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <asm/div64.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include "clock.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"
/* DPLL rate rounding: minimum DPLL multiplier, divider values */
#define DPLL_MIN_MULTIPLIER		2
#define DPLL_MIN_DIVIDER		1

/* Possible error results from _dpll_test_mult */
#define DPLL_MULT_UNDERFLOW		-1

/*
 * Scale factor to mitigate roundoff errors in DPLL rate rounding.
 * The higher the scale factor, the greater the risk of arithmetic overflow,
 * but the closer the rounded rate to the target rate.  DPLL_SCALE_FACTOR
 * must be a power of DPLL_SCALE_BASE.
 */
#define DPLL_SCALE_FACTOR		64
#define DPLL_SCALE_BASE			2
/* Half of one scale step — used to round-to-nearest when unscaling m */
#define DPLL_ROUNDING_VAL		((DPLL_SCALE_BASE / 2) * \
					 (DPLL_SCALE_FACTOR / DPLL_SCALE_BASE))

/* DPLL valid Fint frequency band limits - from 34xx TRM Section 4.7.6.2 */
#define OMAP3430_DPLL_FINT_BAND1_MIN	750000
#define OMAP3430_DPLL_FINT_BAND1_MAX	2100000
#define OMAP3430_DPLL_FINT_BAND2_MIN	7500000
#define OMAP3430_DPLL_FINT_BAND2_MAX	21000000

/*
 * DPLL valid Fint frequency range for OMAP36xx and OMAP4xxx.
 * From device data manual section 4.3 "DPLL and DLL Specifications".
 */
#define OMAP3PLUS_DPLL_FINT_JTYPE_MIN	500000
#define OMAP3PLUS_DPLL_FINT_JTYPE_MAX	2500000
#define OMAP3PLUS_DPLL_FINT_MIN		32000
#define OMAP3PLUS_DPLL_FINT_MAX		52000000

/* _dpll_test_fint() return codes */
#define DPLL_FINT_UNDERFLOW		-1
#define DPLL_FINT_INVALID		-2
/* Private functions */
/*
* _dpll_test_fint - test whether an Fint value is valid for the DPLL
* @clk: DPLL struct clk to test
* @n: divider value (N) to test
*
* Tests whether a particular divider @n will result in a valid DPLL
* internal clock frequency Fint. See the 34xx TRM 4.7.6.2 "DPLL Jitter
* Correction". Returns 0 if OK, -1 if the enclosing loop can terminate
* (assuming that it is counting N upwards), or -2 if the enclosing loop
* should skip to the next iteration (again assuming N is increasing).
*/
static int _dpll_test_fint(struct clk *clk, u8 n)
{
	struct dpll_data *dd;
	long fint, fint_min, fint_max;
	int ret = 0;

	dd = clk->dpll_data;

	/* DPLL divider must result in a valid jitter correction val */
	fint = clk->parent->rate / n;

	/* Pick the Fint window for this SoC / DPLL type */
	if (cpu_is_omap24xx()) {
		/* Should not be called for OMAP2, so warn if it is called */
		WARN(1, "No fint limits available for OMAP2!\n");
		return DPLL_FINT_INVALID;
	} else if (cpu_is_omap3430()) {
		/* 3430 has two valid bands; the gap is rejected below */
		fint_min = OMAP3430_DPLL_FINT_BAND1_MIN;
		fint_max = OMAP3430_DPLL_FINT_BAND2_MAX;
	} else if (dd->flags & DPLL_J_TYPE) {
		fint_min = OMAP3PLUS_DPLL_FINT_JTYPE_MIN;
		fint_max = OMAP3PLUS_DPLL_FINT_JTYPE_MAX;
	} else {
		fint_min = OMAP3PLUS_DPLL_FINT_MIN;
		fint_max = OMAP3PLUS_DPLL_FINT_MAX;
	}

	if (fint < fint_min) {
		/* any larger n only lowers Fint further: shrink the search */
		pr_debug("rejecting n=%d due to Fint failure, "
			 "lowering max_divider\n", n);
		dd->max_divider = n;
		ret = DPLL_FINT_UNDERFLOW;
	} else if (fint > fint_max) {
		/* n too small: raise the lower bound of the search */
		pr_debug("rejecting n=%d due to Fint failure, "
			 "boosting min_divider\n", n);
		dd->min_divider = n;
		ret = DPLL_FINT_INVALID;
	} else if (cpu_is_omap3430() && fint > OMAP3430_DPLL_FINT_BAND1_MAX &&
		   fint < OMAP3430_DPLL_FINT_BAND2_MIN) {
		/* 3430 only: Fint falls in the forbidden gap between bands */
		pr_debug("rejecting n=%d due to Fint failure\n", n);
		ret = DPLL_FINT_INVALID;
	}

	return ret;
}
/*
 * Compute parent_rate * m / n in 64-bit arithmetic so the intermediate
 * product cannot overflow; the quotient is returned truncated.
 */
static unsigned long _dpll_compute_new_rate(unsigned long parent_rate,
					    unsigned int m, unsigned int n)
{
	unsigned long long rate;

	rate = (unsigned long long)parent_rate * m;
	do_div(rate, n);	/* do_div() divides in place */

	return rate;
}
/*
* _dpll_test_mult - test a DPLL multiplier value
* @m: pointer to the DPLL m (multiplier) value under test
* @n: current DPLL n (divider) value under test
* @new_rate: pointer to storage for the resulting rounded rate
* @target_rate: the desired DPLL rate
* @parent_rate: the DPLL's parent clock rate
*
* This code tests a DPLL multiplier value, ensuring that the
* resulting rate will not be higher than the target_rate, and that
* the multiplier value itself is valid for the DPLL. Initially, the
* integer pointed to by the m argument should be prescaled by
* multiplying by DPLL_SCALE_FACTOR. The code will replace this with
* a non-scaled m upon return. This non-scaled m will result in a
* new_rate as close as possible to target_rate (but not greater than
* target_rate) given the current (parent_rate, n, prescaled m)
* triple. Returns DPLL_MULT_UNDERFLOW in the event that the
* non-scaled m attempted to underflow, which can allow the calling
* function to bail out early; or 0 upon success.
*/
static int _dpll_test_mult(int *m, int n, unsigned long *new_rate,
			   unsigned long target_rate,
			   unsigned long parent_rate)
{
	int r = 0, carry = 0;

	/* Unscale m and round if necessary */
	if (*m % DPLL_SCALE_FACTOR >= DPLL_ROUNDING_VAL)
		carry = 1;
	*m = (*m / DPLL_SCALE_FACTOR) + carry;

	/*
	 * The new rate must be <= the target rate to avoid programming
	 * a rate that is impossible for the hardware to handle
	 */
	*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);
	if (*new_rate > target_rate) {
		/* overshoot: back m off by one and recompute below */
		(*m)--;
		*new_rate = 0;
	}

	/* Guard against m underflow */
	if (*m < DPLL_MIN_MULTIPLIER) {
		*m = DPLL_MIN_MULTIPLIER;
		*new_rate = 0;
		r = DPLL_MULT_UNDERFLOW;
	}

	/* recompute if m was adjusted by either clamp above */
	if (*new_rate == 0)
		*new_rate = _dpll_compute_new_rate(parent_rate, *m, n);

	return r;
}
/* Public functions */
/**
 * omap2_init_dpll_parent - reparent a DPLL clk if the hardware is in bypass
 * @clk: struct clk * of a DPLL
 *
 * Reads the DPLL's enable bitfield and, when the hardware reports one of
 * its bypass modes, points the struct clk at dd->clk_bypass so the clock
 * tree matches reality.  The set of bypass modes differs per SoC family.
 * No-op if @clk has no dpll_data.
 */
void omap2_init_dpll_parent(struct clk *clk)
{
	u32 v;
	struct dpll_data *dd;

	dd = clk->dpll_data;
	if (!dd)
		return;

	/* extract the EN_DPLL mode field */
	v = __raw_readl(dd->control_reg);
	v &= dd->enable_mask;
	v >>= __ffs(dd->enable_mask);

	/* Reparent the struct clk in case the dpll is in bypass */
	if (cpu_is_omap24xx()) {
		if (v == OMAP2XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP2XXX_EN_DPLL_FRBYPASS)
			clk_reparent(clk, dd->clk_bypass);
	} else if (cpu_is_omap34xx()) {
		if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP3XXX_EN_DPLL_FRBYPASS)
			clk_reparent(clk, dd->clk_bypass);
	} else if (cpu_is_omap44xx()) {
		if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP4XXX_EN_DPLL_FRBYPASS ||
		    v == OMAP4XXX_EN_DPLL_MNBYPASS)
			clk_reparent(clk, dd->clk_bypass);
	}
	return;
}
/**
* omap2_get_dpll_rate - returns the current DPLL CLKOUT rate
* @clk: struct clk * of a DPLL
*
* DPLLs can be locked or bypassed - basically, enabled or disabled.
* When locked, the DPLL output depends on the M and N values. When
* bypassed, on OMAP2xxx, the output rate is either the 32KiHz clock
* or sys_clk. Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and
* 2 are bypassed with dpll1_fclk and dpll2_fclk respectively
* (generated by DPLL3), while DPLL 3, 4, and 5 bypass rates are sys_clk.
* Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is
* locked, or the appropriate bypass rate if the DPLL is bypassed, or 0
* if the clock @clk is not a DPLL.
*/
u32 omap2_get_dpll_rate(struct clk *clk)
{
	long long dpll_clk;
	u32 dpll_mult, dpll_div, v;
	struct dpll_data *dd;

	dd = clk->dpll_data;
	if (!dd)
		return 0;

	/* Return bypass rate if DPLL is bypassed */
	v = __raw_readl(dd->control_reg);
	v &= dd->enable_mask;
	v >>= __ffs(dd->enable_mask);

	if (cpu_is_omap24xx()) {
		if (v == OMAP2XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP2XXX_EN_DPLL_FRBYPASS)
			return dd->clk_bypass->rate;
	} else if (cpu_is_omap34xx()) {
		if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP3XXX_EN_DPLL_FRBYPASS)
			return dd->clk_bypass->rate;
	} else if (cpu_is_omap44xx()) {
		if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP4XXX_EN_DPLL_FRBYPASS ||
		    v == OMAP4XXX_EN_DPLL_MNBYPASS)
			return dd->clk_bypass->rate;
	}

	/* Locked: rate = clk_ref * M / (N + 1) from the M/N register */
	v = __raw_readl(dd->mult_div1_reg);
	dpll_mult = v & dd->mult_mask;
	dpll_mult >>= __ffs(dd->mult_mask);
	dpll_div = v & dd->div1_mask;
	dpll_div >>= __ffs(dd->div1_mask);

	/* 64-bit multiply before the divide to avoid overflow */
	dpll_clk = (long long)dd->clk_ref->rate * dpll_mult;
	do_div(dpll_clk, dpll_div + 1);

	return dpll_clk;
}
/* DPLL rate rounding code */
/**
 * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
 * @clk: struct clk * for a DPLL
 * @target_rate: desired DPLL clock rate
 *
 * Given a DPLL and a desired target rate, round the target rate to a
 * possible, programmable rate for this DPLL. Attempts to select the
 * minimum possible n. Stores the computed (m, n) in the DPLL's
 * dpll_data structure so set_rate() will not need to call this
 * (expensive) function again. Returns ~0 if the target rate cannot
 * be rounded, or the rounded rate upon success.
 */
long omap2_dpll_round_rate(struct clk *clk, unsigned long target_rate)
{
	int m, n, r, scaled_max_m;
	unsigned long scaled_rt_rp;
	unsigned long new_rate = 0;
	struct dpll_data *dd;

	if (!clk || !clk->dpll_data)
		return ~0;

	dd = clk->dpll_data;

	/* %lu: target_rate is unsigned long (was incorrectly %ld) */
	pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
		 clk->name, target_rate);

	/*
	 * Pre-scale the target/reference ratio so the per-n multiplier
	 * below can be computed without 32-bit overflow.
	 */
	scaled_rt_rp = target_rate / (dd->clk_ref->rate / DPLL_SCALE_FACTOR);
	scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;

	dd->last_rounded_rate = 0;

	for (n = dd->min_divider; n <= dd->max_divider; n++) {
		/* Is the (input clk, divider) pair valid for the DPLL? */
		r = _dpll_test_fint(clk, n);
		if (r == DPLL_FINT_UNDERFLOW)
			break;
		else if (r == DPLL_FINT_INVALID)
			continue;

		/* Compute the scaled DPLL multiplier, based on the divider */
		m = scaled_rt_rp * n;

		/*
		 * Since we're counting n up, a m overflow means we
		 * can bail out completely (since as n increases in
		 * the next iteration, there's no way that m can
		 * increase beyond the current m)
		 */
		if (m > scaled_max_m)
			break;

		r = _dpll_test_mult(&m, n, &new_rate, target_rate,
				    dd->clk_ref->rate);

		/* m can't be set low enough for this n - try with a larger n */
		if (r == DPLL_MULT_UNDERFLOW)
			continue;

		/* %lu: new_rate is unsigned long (was incorrectly %ld) */
		pr_debug("clock: %s: m = %d: n = %d: new_rate = %lu\n",
			 clk->name, m, n, new_rate);

		if (target_rate == new_rate) {
			/* Cache (m, n) so set_rate() can skip this search */
			dd->last_rounded_m = m;
			dd->last_rounded_n = n;
			dd->last_rounded_rate = target_rate;
			break;
		}
	}

	if (target_rate != new_rate) {
		pr_debug("clock: %s: cannot round to rate %lu\n",
			 clk->name, target_rate);
		return ~0;
	}

	return target_rate;
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.