repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
bigzz/ZenKernel_Shamu | drivers/tty/hvc/hvc_beat.c | 8667 | 3211 | /*
* Beat hypervisor console driver
*
* (C) Copyright 2006 TOSHIBA CORPORATION
*
* This code is based on drivers/char/hvc_rtas.c:
* (C) Copyright IBM Corporation 2001-2005
* (C) Copyright Red Hat, Inc. 2005
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/console.h>
#include <asm/prom.h>
#include <asm/hvconsole.h>
#include <asm/firmware.h>
#include "hvc_console.h"
extern int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *);
extern int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t);
struct hvc_struct *hvc_beat_dev = NULL;
/* bug: only one queue is available regardless of vtermno */
/*
 * Read up to @cnt bytes for console @vtermno into @buf.
 *
 * beat_get_term_char() always delivers up to 16 bytes into a
 * 2*sizeof(long) buffer, so bytes the caller did not ask for are kept
 * in the static queue @q for the next call.
 * Returns the number of bytes copied into @buf (0 if none available).
 */
static int hvc_beat_get_chars(uint32_t vtermno, char *buf, int cnt)
{
	static unsigned char q[sizeof(unsigned long) * 2]
		__attribute__((aligned(sizeof(unsigned long))));
	static int qlen = 0;	/* number of bytes currently queued in q */
	u64 got;

again:
	if (qlen) {
		if (qlen > cnt) {
			/* hand out cnt bytes, keep the remainder queued */
			memcpy(buf, q, cnt);
			qlen -= cnt;
			/*
			 * The leftover bytes live at q + cnt; shift them to
			 * the front of the queue.  (The original call had
			 * the memmove() destination and source swapped,
			 * which corrupted the queued data.)
			 */
			memmove(q, q + cnt, qlen);
			return cnt;
		} else {	/* qlen <= cnt */
			int r;

			memcpy(buf, q, qlen);
			r = qlen;
			qlen = 0;
			return r;
		}
	}

	/* queue empty: fetch up to 16 more bytes from the hypervisor */
	if (beat_get_term_char(vtermno, &got,
			       ((u64 *)q), ((u64 *)q) + 1) == 0) {
		qlen = got;
		goto again;
	}
	return 0;
}
/*
 * Write @cnt bytes from @buf to console @vtermno.
 *
 * The hypervisor call accepts at most 16 bytes packed into two
 * unsigned longs, so larger writes are split into 16-byte chunks.
 * Returns the number of bytes accepted (always @cnt).
 */
static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
{
	unsigned long kb[2];
	int rest, nlen;

	for (rest = cnt; rest > 0; rest -= nlen) {
		nlen = (rest > 16) ? 16 : rest;
		/*
		 * Zero the packing buffer first so a short final chunk
		 * does not pass leftover stack / previous-chunk bytes to
		 * the hypervisor.
		 */
		memset(kb, 0, sizeof(kb));
		memcpy(kb, buf, nlen);
		beat_put_term_char(vtermno, nlen, kb[0], kb[1]);
		buf += nlen;
	}
	return cnt;
}
/* hvc console backend callbacks for the Beat hypervisor */
static const struct hv_ops hvc_beat_get_put_ops = {
	.get_chars = hvc_beat_get_chars,
	.put_chars = hvc_beat_put_chars,
};
/* Whether to use the Beat hvc console; toggled via "hvc_beat=<n>" */
static int hvc_beat_useit = 1;

/* Parse the "hvc_beat=" kernel command line option */
static int hvc_beat_config(char *p)
{
	hvc_beat_useit = simple_strtoul(p, NULL, 0);
	return 0;
}
/*
 * Early console registration: advertise vterm 0 as a console
 * candidate when running on a Beat machine (unless disabled with
 * hvc_beat=0).
 */
static int __init hvc_beat_console_init(void)
{
	if (hvc_beat_useit && of_machine_is_compatible("Beat")) {
		hvc_instantiate(0, 0, &hvc_beat_get_put_ops);
	}
	return 0;
}
/* temp */
/*
 * Module init: allocate the hvc device for vterm 0, with a 16-byte
 * output buffer size to match the hypervisor's per-call limit.
 * Returns 0, -ENODEV when not on Beat firmware, or the hvc_alloc()
 * error.
 */
static int __init hvc_beat_init(void)
{
	struct hvc_struct *hp;

	if (!firmware_has_feature(FW_FEATURE_BEAT))
		return -ENODEV;

	hp = hvc_alloc(0, 0, &hvc_beat_get_put_ops, 16);
	if (IS_ERR(hp))
		return PTR_ERR(hp);
	hvc_beat_dev = hp;
	return 0;
}
/* Module exit: remove the hvc device if it was allocated */
static void __exit hvc_beat_exit(void)
{
	if (hvc_beat_dev)
		hvc_remove(hvc_beat_dev);
}
module_init(hvc_beat_init);
module_exit(hvc_beat_exit);
__setup("hvc_beat=", hvc_beat_config);
console_initcall(hvc_beat_console_init);
| gpl-2.0 |
dryize/endeavoru_4.18 | arch/mips/kernel/gpio_txx9.c | 13531 | 2288 | /*
* A gpio chip driver for TXx9 SoCs
*
* Copyright (C) 2008 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/txx9pio.h>
static DEFINE_SPINLOCK(txx9_gpio_lock);
static struct txx9_pio_reg __iomem *txx9_pioptr;
/*
 * gpiolib .get callback: read the input level of GPIO @offset.
 *
 * Normalize to 0/1 with !! instead of returning the raw masked bit:
 * gpiolib only requires zero/nonzero, but for offset 31 the raw value
 * would be bit 31 of an int, which some callers truncate or read as
 * negative.
 */
static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	return !!(__raw_readl(&txx9_pioptr->din) & (1 << offset));
}
/*
 * Read-modify-write the data-output register for GPIO @offset.
 * Caller must hold txx9_gpio_lock; no locking is done here.
 */
static void txx9_gpio_set_raw(unsigned int offset, int value)
{
	u32 dout = __raw_readl(&txx9_pioptr->dout);

	if (value)
		dout |= 1 << offset;
	else
		dout &= ~(1 << offset);
	__raw_writel(dout, &txx9_pioptr->dout);
}
/* gpiolib .set callback: update the output latch under the driver lock */
static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
			  int value)
{
	unsigned long flags;

	spin_lock_irqsave(&txx9_gpio_lock, flags);
	txx9_gpio_set_raw(offset, value);
	mmiowb();	/* order the MMIO write before releasing the lock */
	spin_unlock_irqrestore(&txx9_gpio_lock, flags);
}
/*
 * gpiolib .direction_input callback: clear the pin's bit in the
 * direction register so it becomes an input.  Always succeeds.
 */
static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
{
	unsigned long flags;

	spin_lock_irqsave(&txx9_gpio_lock, flags);
	__raw_writel(__raw_readl(&txx9_pioptr->dir) & ~(1 << offset),
		     &txx9_pioptr->dir);
	mmiowb();	/* order the MMIO write before releasing the lock */
	spin_unlock_irqrestore(&txx9_gpio_lock, flags);
	return 0;
}
/*
 * gpiolib .direction_output callback: latch the requested level first,
 * then set the pin's direction bit, so the pin never drives a stale
 * value when it switches to output.  Always succeeds.
 */
static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset,
			     int value)
{
	unsigned long flags;

	spin_lock_irqsave(&txx9_gpio_lock, flags);
	txx9_gpio_set_raw(offset, value);
	__raw_writel(__raw_readl(&txx9_pioptr->dir) | (1 << offset),
		     &txx9_pioptr->dir);
	mmiowb();	/* order the MMIO writes before releasing the lock */
	spin_unlock_irqrestore(&txx9_gpio_lock, flags);
	return 0;
}
/* Single chip instance; .base and .ngpio are filled in by txx9_gpio_init() */
static struct gpio_chip txx9_gpio_chip = {
	.get = txx9_gpio_get,
	.set = txx9_gpio_set,
	.direction_input = txx9_gpio_dir_in,
	.direction_output = txx9_gpio_dir_out,
	.label = "TXx9",
};
/*
 * Map the PIO register block at physical address @baseaddr and register
 * @num GPIOs starting at global GPIO number @base.
 * Returns 0 on success, -ENODEV if the registers cannot be mapped, or
 * the gpiochip_add() error.
 */
int __init txx9_gpio_init(unsigned long baseaddr,
			  unsigned int base, unsigned int num)
{
	txx9_pioptr = ioremap(baseaddr, sizeof(struct txx9_pio_reg));
	if (!txx9_pioptr)
		return -ENODEV;
	txx9_gpio_chip.base = base;
	txx9_gpio_chip.ngpio = num;
	return gpiochip_add(&txx9_gpio_chip);
}
| gpl-2.0 |
johnzz/fastsocket | kernel/arch/xtensa/boot/lib/zmem.c | 14043 | 1984 | #include <linux/zlib.h>
/* bits taken from ppc */
extern void *avail_ram, *end_avail;
/* Fatal-error stop for the boot stub: there is no OS to return to, so spin. */
void exit (void)
{
	for (;;);
}
/*
 * Trivial bump allocator for the zlib inflate workspace: carve @size
 * bytes (rounded up to 8-byte alignment) off the front of the available
 * RAM window.  Memory is never freed; overrunning end_avail is fatal.
 */
void *zalloc(unsigned size)
{
	void *chunk = avail_ram;
	unsigned aligned = (size + 7) & -8;

	avail_ram += aligned;
	if (avail_ram > end_avail)
		exit();		/* out of memory - nothing to report from here */
	return chunk;
}
#define HEAD_CRC 2
#define EXTRA_FIELD 4
#define ORIG_NAME 8
#define COMMENT 0x10
#define RESERVED 0xe0
#define DEFLATED 8
/*
 * Decompress the gzip stream at @src (*lenp bytes) into @dst (at most
 * @dstlen bytes).  On return, *lenp holds the number of bytes written
 * to @dst.  Malformed input is fatal (exit()) - the boot stub has no
 * error path.
 */
void gunzip (void *dst, int dstlen, unsigned char *src, int *lenp)
{
	z_stream s;
	int r, i, flags;

	/* skip header: fixed 10-byte gzip header (RFC 1952) first */
	i = 10;
	flags = src[3];		/* FLG byte */
	if (src[2] != DEFLATED || (flags & RESERVED) != 0) {
		/* bad gzipped data */
		exit();
	}
	/* optional "extra" field: 2-byte little-endian length plus data */
	if ((flags & EXTRA_FIELD) != 0)
		i = 12 + src[10] + (src[11] << 8);
	/* optional NUL-terminated original file name */
	if ((flags & ORIG_NAME) != 0)
		while (src[i++] != 0)
			;
	/* optional NUL-terminated comment */
	if ((flags & COMMENT) != 0)
		while (src[i++] != 0)
			;
	/* optional 2-byte header CRC */
	if ((flags & HEAD_CRC) != 0)
		i += 2;
	if (i >= *lenp) {
		/* gunzip: ran out of data in header */
		exit();
	}

	/* raw inflate: negative window bits mean "no zlib wrapper" */
	s.workspace = zalloc(zlib_inflate_workspacesize());
	r = zlib_inflateInit2(&s, -MAX_WBITS);
	if (r != Z_OK) {
		/* inflateInit2 failed */
		exit();
	}
	s.next_in = src + i;
	s.avail_in = *lenp - i;
	s.next_out = dst;
	s.avail_out = dstlen;
	r = zlib_inflate(&s, Z_FINISH);
	if (r != Z_OK && r != Z_STREAM_END) {
		/* inflate failed */
		exit();
	}
	*lenp = s.next_out - (unsigned char *) dst;
	zlib_inflateEnd(&s);
}
| gpl-2.0 |
pcarrier/linux | drivers/char/tpm/tpm-chip.c | 476 | 6163 | /*
* Copyright (C) 2004 IBM Corporation
* Copyright (C) 2014 Intel Corporation
*
* Authors:
* Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
* Leendert van Doorn <leendert@watson.ibm.com>
* Dave Safford <safford@watson.ibm.com>
* Reiner Sailer <sailer@watson.ibm.com>
* Kylene Hall <kjhall@us.ibm.com>
*
* Maintained by: <tpmdd-devel@lists.sourceforge.net>
*
* TPM chip management routines.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*
*/
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/major.h>
#include "tpm.h"
#include "tpm_eventlog.h"
static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
static LIST_HEAD(tpm_chip_list);
static DEFINE_SPINLOCK(driver_lock);
struct class *tpm_class;
dev_t tpm_devt;
/*
* tpm_chip_find_get - return tpm_chip for a given chip number
* @chip_num the device number for the chip
*/
struct tpm_chip *tpm_chip_find_get(int chip_num)
{
	struct tpm_chip *pos, *chip = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
		/* TPM_ANY_NUM matches the first chip we can pin */
		if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
			continue;

		/* pin the owning driver module so the chip cannot go away;
		 * on failure keep scanning (only matters for TPM_ANY_NUM) */
		if (try_module_get(pos->pdev->driver->owner)) {
			chip = pos;
			break;
		}
	}
	rcu_read_unlock();
	return chip;
}
/**
* tpm_dev_release() - free chip memory and the device number
* @dev: the character device for the TPM chip
*
* This is used as the release function for the character device.
*/
static void tpm_dev_release(struct device *dev)
{
	struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);

	/* return the chip's device number to the pool before freeing */
	spin_lock(&driver_lock);
	clear_bit(chip->dev_num, dev_mask);
	spin_unlock(&driver_lock);
	kfree(chip);
}
/**
* tpmm_chip_alloc() - allocate a new struct tpm_chip instance
* @dev: device to which the chip is associated
* @ops: struct tpm_class_ops instance
*
* Allocates a new struct tpm_chip instance and assigns a free
* device number for it. Caller does not have to worry about
* freeing the allocated resources. When the devices is removed
* devres calls tpmm_chip_remove() to do the job.
*/
struct tpm_chip *tpmm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops)
{
struct tpm_chip *chip;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return ERR_PTR(-ENOMEM);
mutex_init(&chip->tpm_mutex);
INIT_LIST_HEAD(&chip->list);
chip->ops = ops;
spin_lock(&driver_lock);
chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);
spin_unlock(&driver_lock);
if (chip->dev_num >= TPM_NUM_DEVICES) {
dev_err(dev, "No available tpm device numbers\n");
kfree(chip);
return ERR_PTR(-ENOMEM);
}
set_bit(chip->dev_num, dev_mask);
scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num);
chip->pdev = dev;
dev_set_drvdata(dev, chip);
chip->dev.class = tpm_class;
chip->dev.release = tpm_dev_release;
chip->dev.parent = chip->pdev;
if (chip->dev_num == 0)
chip->dev.devt = MKDEV(MISC_MAJOR, TPM_MINOR);
else
chip->dev.devt = MKDEV(MAJOR(tpm_devt), chip->dev_num);
dev_set_name(&chip->dev, "%s", chip->devname);
device_initialize(&chip->dev);
cdev_init(&chip->cdev, &tpm_fops);
chip->cdev.owner = chip->pdev->driver->owner;
chip->cdev.kobj.parent = &chip->dev.kobj;
return chip;
}
EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
/*
 * Make the chip visible to userspace: add the character device, then
 * the sysfs device.
 *
 * Error handling: at this point the struct device has only been
 * device_initialize()d, not added, so a cdev_add() failure has nothing
 * to unregister yet; a device_add() failure must remove the
 * already-added cdev.  (The original code instead called
 * device_unregister() on the never-added device and leaked the cdev on
 * device_add() failure.)
 */
static int tpm_dev_add_device(struct tpm_chip *chip)
{
	int rc;

	rc = cdev_add(&chip->cdev, chip->dev.devt, 1);
	if (rc) {
		dev_err(&chip->dev,
			"unable to cdev_add() %s, major %d, minor %d, err=%d\n",
			chip->devname, MAJOR(chip->dev.devt),
			MINOR(chip->dev.devt), rc);
		return rc;
	}

	rc = device_add(&chip->dev);
	if (rc) {
		dev_err(&chip->dev,
			"unable to device_register() %s, major %d, minor %d, err=%d\n",
			chip->devname, MAJOR(chip->dev.devt),
			MINOR(chip->dev.devt), rc);
		cdev_del(&chip->cdev);
		return rc;
	}

	return rc;
}
/* Undo tpm_dev_add_device(): remove the cdev, then the device */
static void tpm_dev_del_device(struct tpm_chip *chip)
{
	cdev_del(&chip->cdev);
	device_unregister(&chip->dev);
}
/*
 * Register TPM 1.x specific interfaces: sysfs attributes, the PPI
 * interface and the BIOS event log.  No-op for TPM 2.0 chips.
 * Returns 0 or a negative errno.
 */
static int tpm1_chip_register(struct tpm_chip *chip)
{
	int rc;

	if (chip->flags & TPM_CHIP_FLAG_TPM2)
		return 0;

	rc = tpm_sysfs_add_device(chip);
	if (rc)
		return rc;

	rc = tpm_add_ppi(chip);
	if (rc) {
		tpm_sysfs_del_device(chip);
		return rc;
	}

	/* bios_dir may be NULL; tpm1_chip_unregister() checks for that */
	chip->bios_dir = tpm_bios_log_setup(chip->devname);

	return 0;
}
/* Tear down the TPM 1.x interfaces set up by tpm1_chip_register() */
static void tpm1_chip_unregister(struct tpm_chip *chip)
{
	if (chip->flags & TPM_CHIP_FLAG_TPM2)
		return;

	if (chip->bios_dir)
		tpm_bios_log_teardown(chip->bios_dir);

	tpm_remove_ppi(chip);

	tpm_sysfs_del_device(chip);
}
/*
* tpm_chip_register() - create a character device for the TPM chip
* @chip: TPM chip to use.
*
* Creates a character device for the TPM chip and adds sysfs attributes for
* the device. As the last step this function adds the chip to the list of TPM
* chips available for in-kernel use.
*
* This function should be only called after the chip initialization is
* complete.
*/
int tpm_chip_register(struct tpm_chip *chip)
{
	int rc;

	/* TPM 1.x extras (sysfs/PPI/event log) first; no-op for TPM 2.0 */
	rc = tpm1_chip_register(chip);
	if (rc)
		return rc;

	rc = tpm_dev_add_device(chip);
	if (rc)
		goto out_err;

	/* Make the chip available. */
	spin_lock(&driver_lock);
	list_add_rcu(&chip->list, &tpm_chip_list);
	spin_unlock(&driver_lock);

	chip->flags |= TPM_CHIP_FLAG_REGISTERED;

	return 0;
out_err:
	tpm1_chip_unregister(chip);
	return rc;
}
EXPORT_SYMBOL_GPL(tpm_chip_register);
/*
* tpm_chip_unregister() - release the TPM driver
* @chip: TPM chip to use.
*
* Takes the chip first away from the list of available TPM chips and then
* cleans up all the resources reserved by tpm_chip_register().
*
* NOTE: This function should be only called before deinitializing chip
* resources.
*/
void tpm_chip_unregister(struct tpm_chip *chip)
{
	if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED))
		return;

	/* unpublish the chip, then wait for in-flight RCU readers
	 * (tpm_chip_find_get) before tearing anything down */
	spin_lock(&driver_lock);
	list_del_rcu(&chip->list);
	spin_unlock(&driver_lock);
	synchronize_rcu();

	tpm1_chip_unregister(chip);
	tpm_dev_del_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
| gpl-2.0 |
getitnowmarketing/Gz-One-Commando | drivers/net/usb/rndis_host.c | 476 | 19039 | /*
* Host Side support for RNDIS Networking Links
* Copyright (C) 2005 by David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/rndis_host.h>
/*
* RNDIS is NDIS remoted over USB. It's a MSFT variant of CDC ACM ... of
* course ACM was intended for modems, not Ethernet links! USB's standard
* for Ethernet links is "CDC Ethernet", which is significantly simpler.
*
* NOTE that Microsoft's "RNDIS 1.0" specification is incomplete. Issues
* include:
* - Power management in particular relies on information that's scattered
* through other documentation, and which is incomplete or incorrect even
* there.
* - There are various undocumented protocol requirements, such as the
* need to send unused garbage in control-OUT messages.
* - In some cases, MS-Windows will emit undocumented requests; this
* matters more to peripheral implementations than host ones.
*
* Moreover there's a no-open-specs variant of RNDIS called "ActiveSync".
*
* For these reasons and others, ** USE OF RNDIS IS STRONGLY DISCOURAGED ** in
* favor of such non-proprietary alternatives as CDC Ethernet or the newer (and
* currently rare) "Ethernet Emulation Model" (EEM).
*/
/*
* RNDIS notifications from device: command completion; "reverse"
* keepalives; etc
*/
/*
 * Status-endpoint callback; currently only logs the URB.
 * Keepalive handling is still missing (see FIXME below).
 */
void rndis_status(struct usbnet *dev, struct urb *urb)
{
	devdbg(dev, "rndis status urb, len %d stat %d",
	       urb->actual_length, urb->status);
	// FIXME for keepalives, respond immediately (asynchronously)
	// if not an RNDIS status, do like cdc_status(dev,urb) does
}
EXPORT_SYMBOL_GPL(rndis_status);
/*
* RNDIS indicate messages.
*/
/*
 * Handle an unsolicited RNDIS_MSG_INDICATE from the device.  If the
 * minidriver installed its own indication handler, defer to it;
 * otherwise just log link-state changes and unknown indications.
 */
static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg,
			       int buflen)
{
	struct cdc_state *info = (void *)&dev->data;
	struct device *udev = &info->control->dev;

	if (dev->driver_info->indication) {
		dev->driver_info->indication(dev, msg, buflen);
		return;
	}

	switch (msg->status) {
	case RNDIS_STATUS_MEDIA_CONNECT:
		dev_info(udev, "rndis media connect\n");
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		dev_info(udev, "rndis media disconnect\n");
		break;
	default:
		dev_info(udev, "rndis indication: 0x%08x\n",
			 le32_to_cpu(msg->status));
	}
}
/*
* RPC done RNDIS-style. Caller guarantees:
* - message is properly byteswapped
* - there's no other request pending
* - buf can hold up to 1KB response (required by RNDIS spec)
* On return, the first few entries are already byteswapped.
*
* Call context is likely probe(), before interface name is known,
* which is why we won't try to use it in the diagnostics.
*/
int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
{
	struct cdc_state *info = (void *) &dev->data;
	int master_ifnum;
	int retval;
	unsigned count;
	__le32 rsp;
	u32 xid = 0, msg_len, request_id;

	/* REVISIT when this gets called from contexts other than probe() or
	 * disconnect(): either serialize, or dispatch responses on xid
	 */

	/* Issue the request; xid is unique, don't bother byteswapping it */
	if (likely(buf->msg_type != RNDIS_MSG_HALT
			&& buf->msg_type != RNDIS_MSG_RESET)) {
		/* xid 0 means "no completion expected"; skip it when the
		 * counter wraps */
		xid = dev->xid++;
		if (!xid)
			xid = dev->xid++;
		buf->request_id = (__force __le32) xid;
	}
	master_ifnum = info->control->cur_altsetting->desc.bInterfaceNumber;
	retval = usb_control_msg(dev->udev,
		usb_sndctrlpipe(dev->udev, 0),
		USB_CDC_SEND_ENCAPSULATED_COMMAND,
		USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		0, master_ifnum,
		buf, le32_to_cpu(buf->msg_len),
		RNDIS_CONTROL_TIMEOUT_MS);
	/* HALT/RESET (xid == 0) never get a completion; stop here */
	if (unlikely(retval < 0 || xid == 0))
		return retval;

	// FIXME Seems like some devices discard responses when
	// we time out and cancel our "get response" requests...
	// so, this is fragile.  Probably need to poll for status.

	/* ignore status endpoint, just poll the control channel;
	 * the request probably completed immediately
	 */
	rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
	for (count = 0; count < 10; count++) {
		memset(buf, 0, CONTROL_BUFFER_SIZE);
		retval = usb_control_msg(dev->udev,
			usb_rcvctrlpipe(dev->udev, 0),
			USB_CDC_GET_ENCAPSULATED_RESPONSE,
			USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			0, master_ifnum,
			buf, buflen,
			RNDIS_CONTROL_TIMEOUT_MS);
		/* a valid RNDIS header is at least 8 bytes */
		if (likely(retval >= 8)) {
			msg_len = le32_to_cpu(buf->msg_len);
			request_id = (__force u32) buf->request_id;
			if (likely(buf->msg_type == rsp)) {
				if (likely(request_id == xid)) {
					/* RESET_C carries no status field */
					if (unlikely(rsp == RNDIS_MSG_RESET_C))
						return 0;
					if (likely(RNDIS_STATUS_SUCCESS
							== buf->status))
						return 0;
					dev_dbg(&info->control->dev,
						"rndis reply status %08x\n",
						le32_to_cpu(buf->status));
					return -EL3RST;
				}
				dev_dbg(&info->control->dev,
					"rndis reply id %d expected %d\n",
					request_id, xid);
				/* then likely retry */
			} else switch (buf->msg_type) {
			case RNDIS_MSG_INDICATE:	/* fault/event */
				rndis_msg_indicate(dev, (void *)buf, buflen);
				break;
			case RNDIS_MSG_KEEPALIVE: {	/* ping */
				/* answer the device's keepalive inline,
				 * then keep polling for our response */
				struct rndis_keepalive_c *msg = (void *)buf;

				msg->msg_type = RNDIS_MSG_KEEPALIVE_C;
				msg->msg_len = cpu_to_le32(sizeof *msg);
				msg->status = RNDIS_STATUS_SUCCESS;
				retval = usb_control_msg(dev->udev,
					usb_sndctrlpipe(dev->udev, 0),
					USB_CDC_SEND_ENCAPSULATED_COMMAND,
					USB_TYPE_CLASS | USB_RECIP_INTERFACE,
					0, master_ifnum,
					msg, sizeof *msg,
					RNDIS_CONTROL_TIMEOUT_MS);
				if (unlikely(retval < 0))
					dev_dbg(&info->control->dev,
						"rndis keepalive err %d\n",
						retval);
				}
				break;
			default:
				dev_dbg(&info->control->dev,
					"unexpected rndis msg %08x len %d\n",
					le32_to_cpu(buf->msg_type), msg_len);
			}
		} else {
			/* device probably issued a protocol stall; ignore */
			dev_dbg(&info->control->dev,
				"rndis response error, code %d\n", retval);
		}
		msleep(20);
	}
	dev_dbg(&info->control->dev, "rndis response timeout\n");
	return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rndis_command);
/*
* rndis_query:
*
* Performs a query for @oid along with 0 or more bytes of payload as
* specified by @in_len. If @reply_len is not set to -1 then the reply
* length is checked against this value, resulting in an error if it
* doesn't match.
*
* NOTE: Adding a payload exactly or greater than the size of the expected
* response payload is an evident requirement MSFT added for ActiveSync.
*
* The only exception is for OIDs that return a variably sized response,
* in which case no payload should be added. This undocumented (and
* nonsensical!) issue was found by sniffing protocol requests from the
* ActiveSync 4.1 Windows driver.
*/
static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
		       void *buf, __le32 oid, u32 in_len,
		       void **reply, int *reply_len)
{
	int retval;
	union {
		void *buf;
		struct rndis_msg_hdr *header;
		struct rndis_query *get;
		struct rndis_query_c *get_c;
	} u;
	u32 off, len;

	u.buf = buf;

	memset(u.get, 0, sizeof *u.get + in_len);
	u.get->msg_type = RNDIS_MSG_QUERY;
	u.get->msg_len = cpu_to_le32(sizeof *u.get + in_len);
	u.get->oid = oid;
	u.get->len = cpu_to_le32(in_len);
	/* payload starts right after the fixed query header */
	u.get->offset = cpu_to_le32(20);

	retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);
	if (unlikely(retval < 0)) {
		dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) failed, %d\n",
			oid, retval);
		return retval;
	}

	/* response payload is located relative to the request_id field;
	 * validate it fits inside the 1KB control buffer */
	off = le32_to_cpu(u.get_c->offset);
	len = le32_to_cpu(u.get_c->len);
	if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE))
		goto response_error;

	if (*reply_len != -1 && len != *reply_len)
		goto response_error;

	*reply = (unsigned char *) &u.get_c->request_id + off;
	*reply_len = len;

	return retval;

response_error:
	dev_err(&intf->dev, "RNDIS_MSG_QUERY(0x%08x) "
		"invalid response - off %d len %d\n",
		oid, off, len);
	return -EDOM;
}
/* same as usbnet_netdev_ops but MTU change not allowed: the RNDIS
 * max_transfer_size was negotiated at bind time (no .ndo_change_mtu) */
static const struct net_device_ops rndis_netdev_ops = {
	.ndo_open		= usbnet_open,
	.ndo_stop		= usbnet_stop,
	.ndo_start_xmit		= usbnet_start_xmit,
	.ndo_tx_timeout		= usbnet_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
/*
 * Bind an RNDIS interface: do the CDC descriptor parsing, send
 * RNDIS_MSG_INIT, validate the physical medium against @flags, fetch
 * the permanent MAC address and enable the packet filter.
 * Returns 0 on success or a negative errno; on failure the claimed
 * interfaces are released again.
 */
int
generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
{
	int retval;
	struct net_device *net = dev->net;
	struct cdc_state *info = (void *) &dev->data;
	union {
		void *buf;
		struct rndis_msg_hdr *header;
		struct rndis_init *init;
		struct rndis_init_c *init_c;
		struct rndis_query *get;
		struct rndis_query_c *get_c;
		struct rndis_set *set;
		struct rndis_set_c *set_c;
		struct rndis_halt *halt;
	} u;
	u32 tmp;
	__le32 phym_unspec, *phym;
	int reply_len;
	unsigned char *bp;

	/* we can't rely on i/o from stack working, or stack allocation */
	u.buf = kmalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
	if (!u.buf)
		return -ENOMEM;
	retval = usbnet_generic_cdc_bind(dev, intf);
	if (retval < 0)
		goto fail;

	u.init->msg_type = RNDIS_MSG_INIT;
	u.init->msg_len = cpu_to_le32(sizeof *u.init);
	u.init->major_version = cpu_to_le32(1);
	u.init->minor_version = cpu_to_le32(0);

	/* max transfer (in spec) is 0x4000 at full speed, but for
	 * TX we'll stick to one Ethernet packet plus RNDIS framing.
	 * For RX we handle drivers that zero-pad to end-of-packet.
	 * Don't let userspace change these settings.
	 *
	 * NOTE: there still seems to be weirdness here, as if we need
	 * to do some more things to make sure WinCE targets accept this.
	 * They default to jumbograms of 8KB or 16KB, which is absurd
	 * for such low data rates and which is also more than Linux
	 * can usually expect to allocate for SKB data...
	 */
	net->hard_header_len += sizeof (struct rndis_data_hdr);
	dev->hard_mtu = net->mtu + net->hard_header_len;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);
	if (dev->maxpacket == 0) {
		if (netif_msg_probe(dev))
			dev_dbg(&intf->dev, "dev->maxpacket can't be 0\n");
		retval = -EINVAL;
		goto fail_and_release;
	}

	/* round the rx buffer up to a whole number of USB packets */
	dev->rx_urb_size = dev->hard_mtu + (dev->maxpacket + 1);
	dev->rx_urb_size &= ~(dev->maxpacket - 1);
	u.init->max_transfer_size = cpu_to_le32(dev->rx_urb_size);

	net->netdev_ops = &rndis_netdev_ops;

	retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);
	if (unlikely(retval < 0)) {
		/* it might not even be an RNDIS device!! */
		dev_err(&intf->dev, "RNDIS init failed, %d\n", retval);
		goto fail_and_release;
	}
	tmp = le32_to_cpu(u.init_c->max_transfer_size);
	if (tmp < dev->hard_mtu) {
		if (tmp <= net->hard_header_len) {
			dev_err(&intf->dev,
				"dev can't take %u byte packets (max %u)\n",
				dev->hard_mtu, tmp);
			retval = -EINVAL;
			goto halt_fail_and_release;
		}
		dev_warn(&intf->dev,
			 "dev can't take %u byte packets (max %u), "
			 "adjusting MTU to %u\n",
			 dev->hard_mtu, tmp, tmp - net->hard_header_len);
		dev->hard_mtu = tmp;
		net->mtu = dev->hard_mtu - net->hard_header_len;
	}

	/* REVISIT:  peripheral "alignment" request is ignored ... */
	dev_dbg(&intf->dev,
		"hard mtu %u (%u from dev), rx buflen %Zu, align %d\n",
		dev->hard_mtu, tmp, dev->rx_urb_size,
		1 << le32_to_cpu(u.init_c->packet_alignment));

	/* module has some device initialization code needs to be done right
	 * after RNDIS_INIT */
	if (dev->driver_info->early_init &&
	    dev->driver_info->early_init(dev) != 0) {
		/* bug fix: set an error code here - the original fell
		 * through with retval == 0, reporting success for a bind
		 * whose interfaces had just been released */
		retval = -EINVAL;
		goto halt_fail_and_release;
	}

	/* Check physical medium */
	phym = NULL;
	reply_len = sizeof *phym;
	retval = rndis_query(dev, intf, u.buf, OID_GEN_PHYSICAL_MEDIUM,
			     0, (void **) &phym, &reply_len);
	if (retval != 0 || !phym) {
		/* OID is optional so don't fail here. */
		phym_unspec = RNDIS_PHYSICAL_MEDIUM_UNSPECIFIED;
		phym = &phym_unspec;
	}
	if ((flags & FLAG_RNDIS_PHYM_WIRELESS) &&
	    *phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
		if (netif_msg_probe(dev))
			dev_dbg(&intf->dev, "driver requires wireless "
				"physical medium, but device is not.\n");
		retval = -ENODEV;
		goto halt_fail_and_release;
	}
	if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) &&
	    *phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
		if (netif_msg_probe(dev))
			dev_dbg(&intf->dev, "driver requires non-wireless "
				"physical medium, but device is wireless.\n");
		retval = -ENODEV;
		goto halt_fail_and_release;
	}

	/* Get designated host ethernet address */
	reply_len = ETH_ALEN;
	retval = rndis_query(dev, intf, u.buf, OID_802_3_PERMANENT_ADDRESS,
			     48, (void **) &bp, &reply_len);
	if (unlikely(retval < 0)) {
		dev_err(&intf->dev, "rndis get ethaddr, %d\n", retval);
		goto halt_fail_and_release;
	}
	memcpy(net->dev_addr, bp, ETH_ALEN);
	memcpy(net->perm_addr, bp, ETH_ALEN);

	/* set a nonzero filter to enable data transfers */
	memset(u.set, 0, sizeof *u.set);
	u.set->msg_type = RNDIS_MSG_SET;
	u.set->msg_len = cpu_to_le32(4 + sizeof *u.set);
	u.set->oid = OID_GEN_CURRENT_PACKET_FILTER;
	u.set->len = cpu_to_le32(4);
	u.set->offset = cpu_to_le32((sizeof *u.set) - 8);
	*(__le32 *)(u.buf + sizeof *u.set) = RNDIS_DEFAULT_FILTER;

	retval = rndis_command(dev, u.header, CONTROL_BUFFER_SIZE);
	if (unlikely(retval < 0)) {
		dev_err(&intf->dev, "rndis set packet filter, %d\n", retval);
		goto halt_fail_and_release;
	}

	retval = 0;

	kfree(u.buf);
	return retval;

halt_fail_and_release:
	/* the device is initialized; shut it back down before releasing */
	memset(u.halt, 0, sizeof *u.halt);
	u.halt->msg_type = RNDIS_MSG_HALT;
	u.halt->msg_len = cpu_to_le32(sizeof *u.halt);
	(void) rndis_command(dev, (void *)u.halt, CONTROL_BUFFER_SIZE);
fail_and_release:
	usb_set_intfdata(info->data, NULL);
	usb_driver_release_interface(driver_of(intf), info->data);
	info->data = NULL;
fail:
	kfree(u.buf);
	return retval;
}
EXPORT_SYMBOL_GPL(generic_rndis_bind);
/* usbnet bind for plain Ethernet RNDIS: reject wireless-medium devices */
static int rndis_bind(struct usbnet *dev, struct usb_interface *intf)
{
	return generic_rndis_bind(dev, intf, FLAG_RNDIS_PHYM_NOT_WIRELESS);
}
void rndis_unbind(struct usbnet *dev, struct usb_interface *intf)
{
	struct rndis_halt *halt;

	/* try to clear any rndis state/activity (no i/o from stack!) */
	halt = kzalloc(CONTROL_BUFFER_SIZE, GFP_KERNEL);
	if (halt) {
		halt->msg_type = RNDIS_MSG_HALT;
		halt->msg_len = cpu_to_le32(sizeof *halt);
		/* best-effort; errors are ignored since we are going away */
		(void) rndis_command(dev, (void *)halt, CONTROL_BUFFER_SIZE);
		kfree(halt);
	}

	usbnet_cdc_unbind(dev, intf);
}
EXPORT_SYMBOL_GPL(rndis_unbind);
/*
* DATA -- host must not write zlps
*/
/*
 * Unwrap RNDIS framing on receive.  The peripheral may batch several
 * RNDIS_MSG_PACKETs into one URB; every packet but the last is cloned
 * and handed to the stack here, the last is left in @skb and 1 is
 * returned so the caller submits it.  Returns 0 to drop a malformed
 * buffer.
 */
int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	/* peripheral may have batched packets to us... */
	while (likely(skb->len)) {
		struct rndis_data_hdr *hdr = (void *)skb->data;
		struct sk_buff *skb2;
		u32 msg_len, data_offset, data_len;

		msg_len = le32_to_cpu(hdr->msg_len);
		data_offset = le32_to_cpu(hdr->data_offset);
		data_len = le32_to_cpu(hdr->data_len);

		/* don't choke if we see oob, per-packet data, etc */
		/* NOTE(review): data_offset + data_len + 8 is computed in
		 * u32 and could wrap for hostile header values - confirm
		 * against later upstream hardening of this check. */
		if (unlikely(hdr->msg_type != RNDIS_MSG_PACKET
				|| skb->len < msg_len
				|| (data_offset + data_len + 8) > msg_len)) {
			dev->net->stats.rx_frame_errors++;
			devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
				le32_to_cpu(hdr->msg_type),
				msg_len, data_offset, data_len, skb->len);
			return 0;
		}
		/* strip the 8-byte fixed header plus per-packet prefix */
		skb_pull(skb, 8 + data_offset);

		/* at most one packet left? */
		if (likely((data_len - skb->len) <= sizeof *hdr)) {
			skb_trim(skb, data_len);
			break;
		}

		/* try to return all the packets in the batch */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!skb2))
			break;
		skb_pull(skb, msg_len - sizeof *hdr);
		skb_trim(skb2, data_len);
		usbnet_skb_return(dev, skb2);
	}

	/* caller will usbnet_skb_return the remaining packet */
	return 1;
}
EXPORT_SYMBOL_GPL(rndis_rx_fixup);
/*
 * Prepend the RNDIS_MSG_PACKET header on transmit.  Reuses the skb's
 * headroom when possible (sliding the payload down if only tail room
 * is available), otherwise reallocates.  Returns the skb to send, or
 * NULL on allocation failure; the original skb is consumed either way.
 */
struct sk_buff *
rndis_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
	struct rndis_data_hdr *hdr;
	struct sk_buff *skb2;
	unsigned len = skb->len;

	if (likely(!skb_cloned(skb))) {
		int room = skb_headroom(skb);

		/* enough head room as-is? */
		if (unlikely((sizeof *hdr) <= room))
			goto fill;

		/* enough room, but needs to be readjusted? */
		room += skb_tailroom(skb);
		if (likely((sizeof *hdr) <= room)) {
			/* slide the payload down to free header space */
			skb->data = memmove(skb->head + sizeof *hdr,
					    skb->data, len);
			skb_set_tail_pointer(skb, len);
			goto fill;
		}
	}

	/* create a new skb, with the correct size (and tailpad) */
	skb2 = skb_copy_expand(skb, sizeof *hdr, 1, flags);
	dev_kfree_skb_any(skb);
	if (unlikely(!skb2))
		return skb2;
	skb = skb2;

	/* fill out the RNDIS header.  we won't bother trying to batch
	 * packets; Linux minimizes wasted bandwidth through tx queues.
	 */
fill:
	hdr = (void *) __skb_push(skb, sizeof *hdr);
	memset(hdr, 0, sizeof *hdr);
	hdr->msg_type = RNDIS_MSG_PACKET;
	hdr->msg_len = cpu_to_le32(skb->len);
	hdr->data_offset = cpu_to_le32(sizeof(*hdr) - 8);
	hdr->data_len = cpu_to_le32(len);

	/* FIXME make the last packet always be short ... */
	return skb;
}
EXPORT_SYMBOL_GPL(rndis_tx_fixup);
/* usbnet minidriver glue for generic (wired) RNDIS devices */
static const struct driver_info	rndis_info = {
	.description =	"RNDIS device",
	.flags =	FLAG_ETHER | FLAG_FRAMING_RN | FLAG_NO_SETINT,
	.bind =		rndis_bind,
	.unbind =	rndis_unbind,
	.status =	rndis_status,
	.rx_fixup =	rndis_rx_fixup,
	.tx_fixup =	rndis_tx_fixup,
};
/*-------------------------------------------------------------------------*/
/* USB class/subclass/protocol triples this driver matches */
static const struct usb_device_id	products [] = {
{
	/* RNDIS is MSFT's un-official variant of CDC ACM */
	USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
	.driver_info = (unsigned long) &rndis_info,
}, {
	/* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
	USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
	.driver_info = (unsigned long) &rndis_info,
}, {
	/* RNDIS for tethering */
	USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
	.driver_info = (unsigned long) &rndis_info,
},
	{ },		// END
};
MODULE_DEVICE_TABLE(usb, products);
/* USB driver glue; all real work happens in the shared usbnet core */
static struct usb_driver rndis_driver = {
	.name =		"rndis_host",
	.id_table =	products,
	.probe =	usbnet_probe,
	.disconnect =	usbnet_disconnect,
	.suspend =	usbnet_suspend,
	.resume =	usbnet_resume,
};
/* Module init: register the USB driver */
static int __init rndis_init(void)
{
	return usb_register(&rndis_driver);
}
module_init(rndis_init);
/* Module exit: deregister the USB driver */
static void __exit rndis_exit(void)
{
	usb_deregister(&rndis_driver);
}
module_exit(rndis_exit);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB Host side RNDIS driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
EPDCenter/android_kernel_amlogic_s805_3go_aplay2 | crypto/cast6_generic.c | 2268 | 9687 | /* Kernel cryptographic api.
* cast6.c - Cast6 cipher algorithm [rfc2612].
*
* CAST-256 (*cast6*) is a DES like Substitution-Permutation Network (SPN)
* cryptosystem built upon the CAST-128 (*cast5*) [rfc2144] encryption
* algorithm.
*
* Copyright (C) 2003 Kartikey Mahendra Bhatt <kartik_me@hotmail.com>.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*/
#include <asm/byteorder.h>
#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/cast6.h>
#define s1 cast_s1
#define s2 cast_s2
#define s3 cast_s3
#define s4 cast_s4
#define F1(D, r, m) ((I = ((m) + (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
#define F2(D, r, m) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
#define F3(D, r, m) ((I = ((m) - (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
/* Key-schedule masking constants ("Tm" in RFC 2612): 24 octaves of eight
 * 32-bit words, consumed by W() below.  Constant data -- do not edit. */
static const u32 Tm[24][8] = {
	{ 0x5a827999, 0xc95c653a, 0x383650db, 0xa7103c7c, 0x15ea281d,
		0x84c413be, 0xf39dff5f, 0x6277eb00 } ,
	{ 0xd151d6a1, 0x402bc242, 0xaf05ade3, 0x1ddf9984, 0x8cb98525,
		0xfb9370c6, 0x6a6d5c67, 0xd9474808 } ,
	{ 0x482133a9, 0xb6fb1f4a, 0x25d50aeb, 0x94aef68c, 0x0388e22d,
		0x7262cdce, 0xe13cb96f, 0x5016a510 } ,
	{ 0xbef090b1, 0x2dca7c52, 0x9ca467f3, 0x0b7e5394, 0x7a583f35,
		0xe9322ad6, 0x580c1677, 0xc6e60218 } ,
	{ 0x35bfedb9, 0xa499d95a, 0x1373c4fb, 0x824db09c, 0xf1279c3d,
		0x600187de, 0xcedb737f, 0x3db55f20 } ,
	{ 0xac8f4ac1, 0x1b693662, 0x8a432203, 0xf91d0da4, 0x67f6f945,
		0xd6d0e4e6, 0x45aad087, 0xb484bc28 } ,
	{ 0x235ea7c9, 0x9238936a, 0x01127f0b, 0x6fec6aac, 0xdec6564d,
		0x4da041ee, 0xbc7a2d8f, 0x2b541930 } ,
	{ 0x9a2e04d1, 0x0907f072, 0x77e1dc13, 0xe6bbc7b4, 0x5595b355,
		0xc46f9ef6, 0x33498a97, 0xa2237638 } ,
	{ 0x10fd61d9, 0x7fd74d7a, 0xeeb1391b, 0x5d8b24bc, 0xcc65105d,
		0x3b3efbfe, 0xaa18e79f, 0x18f2d340 } ,
	{ 0x87ccbee1, 0xf6a6aa82, 0x65809623, 0xd45a81c4, 0x43346d65,
		0xb20e5906, 0x20e844a7, 0x8fc23048 } ,
	{ 0xfe9c1be9, 0x6d76078a, 0xdc4ff32b, 0x4b29decc, 0xba03ca6d,
		0x28ddb60e, 0x97b7a1af, 0x06918d50 } ,
	{ 0x756b78f1, 0xe4456492, 0x531f5033, 0xc1f93bd4, 0x30d32775,
		0x9fad1316, 0x0e86feb7, 0x7d60ea58 } ,
	{ 0xec3ad5f9, 0x5b14c19a, 0xc9eead3b, 0x38c898dc, 0xa7a2847d,
		0x167c701e, 0x85565bbf, 0xf4304760 } ,
	{ 0x630a3301, 0xd1e41ea2, 0x40be0a43, 0xaf97f5e4, 0x1e71e185,
		0x8d4bcd26, 0xfc25b8c7, 0x6affa468 } ,
	{ 0xd9d99009, 0x48b37baa, 0xb78d674b, 0x266752ec, 0x95413e8d,
		0x041b2a2e, 0x72f515cf, 0xe1cf0170 } ,
	{ 0x50a8ed11, 0xbf82d8b2, 0x2e5cc453, 0x9d36aff4, 0x0c109b95,
		0x7aea8736, 0xe9c472d7, 0x589e5e78 } ,
	{ 0xc7784a19, 0x365235ba, 0xa52c215b, 0x14060cfc, 0x82dff89d,
		0xf1b9e43e, 0x6093cfdf, 0xcf6dbb80 } ,
	{ 0x3e47a721, 0xad2192c2, 0x1bfb7e63, 0x8ad56a04, 0xf9af55a5,
		0x68894146, 0xd7632ce7, 0x463d1888 } ,
	{ 0xb5170429, 0x23f0efca, 0x92cadb6b, 0x01a4c70c, 0x707eb2ad,
		0xdf589e4e, 0x4e3289ef, 0xbd0c7590 } ,
	{ 0x2be66131, 0x9ac04cd2, 0x099a3873, 0x78742414, 0xe74e0fb5,
		0x5627fb56, 0xc501e6f7, 0x33dbd298 } ,
	{ 0xa2b5be39, 0x118fa9da, 0x8069957b, 0xef43811c, 0x5e1d6cbd,
		0xccf7585e, 0x3bd143ff, 0xaaab2fa0 } ,
	{ 0x19851b41, 0x885f06e2, 0xf738f283, 0x6612de24, 0xd4ecc9c5,
		0x43c6b566, 0xb2a0a107, 0x217a8ca8 } ,
	{ 0x90547849, 0xff2e63ea, 0x6e084f8b, 0xdce23b2c, 0x4bbc26cd,
		0xba96126e, 0x296ffe0f, 0x9849e9b0 } ,
	{ 0x0723d551, 0x75fdc0f2, 0xe4d7ac93, 0x53b19834, 0xc28b83d5,
		0x31656f76, 0xa03f5b17, 0x0f1946b8 }
};

/* Key-schedule rotation constants ("Tr"): 5-bit rotation counts, indexed
 * by octave number modulo 4 in W(). */
static const u8 Tr[4][8] = {
	{ 0x13, 0x04, 0x15, 0x06, 0x17, 0x08, 0x19, 0x0a } ,
	{ 0x1b, 0x0c, 0x1d, 0x0e, 0x1f, 0x10, 0x01, 0x12 } ,
	{ 0x03, 0x14, 0x05, 0x16, 0x07, 0x18, 0x09, 0x1a } ,
	{ 0x0b, 0x1c, 0x0d, 0x1e, 0x0f, 0x00, 0x11, 0x02 }
};
/* forward octave */
/* One forward octave of the key schedule: mixes all eight key words in
 * place using round functions F1/F2/F3 with octave @i's Tm/Tr constants.
 * Statement order matters -- each step consumes the previous result. */
static inline void W(u32 *key, unsigned int i)
{
	u32 I;	/* scratch word used (and clobbered) by the F1/F2/F3 macros */

	key[6] ^= F1(key[7], Tr[i % 4][0], Tm[i][0]);
	key[5] ^= F2(key[6], Tr[i % 4][1], Tm[i][1]);
	key[4] ^= F3(key[5], Tr[i % 4][2], Tm[i][2]);
	key[3] ^= F1(key[4], Tr[i % 4][3], Tm[i][3]);
	key[2] ^= F2(key[3], Tr[i % 4][4], Tm[i][4]);
	key[1] ^= F3(key[2], Tr[i % 4][5], Tm[i][5]);
	key[0] ^= F1(key[1], Tr[i % 4][6], Tm[i][6]);
	key[7] ^= F2(key[0], Tr[i % 4][7], Tm[i][7]);
}
/*
 * Expand a user key into the per-round rotation (Kr) and masking (Km)
 * subkeys.  The key is zero-padded to 256 bits and run through the
 * forward octave W() twice per round (RFC 2612 key schedule).
 *
 * Returns 0 on success, or -EINVAL (with CRYPTO_TFM_RES_BAD_KEY_LEN set
 * in *flags) when key_len is not a multiple of 4 or exceeds 32 bytes.
 */
int __cast6_setkey(struct cast6_ctx *c, const u8 *in_key,
		   unsigned key_len, u32 *flags)
{
	int i;
	u32 key[8];
	__be32 p_key[8];	/* key zero-padded to 256 bits */

	/*
	 * Reject lengths that are not word-aligned or that would overflow
	 * the pad buffer.  The crypto API already enforces the 16..32 byte
	 * range via cia_min/max_keysize; the upper bound here is defense
	 * in depth for direct callers (the old code would memcpy past
	 * p_key for a 4-aligned key longer than 32 bytes).
	 */
	if (key_len % 4 != 0 || key_len > sizeof(p_key)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memset(p_key, 0, sizeof(p_key));	/* was a magic "32" */
	memcpy(p_key, in_key, key_len);

	key[0] = be32_to_cpu(p_key[0]);		/* A */
	key[1] = be32_to_cpu(p_key[1]);		/* B */
	key[2] = be32_to_cpu(p_key[2]);		/* C */
	key[3] = be32_to_cpu(p_key[3]);		/* D */
	key[4] = be32_to_cpu(p_key[4]);		/* E */
	key[5] = be32_to_cpu(p_key[5]);		/* F */
	key[6] = be32_to_cpu(p_key[6]);		/* G */
	key[7] = be32_to_cpu(p_key[7]);		/* H */

	for (i = 0; i < 12; i++) {
		W(key, 2 * i);
		W(key, 2 * i + 1);

		c->Kr[i][0] = key[0] & 0x1f;
		c->Kr[i][1] = key[2] & 0x1f;
		c->Kr[i][2] = key[4] & 0x1f;
		c->Kr[i][3] = key[6] & 0x1f;

		c->Km[i][0] = key[7];
		c->Km[i][1] = key[5];
		c->Km[i][2] = key[3];
		c->Km[i][3] = key[1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__cast6_setkey);
/* crypto_tfm-facing setkey wrapper: forwards to __cast6_setkey() using
 * the tfm's private context and result-flags word. */
int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
	return __cast6_setkey(crypto_tfm_ctx(tfm), key, keylen,
			      &tfm->crt_flags);
}
EXPORT_SYMBOL_GPL(cast6_setkey);
/*forward quad round*/
/* One forward quad-round: mixes block[] in place using rotation subkeys
 * Kr[0..3] and masking subkeys Km[0..3]. */
static inline void Q(u32 *block, u8 *Kr, u32 *Km)
{
	u32 I;	/* scratch word used by the F1/F2/F3 macros */

	block[2] ^= F1(block[3], Kr[0], Km[0]);
	block[1] ^= F2(block[2], Kr[1], Km[1]);
	block[0] ^= F3(block[1], Kr[2], Km[2]);
	block[3] ^= F1(block[0], Kr[3], Km[3]);
}

/* The reverse quad-round: the same four mixing steps applied in the
 * opposite order, undoing Q() for the same subkeys. */
static inline void QBAR(u32 *block, u8 *Kr, u32 *Km)
{
	u32 I;	/* scratch word used by the F1/F2/F3 macros */

	block[3] ^= F1(block[0], Kr[3], Km[3]);
	block[0] ^= F3(block[1], Kr[2], Km[2]);
	block[1] ^= F2(block[2], Kr[1], Km[1]);
	block[2] ^= F1(block[3], Kr[0], Km[0]);
}
/*
 * Encrypt one 16-byte block: load it as big-endian words, run six forward
 * quad-rounds followed by six reverse quad-rounds with the scheduled
 * subkeys, and store the result big-endian.
 */
void __cast6_encrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf)
{
	const __be32 *src = (const __be32 *)inbuf;
	__be32 *dst = (__be32 *)outbuf;
	u32 block[4];
	unsigned int round;

	for (round = 0; round < 4; round++)
		block[round] = be32_to_cpu(src[round]);

	/* Rounds 0..5 are forward quad-rounds, rounds 6..11 reverse ones. */
	for (round = 0; round < 6; round++)
		Q(block, c->Kr[round], c->Km[round]);
	for (; round < 12; round++)
		QBAR(block, c->Kr[round], c->Km[round]);

	for (round = 0; round < 4; round++)
		dst[round] = cpu_to_be32(block[round]);
}
EXPORT_SYMBOL_GPL(__cast6_encrypt);
/* crypto_tfm-facing encrypt wrapper around __cast6_encrypt(). */
static void cast6_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
	__cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
}
/*
 * Decrypt one 16-byte block: the mirror image of __cast6_encrypt() --
 * the subkey schedule is walked backwards, with forward quad-rounds for
 * entries 11..6 and reverse quad-rounds for entries 5..0.
 */
void __cast6_decrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf)
{
	const __be32 *src = (const __be32 *)inbuf;
	__be32 *dst = (__be32 *)outbuf;
	u32 block[4];
	unsigned int round;

	for (round = 0; round < 4; round++)
		block[round] = be32_to_cpu(src[round]);

	/* Apply Q with schedule entries 11..6, then QBAR with 5..0. */
	for (round = 0; round < 6; round++)
		Q(block, c->Kr[11 - round], c->Km[11 - round]);
	for (; round < 12; round++)
		QBAR(block, c->Kr[11 - round], c->Km[11 - round]);

	for (round = 0; round < 4; round++)
		dst[round] = cpu_to_be32(block[round]);
}
EXPORT_SYMBOL_GPL(__cast6_decrypt);
/* crypto_tfm-facing decrypt wrapper around __cast6_decrypt(). */
static void cast6_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
	__cast6_decrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
}
/* Algorithm descriptor for this generic C implementation.  The low
 * cra_priority (100) presumably leaves room for accelerated drivers to
 * outrank it -- standard crypto API convention; alignmask 3 because the
 * block is accessed as 32-bit words. */
static struct crypto_alg alg = {
	.cra_name = "cast6",
	.cra_driver_name = "cast6-generic",
	.cra_priority = 100,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize = CAST6_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cast6_ctx),
	.cra_alignmask = 3,
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = CAST6_MIN_KEY_SIZE,
			.cia_max_keysize = CAST6_MAX_KEY_SIZE,
			.cia_setkey = cast6_setkey,
			.cia_encrypt = cast6_encrypt,
			.cia_decrypt = cast6_decrypt}
	}
};
/* Register the cipher with the crypto API at module load. */
static int __init cast6_mod_init(void)
{
	return crypto_register_alg(&alg);
}

/* Unregister the cipher on module unload. */
static void __exit cast6_mod_fini(void)
{
	crypto_unregister_alg(&alg);
}
module_init(cast6_mod_init);
module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
MODULE_ALIAS("cast6");
| gpl-2.0 |
tvall43/linux-stable | init/do_mounts_rd.c | 2524 | 8635 | /*
* Many of the syscalls used in this file expect some of the arguments
* to be __user pointers not __kernel pointers. To limit the sparse
* noise, turn off sparse checking for this file.
*/
#ifdef __CHECKER__
#undef __CHECKER__
#warning "Sparse checking disabled for this file"
#endif
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/minix_fs.h>
#include <linux/ext2_fs.h>
#include <linux/romfs_fs.h>
#include <linux/cramfs_fs.h>
#include <linux/initrd.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "do_mounts.h"
#include "../fs/squashfs/squashfs_fs.h"
#include <linux/decompress/generic.h>
int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't prompt */
/* Parse the "prompt_ramdisk=" boot option: any odd value enables the
 * "insert root floppy" prompt. */
static int __init prompt_ramdisk(char *str)
{
	rd_prompt = simple_strtol(str,NULL,0) & 1;
	return 1;
}
__setup("prompt_ramdisk=", prompt_ramdisk);

int __initdata rd_image_start;		/* starting block # of image */

/* Parse the "ramdisk_start=" boot option: block offset of the image. */
static int __init ramdisk_start_setup(char *str)
{
	rd_image_start = simple_strtol(str,NULL,0);
	return 1;
}
__setup("ramdisk_start=", ramdisk_start_setup);
static int __init crd_load(int in_fd, int out_fd, decompress_fn deco);
/*
* This routine tries to find a RAM disk image to load, and returns the
* number of blocks to read for a non-compressed image, 0 if the image
* is a compressed image, and -1 if an image with the right magic
* numbers could not be found.
*
* We currently check for the following magic numbers:
* minix
* ext2
* romfs
* cramfs
* squashfs
* gzip
*/
static int __init
identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor)
{
	const int size = 512;
	struct minix_super_block *minixsb;
	struct romfs_super_block *romfsb;
	struct cramfs_super *cramfsb;
	struct squashfs_super_block *squashfsb;
	int nblocks = -1;
	unsigned char *buf;
	const char *compress_name;
	unsigned long n;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* All probes share one 512-byte buffer, viewed through the
	 * superblock layout of each candidate filesystem. */
	minixsb = (struct minix_super_block *) buf;
	romfsb = (struct romfs_super_block *) buf;
	cramfsb = (struct cramfs_super *) buf;
	squashfsb = (struct squashfs_super_block *) buf;
	memset(buf, 0xe5, size);

	/*
	 * Read block 0 to test for compressed kernel
	 */
	sys_lseek(fd, start_block * BLOCK_SIZE, 0);
	sys_read(fd, buf, size);

	/* A known compression signature means "decompress the whole image":
	 * return 0 so the caller takes the crd_load() path. */
	*decompressor = decompress_method(buf, size, &compress_name);
	if (compress_name) {
		printk(KERN_NOTICE "RAMDISK: %s image found at block %d\n",
		       compress_name, start_block);
		if (!*decompressor)
			printk(KERN_EMERG
			       "RAMDISK: %s decompressor not configured!\n",
			       compress_name);
		nblocks = 0;
		goto done;
	}

	/* romfs is at block zero too */
	if (romfsb->word0 == ROMSB_WORD0 &&
	    romfsb->word1 == ROMSB_WORD1) {
		printk(KERN_NOTICE
		       "RAMDISK: romfs filesystem found at block %d\n",
		       start_block);
		nblocks = (ntohl(romfsb->size)+BLOCK_SIZE-1)>>BLOCK_SIZE_BITS;
		goto done;
	}

	if (cramfsb->magic == CRAMFS_MAGIC) {
		printk(KERN_NOTICE
		       "RAMDISK: cramfs filesystem found at block %d\n",
		       start_block);
		nblocks = (cramfsb->size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS;
		goto done;
	}

	/* squashfs is at block zero too */
	if (le32_to_cpu(squashfsb->s_magic) == SQUASHFS_MAGIC) {
		printk(KERN_NOTICE
		       "RAMDISK: squashfs filesystem found at block %d\n",
		       start_block);
		nblocks = (le64_to_cpu(squashfsb->bytes_used) + BLOCK_SIZE - 1)
			  >> BLOCK_SIZE_BITS;
		goto done;
	}

	/*
	 * Read 512 bytes further to check if cramfs is padded
	 */
	sys_lseek(fd, start_block * BLOCK_SIZE + 0x200, 0);
	sys_read(fd, buf, size);

	if (cramfsb->magic == CRAMFS_MAGIC) {
		printk(KERN_NOTICE
		       "RAMDISK: cramfs filesystem found at block %d\n",
		       start_block);
		nblocks = (cramfsb->size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS;
		goto done;
	}

	/*
	 * Read block 1 to test for minix and ext2 superblock
	 */
	sys_lseek(fd, (start_block+1) * BLOCK_SIZE, 0);
	sys_read(fd, buf, size);

	/* Try minix */
	if (minixsb->s_magic == MINIX_SUPER_MAGIC ||
	    minixsb->s_magic == MINIX_SUPER_MAGIC2) {
		printk(KERN_NOTICE
		       "RAMDISK: Minix filesystem found at block %d\n",
		       start_block);
		nblocks = minixsb->s_nzones << minixsb->s_log_zone_size;
		goto done;
	}

	/* Try ext2 */
	n = ext2_image_size(buf);
	if (n) {
		printk(KERN_NOTICE
		       "RAMDISK: ext2 filesystem found at block %d\n",
		       start_block);
		nblocks = n;
		goto done;
	}

	printk(KERN_NOTICE
	       "RAMDISK: Couldn't find valid RAM disk image starting at %d.\n",
	       start_block);
done:
	/* Rewind to the image start so the caller can stream it out. */
	sys_lseek(fd, start_block * BLOCK_SIZE, 0);
	kfree(buf);
	return nblocks;
}
/*
 * Load the ramdisk image found on device/file @from into /dev/ram.
 * Compressed images (identify_ramdisk_image() returned 0) are streamed
 * through crd_load(); raw images are copied block by block, prompting
 * for additional floppies when the image spans several disks.
 * Returns 1 on success, 0 on failure.
 */
int __init rd_load_image(char *from)
{
	int res = 0;
	int in_fd, out_fd;
	unsigned long rd_blocks, devblocks;
	int nblocks, i, disk;
	char *buf = NULL;
	unsigned short rotate = 0;
	decompress_fn decompressor = NULL;
#if !defined(CONFIG_S390)
	char rotator[4] = { '|' , '/' , '-' , '\\' };
#endif

	out_fd = sys_open("/dev/ram", O_RDWR, 0);
	if (out_fd < 0)
		goto out;

	in_fd = sys_open(from, O_RDONLY, 0);
	if (in_fd < 0)
		goto noclose_input;

	nblocks = identify_ramdisk_image(in_fd, rd_image_start, &decompressor);
	if (nblocks < 0)
		goto done;

	if (nblocks == 0) {
		/* Compressed image: decompress straight onto /dev/ram. */
		if (crd_load(in_fd, out_fd, decompressor) == 0)
			goto successful_load;
		goto done;
	}

	/*
	 * NOTE NOTE: nblocks is not actually blocks but
	 * the number of kibibytes of data to load into a ramdisk.
	 * So any ramdisk block size that is a multiple of 1KiB should
	 * work when the appropriate ramdisk_blocksize is specified
	 * on the command line.
	 *
	 * The default ramdisk_blocksize is 1KiB and it is generally
	 * silly to use anything else, so make sure to use 1KiB
	 * blocksize while generating ext2fs ramdisk-images.
	 */
	if (sys_ioctl(out_fd, BLKGETSIZE, (unsigned long)&rd_blocks) < 0)
		rd_blocks = 0;
	else
		rd_blocks >>= 1;	/* 512-byte sectors -> KiB */

	if (nblocks > rd_blocks) {
		printk("RAMDISK: image too big! (%dKiB/%ldKiB)\n",
		       nblocks, rd_blocks);
		goto done;
	}

	/*
	 * OK, time to copy in the data
	 */
	if (sys_ioctl(in_fd, BLKGETSIZE, (unsigned long)&devblocks) < 0)
		devblocks = 0;
	else
		devblocks >>= 1;	/* 512-byte sectors -> KiB */

	/* An initrd image is exactly nblocks KiB, not a whole device. */
	if (strcmp(from, "/initrd.image") == 0)
		devblocks = nblocks;

	if (devblocks == 0) {
		printk(KERN_ERR "RAMDISK: could not determine device size\n");
		goto done;
	}

	buf = kmalloc(BLOCK_SIZE, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR "RAMDISK: could not allocate buffer\n");
		goto done;
	}

	printk(KERN_NOTICE "RAMDISK: Loading %dKiB [%ld disk%s] into ram disk... ",
		nblocks, ((nblocks-1)/devblocks)+1, nblocks>devblocks ? "s" : "");
	for (i = 0, disk = 1; i < nblocks; i++) {
		/* End of this floppy: ask the user for the next one. */
		if (i && (i % devblocks == 0)) {
			printk("done disk #%d.\n", disk++);
			rotate = 0;
			if (sys_close(in_fd)) {
				printk("Error closing the disk.\n");
				goto noclose_input;
			}
			change_floppy("disk #%d", disk);
			in_fd = sys_open(from, O_RDONLY, 0);
			if (in_fd < 0) {
				printk("Error opening disk.\n");
				goto noclose_input;
			}
			printk("Loading disk #%d... ", disk);
		}
		sys_read(in_fd, buf, BLOCK_SIZE);
		sys_write(out_fd, buf, BLOCK_SIZE);
#if !defined(CONFIG_S390)
		/* Spin a small progress indicator (skipped on S390). */
		if (!(i % 16)) {
			printk("%c\b", rotator[rotate & 0x3]);
			rotate++;
		}
#endif
	}
	printk("done.\n");

successful_load:
	res = 1;
done:
	sys_close(in_fd);
noclose_input:
	sys_close(out_fd);
out:
	kfree(buf);
	sys_unlink("/dev/ram");
	return res;
}
/* Prompt (if enabled) for the root floppy, create the /dev/root and
 * /dev/ram device nodes, then load the image into RAM disk minor @n. */
int __init rd_load_disk(int n)
{
	if (rd_prompt)
		change_floppy("root floppy disk to be loaded into RAM disk");
	create_dev("/dev/root", ROOT_DEV);
	create_dev("/dev/ram", MKDEV(RAMDISK_MAJOR, n));
	return rd_load_image("/dev/root");
}
static int exit_code;			/* set non-zero by error() */
static int decompress_error;		/* sticky failure flag for crd_load() */
static int crd_infd, crd_outfd;		/* fds used by the callbacks below */

/* Decompressor input callback: refill @buf from the source device. */
static int __init compr_fill(void *buf, unsigned int len)
{
	int r = sys_read(crd_infd, buf, len);
	if (r < 0)
		printk(KERN_ERR "RAMDISK: error while reading compressed data");
	else if (r == 0)
		printk(KERN_ERR "RAMDISK: EOF while reading compressed data");
	return r;
}

/* Decompressor output callback: write a decompressed window to /dev/ram.
 * Returns outcnt on success, or -1 (latching decompress_error) on a
 * short write; the message is printed only for the first failure. */
static int __init compr_flush(void *window, unsigned int outcnt)
{
	int written = sys_write(crd_outfd, window, outcnt);
	if (written != outcnt) {
		if (decompress_error == 0)
			printk(KERN_ERR
			       "RAMDISK: incomplete write (%d != %d)\n",
			       written, outcnt);
		decompress_error = 1;
		return -1;
	}
	return outcnt;
}

/* Error callback handed to the decompressor: log and latch failure. */
static void __init error(char *x)
{
	printk(KERN_ERR "%s\n", x);
	exit_code = 1;
	decompress_error = 1;
}

/* Stream-decompress in_fd onto out_fd using @deco.  Returns 0 on
 * success; any decompression or write error forces a non-zero result. */
static int __init crd_load(int in_fd, int out_fd, decompress_fn deco)
{
	int result;

	crd_infd = in_fd;
	crd_outfd = out_fd;
	result = deco(NULL, 0, compr_fill, compr_flush, NULL, NULL, error);
	if (decompress_error)
		result = 1;
	return result;
}
| gpl-2.0 |
junkyde/vikinger | drivers/tty/vt/keyboard.c | 3036 | 51751 | /*
* Written for linux by Johan Myreen as a translation from
* the assembly version by Linus (with diacriticals added)
*
* Some additional features added by Christoph Niemann (ChN), March 1993
*
* Loadable keymaps by Risto Kankkunen, May 1993
*
* Diacriticals redone & other small changes, aeb@cwi.nl, June 1993
* Added decr/incr_console, dynamic keymaps, Unicode support,
* dynamic function/string keys, led setting, Sept 1994
* `Sticky' modifier keys, 951006.
*
* 11-11-96: SAK should now work in the raw mode (Martin Mares)
*
* Modified to provide 'generic' keyboard support by Hamish Macdonald
* Merge with the m68k keyboard driver and split-off of the PC low-level
* parts by Geert Uytterhoeven, May 1997
*
* 27-05-97: Added support for the Magic SysRq Key (Martin Mares)
* 30-07-98: Dead keys redone, aeb@cwi.nl.
* 21-08-02: Converted to input API, major cleanup. (Vojtech Pavlik)
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/consolemap.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
#include <linux/vt_kern.h>
#include <linux/input.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/uaccess.h>
#include <asm/irq_regs.h>
extern void ctrl_alt_del(void);
/*
* Exported functions/variables
*/
#define KBD_DEFMODE ((1 << VC_REPEAT) | (1 << VC_META))
/*
* Some laptops take the 789uiojklm,. keys as number pad when NumLock is on.
* This seems a good reason to start with NumLock off. On HIL keyboards
* of PARISC machines however there is no NumLock key and everyone expects the
* keypad to be used for numbers.
*/
#if defined(CONFIG_PARISC) && (defined(CONFIG_KEYBOARD_HIL) || defined(CONFIG_KEYBOARD_HIL_OLD))
#define KBD_DEFLEDS (1 << VC_NUMLOCK)
#else
#define KBD_DEFLEDS 0
#endif
#define KBD_DEFLOCK 0
/*
* Handler Tables.
*/
#define K_HANDLERS\
k_self, k_fn, k_spec, k_pad,\
k_dead, k_cons, k_cur, k_shift,\
k_meta, k_ascii, k_lock, k_lowercase,\
k_slock, k_dead2, k_brl, k_ignore
typedef void (k_handler_fn)(struct vc_data *vc, unsigned char value,
char up_flag);
static k_handler_fn K_HANDLERS;
static k_handler_fn *k_handler[16] = { K_HANDLERS };
#define FN_HANDLERS\
fn_null, fn_enter, fn_show_ptregs, fn_show_mem,\
fn_show_state, fn_send_intr, fn_lastcons, fn_caps_toggle,\
fn_num, fn_hold, fn_scroll_forw, fn_scroll_back,\
fn_boot_it, fn_caps_on, fn_compose, fn_SAK,\
fn_dec_console, fn_inc_console, fn_spawn_con, fn_bare_num
typedef void (fn_handler_fn)(struct vc_data *vc);
static fn_handler_fn FN_HANDLERS;
static fn_handler_fn *fn_handler[] = { FN_HANDLERS };
/*
* Variables exported for vt_ioctl.c
*/
struct vt_spawn_console vt_spawn_con = {
.lock = __SPIN_LOCK_UNLOCKED(vt_spawn_con.lock),
.pid = NULL,
.sig = 0,
};
/*
* Internal Data.
*/
static struct kbd_struct kbd_table[MAX_NR_CONSOLES];
static struct kbd_struct *kbd = kbd_table;
/* maximum values each key_handler can handle */
static const int max_vals[] = {
255, ARRAY_SIZE(func_table) - 1, ARRAY_SIZE(fn_handler) - 1, NR_PAD - 1,
NR_DEAD - 1, 255, 3, NR_SHIFT - 1, 255, NR_ASCII - 1, NR_LOCK - 1,
255, NR_LOCK - 1, 255, NR_BRL - 1
};
static const int NR_TYPES = ARRAY_SIZE(max_vals);
static struct input_handler kbd_handler;
static DEFINE_SPINLOCK(kbd_event_lock);
static unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; /* keyboard key bitmap */
static unsigned char shift_down[NR_SHIFT]; /* shift state counters.. */
static bool dead_key_next;
static int npadch = -1; /* -1 or number assembled on pad */
static unsigned int diacr;
static char rep; /* flag telling character repeat */
static int shift_state = 0;
static unsigned char ledstate = 0xff; /* undefined */
static unsigned char ledioctl;
static struct ledptr {
unsigned int *addr;
unsigned int mask;
unsigned char valid:1;
} ledptrs[3];
/*
* Notifier list for console keyboard events
*/
static ATOMIC_NOTIFIER_HEAD(keyboard_notifier_list);
/* Register a listener on the console keyboard notifier chain. */
int register_keyboard_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&keyboard_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(register_keyboard_notifier);

/* Remove a listener previously added with register_keyboard_notifier(). */
int unregister_keyboard_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&keyboard_notifier_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_keyboard_notifier);
/*
* Translation of scancodes to keycodes. We set them on only the first
* keyboard in the list that accepts the scancode and keycode.
* Explanation for not choosing the first attached keyboard anymore:
* USB keyboards for example have two event devices: one for all "normal"
* keys and one for extra function keys (like "volume up", "make coffee",
* etc.). So this means that scancodes for the extra function keys won't
* be valid for the first event device, but will be for the second.
*/
/* Carries an input_keymap_entry plus the last error code across the
 * input_handler_for_each_handle() callbacks below. */
struct getset_keycode_data {
	struct input_keymap_entry ke;
	int error;
};

/* Per-handle callback: try to read the keycode; a non-zero return stops
 * the iteration at the first device that answers. */
static int getkeycode_helper(struct input_handle *handle, void *data)
{
	struct getset_keycode_data *d = data;

	d->error = input_get_keycode(handle->dev, &d->ke);

	return d->error == 0; /* stop as soon as we successfully get one */
}

/* Look up the keycode for @scancode on the first keyboard that knows it;
 * returns the keycode, or a negative errno (-ENODEV if none matched). */
static int getkeycode(unsigned int scancode)
{
	struct getset_keycode_data d = {
		.ke = {
			.flags = 0,
			.len = sizeof(scancode),
			.keycode = 0,
		},
		.error = -ENODEV,
	};

	memcpy(d.ke.scancode, &scancode, sizeof(scancode));

	input_handler_for_each_handle(&kbd_handler, &d, getkeycode_helper);

	return d.error ?: d.ke.keycode;
}
/* Per-handle callback: try to install the mapping; a non-zero return
 * stops the iteration at the first device that accepts it. */
static int setkeycode_helper(struct input_handle *handle, void *data)
{
	struct getset_keycode_data *d = data;

	d->error = input_set_keycode(handle->dev, &d->ke);

	return d->error == 0; /* stop as soon as we successfully set one */
}

/* Map @scancode to @keycode on the first keyboard that accepts it;
 * returns 0 on success or a negative errno (-ENODEV if none matched). */
static int setkeycode(unsigned int scancode, unsigned int keycode)
{
	struct getset_keycode_data d = {
		.ke = {
			.flags = 0,
			.len = sizeof(scancode),
			.keycode = keycode,
		},
		.error = -ENODEV,
	};

	memcpy(d.ke.scancode, &scancode, sizeof(scancode));

	input_handler_for_each_handle(&kbd_handler, &d, setkeycode_helper);

	return d.error;
}
/*
* Making beeps and bells. Note that we prefer beeps to bells, but when
* shutting the sound off we do both.
*/
/* Per-handle callback: play (or, for *hz == 0, silence) a tone/bell on
 * one sound-capable input device. */
static int kd_sound_helper(struct input_handle *handle, void *data)
{
	unsigned int *hz = data;
	struct input_dev *dev = handle->dev;

	if (test_bit(EV_SND, dev->evbit)) {
		if (test_bit(SND_TONE, dev->sndbit)) {
			input_inject_event(handle, EV_SND, SND_TONE, *hz);
			/* Tone started: only fall through to the bell when
			 * silencing, so both get shut off together. */
			if (*hz)
				return 0;
		}
		if (test_bit(SND_BELL, dev->sndbit))
			input_inject_event(handle, EV_SND, SND_BELL, *hz ? 1 : 0);
	}

	return 0;
}

/* Timer callback: silence every sound-capable input device. */
static void kd_nosound(unsigned long ignored)
{
	static unsigned int zero;

	input_handler_for_each_handle(&kbd_handler, &zero, kd_sound_helper);
}

static DEFINE_TIMER(kd_mksound_timer, kd_nosound, 0, 0);

/* Start a beep of @hz Hz and arm a timer to stop it after @ticks
 * jiffies; hz == 0 or ticks == 0 just (re)silences. */
void kd_mksound(unsigned int hz, unsigned int ticks)
{
	del_timer_sync(&kd_mksound_timer);

	input_handler_for_each_handle(&kbd_handler, &hz, kd_sound_helper);

	if (hz && ticks)
		mod_timer(&kd_mksound_timer, jiffies + ticks);
}
EXPORT_SYMBOL(kd_mksound);
/*
* Setting the keyboard rate.
*/
/* Per-handle callback: program the requested repeat delay/period (values
 * <= 0 leave a parameter unchanged) and report the device's resulting
 * settings back through rep[1]. */
static int kbd_rate_helper(struct input_handle *handle, void *data)
{
	struct input_dev *dev = handle->dev;
	struct kbd_repeat *rep = data;

	if (test_bit(EV_REP, dev->evbit)) {

		if (rep[0].delay > 0)
			input_inject_event(handle,
					   EV_REP, REP_DELAY, rep[0].delay);
		if (rep[0].period > 0)
			input_inject_event(handle,
					   EV_REP, REP_PERIOD, rep[0].period);

		rep[1].delay = dev->rep[REP_DELAY];
		rep[1].period = dev->rep[REP_PERIOD];
	}

	return 0;
}

/* Set the key-repeat rate on all keyboards; *rep is updated with the
 * settings actually in effect afterwards. */
int kbd_rate(struct kbd_repeat *rep)
{
	struct kbd_repeat data[2] = { *rep };

	input_handler_for_each_handle(&kbd_handler, data, kbd_rate_helper);
	*rep = data[1];	/* Copy currently used settings */

	return 0;
}
/*
* Helper Functions.
*/
/* Push one character into the vc's tty flip buffer and schedule delivery. */
static void put_queue(struct vc_data *vc, int ch)
{
	struct tty_struct *tty = vc->port.tty;

	if (tty) {
		tty_insert_flip_char(tty, ch, 0);
		con_schedule_flip(tty);
	}
}

/* Push a NUL-terminated string, then schedule one flip for the lot. */
static void puts_queue(struct vc_data *vc, char *cp)
{
	struct tty_struct *tty = vc->port.tty;

	if (!tty)
		return;

	while (*cp) {
		tty_insert_flip_char(tty, *cp, 0);
		cp++;
	}
	con_schedule_flip(tty);
}

/* Emit the escape sequence for @key: ESC 'O' <key> in application mode,
 * ESC '[' <key> otherwise. */
static void applkey(struct vc_data *vc, int key, char mode)
{
	static char buf[] = { 0x1b, 'O', 0x00, 0x00 };

	buf[1] = (mode ? 'O' : '[');
	buf[2] = key;
	puts_queue(vc, buf);
}
/*
* Many other routines do put_queue, but I think either
* they produce ASCII, or they produce some user-assigned
* string, and in both cases we might assume that it is
* in utf-8 already.
*/
/* Encode code point @c as UTF-8 into the tty queue.  Surrogate halves
 * (U+D800..U+DFFF) and U+FFFF are silently dropped; values >= 0x110000
 * are ignored entirely. */
static void to_utf8(struct vc_data *vc, uint c)
{
	if (c < 0x80)
		/* 0******* */
		put_queue(vc, c);
	else if (c < 0x800) {
		/* 110***** 10****** */
		put_queue(vc, 0xc0 | (c >> 6));
		put_queue(vc, 0x80 | (c & 0x3f));
	} else if (c < 0x10000) {
		if (c >= 0xD800 && c < 0xE000)
			return;		/* UTF-16 surrogate, not a character */
		if (c == 0xFFFF)
			return;		/* not a valid character */
		/* 1110**** 10****** 10****** */
		put_queue(vc, 0xe0 | (c >> 12));
		put_queue(vc, 0x80 | ((c >> 6) & 0x3f));
		put_queue(vc, 0x80 | (c & 0x3f));
	} else if (c < 0x110000) {
		/* 11110*** 10****** 10****** 10****** */
		put_queue(vc, 0xf0 | (c >> 18));
		put_queue(vc, 0x80 | ((c >> 12) & 0x3f));
		put_queue(vc, 0x80 | ((c >> 6) & 0x3f));
		put_queue(vc, 0x80 | (c & 0x3f));
	}
}
/*
* Called after returning from RAW mode or when changing consoles - recompute
* shift_down[] and shift_state from key_down[] maybe called when keymap is
* undefined, so that shiftkey release is seen. The caller must hold the
* kbd_event_lock.
*/
/* Rebuild shift_down[] and shift_state from the key_down[] bitmap.
 * Caller must hold kbd_event_lock (see comment above). */
static void do_compute_shiftstate(void)
{
	unsigned int i, j, k, sym, val;

	shift_state = 0;
	memset(shift_down, 0, sizeof(shift_down));

	/* Walk every currently-down key and re-count the shift-like ones. */
	for (i = 0; i < ARRAY_SIZE(key_down); i++) {

		if (!key_down[i])
			continue;

		k = i * BITS_PER_LONG;

		for (j = 0; j < BITS_PER_LONG; j++, k++) {

			if (!test_bit(k, key_down))
				continue;

			sym = U(key_maps[0][k]);
			if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
				continue;

			val = KVAL(sym);
			/* CapsShift counts as plain Shift. */
			if (val == KVAL(K_CAPSSHIFT))
				val = KVAL(K_SHIFT);

			shift_down[val]++;
			shift_state |= (1 << val);
		}
	}
}

/* We still have to export this method to vt.c */
/* Locked wrapper around do_compute_shiftstate(). */
void compute_shiftstate(void)
{
	unsigned long flags;
	spin_lock_irqsave(&kbd_event_lock, flags);
	do_compute_shiftstate();
	spin_unlock_irqrestore(&kbd_event_lock, flags);
}
/*
* We have a combining character DIACR here, followed by the character CH.
* If the combination occurs in the table, return the corresponding value.
* Otherwise, if CH is a space or equals DIACR, return DIACR.
* Otherwise, conclude that DIACR was not combining after all,
* queue it and return CH.
*/
static unsigned int handle_diacr(struct vc_data *vc, unsigned int ch)
{
	unsigned int d = diacr;		/* consume the pending dead key */
	unsigned int i;

	diacr = 0;

	if ((d & ~0xff) == BRL_UC_ROW) {
		/* Braille dead key: combine by OR-ing dot patterns. */
		if ((ch & ~0xff) == BRL_UC_ROW)
			return d | ch;
	} else {
		for (i = 0; i < accent_table_size; i++)
			if (accent_table[i].diacr == d && accent_table[i].base == ch)
				return accent_table[i].result;
	}

	/* Space, braille-blank, or the same dead key again: emit the
	 * diacritical itself. */
	if (ch == ' ' || ch == (BRL_UC_ROW|0) || ch == d)
		return d;

	/* No combination exists: flush the pending diacritical first,
	 * then let the caller queue ch unchanged. */
	if (kbd->kbdmode == VC_UNICODE)
		to_utf8(vc, d);
	else {
		int c = conv_uni_to_8bit(d);
		if (c != -1)
			put_queue(vc, c);
	}

	return ch;
}
/*
* Special function handlers
*/
/* Enter key: flush any pending dead-key accent, then emit CR (plus LF
 * when the console is in CRLF mode). */
static void fn_enter(struct vc_data *vc)
{
	if (diacr) {
		if (kbd->kbdmode == VC_UNICODE)
			to_utf8(vc, diacr);
		else {
			int c = conv_uni_to_8bit(diacr);
			if (c != -1)
				put_queue(vc, c);
		}
		diacr = 0;
	}

	put_queue(vc, 13);
	if (vc_kbd_mode(kbd, VC_CRLF))
		put_queue(vc, 10);
}
/* Toggle CapsLock; auto-repeat is ignored so a held key flips it once. */
static void fn_caps_toggle(struct vc_data *vc)
{
	if (rep)
		return;

	chg_vc_kbd_led(kbd, VC_CAPSLOCK);
}

/* Force CapsLock on (no toggle); auto-repeat is ignored. */
static void fn_caps_on(struct vc_data *vc)
{
	if (rep)
		return;

	set_vc_kbd_led(kbd, VC_CAPSLOCK);
}

/* Dump the interrupted context's registers, if any. */
static void fn_show_ptregs(struct vc_data *vc)
{
	struct pt_regs *regs = get_irq_regs();

	if (regs)
		show_regs(regs);
}

/* Scroll-Lock style hold: toggle the console tty between stopped and
 * started. */
static void fn_hold(struct vc_data *vc)
{
	struct tty_struct *tty = vc->port.tty;

	if (rep || !tty)
		return;

	/*
	 * Note: SCROLLOCK will be set (cleared) by stop_tty (start_tty);
	 * these routines are also activated by ^S/^Q.
	 * (And SCROLLOCK can also be set by the ioctl KDSKBLED.)
	 */
	if (tty->stopped)
		start_tty(tty);
	else
		stop_tty(tty);
}

/* NumLock: in application-keypad mode send the app-mode code, otherwise
 * toggle the NumLock flag via fn_bare_num(). */
static void fn_num(struct vc_data *vc)
{
	if (vc_kbd_mode(kbd, VC_APPLIC))
		applkey(vc, 'P', 1);
	else
		fn_bare_num(vc);
}

/*
 * Bind this to Shift-NumLock if you work in application keypad mode
 * but want to be able to change the NumLock flag.
 * Bind this to NumLock if you prefer that the NumLock key always
 * changes the NumLock flag.
 */
static void fn_bare_num(struct vc_data *vc)
{
	if (!rep)
		chg_vc_kbd_led(kbd, VC_NUMLOCK);
}

static void fn_lastcons(struct vc_data *vc)
{
	/* switch to the last used console, ChN */
	set_console(last_console);
}
/* Switch to the next lower allocated console, wrapping below 0. */
static void fn_dec_console(struct vc_data *vc)
{
	int i, cur = fg_console;

	/* Currently switching? Queue this next switch relative to that. */
	if (want_console != -1)
		cur = want_console;

	for (i = cur - 1; i != cur; i--) {
		if (i == -1)
			i = MAX_NR_CONSOLES - 1;
		if (vc_cons_allocated(i))
			break;
	}
	set_console(i);
}

/* Switch to the next higher allocated console, wrapping at the top. */
static void fn_inc_console(struct vc_data *vc)
{
	int i, cur = fg_console;

	/* Currently switching? Queue this next switch relative to that. */
	if (want_console != -1)
		cur = want_console;

	for (i = cur+1; i != cur; i++) {
		if (i == MAX_NR_CONSOLES)
			i = 0;
		if (vc_cons_allocated(i))
			break;
	}
	set_console(i);
}
/* Queue a BREAK condition on the console tty. */
static void fn_send_intr(struct vc_data *vc)
{
	struct tty_struct *tty = vc->port.tty;

	if (!tty)
		return;
	tty_insert_flip_char(tty, 0, TTY_BREAK);
	con_schedule_flip(tty);
}

/* Scroll the console forward (toward the newest output). */
static void fn_scroll_forw(struct vc_data *vc)
{
	scrollfront(vc, 0);
}

/* Scroll the console back through the scrollback buffer. */
static void fn_scroll_back(struct vc_data *vc)
{
	scrollback(vc, 0);
}

/* Dump memory statistics to the console. */
static void fn_show_mem(struct vc_data *vc)
{
	show_mem(0);
}

/* Dump the task list / scheduler state to the console. */
static void fn_show_state(struct vc_data *vc)
{
	show_state();
}

/* Ctrl-Alt-Del: trigger the configured reboot action. */
static void fn_boot_it(struct vc_data *vc)
{
	ctrl_alt_del();
}

/* Compose key: treat the next emitted character as a dead key. */
static void fn_compose(struct vc_data *vc)
{
	dead_key_next = true;
}

/* Send the configured signal to the registered "spawn console" process;
 * if delivery fails, drop the stale pid reference. */
static void fn_spawn_con(struct vc_data *vc)
{
	spin_lock(&vt_spawn_con.lock);
	if (vt_spawn_con.pid)
		if (kill_pid(vt_spawn_con.pid, vt_spawn_con.sig, 1)) {
			put_pid(vt_spawn_con.pid);
			vt_spawn_con.pid = NULL;
		}
	spin_unlock(&vt_spawn_con.lock);
}

/* Secure Attention Key: defer to the workqueue via schedule_work(). */
static void fn_SAK(struct vc_data *vc)
{
	struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;

	schedule_work(SAK_work);
}

/* No-op keysym: just resynchronise the shift state. */
static void fn_null(struct vc_data *vc)
{
	do_compute_shiftstate();
}
/*
* Special key handlers
*/
/* KT_IGNORE keysym: deliberately does nothing. */
static void k_ignore(struct vc_data *vc, unsigned char value, char up_flag)
{
}

/* Dispatch a KT_SPEC keysym to its fn_*() handler on key press.  In raw,
 * medium-raw or off keyboard modes only SAK is honoured. */
static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
{
	if (up_flag)
		return;
	if (value >= ARRAY_SIZE(fn_handler))
		return;
	if ((kbd->kbdmode == VC_RAW ||
	     kbd->kbdmode == VC_MEDIUMRAW ||
	     kbd->kbdmode == VC_OFF) &&
	     value != KVAL(K_SAK))
		return;	/* SAK is allowed even in raw mode */
	fn_handler[value](vc);
}

/* KT_LOWERCASE is resolved before dispatch; reaching here is a bug. */
static void k_lowercase(struct vc_data *vc, unsigned char value, char up_flag)
{
	pr_err("k_lowercase was called - impossible\n");
}
static void k_unicode(struct vc_data *vc, unsigned int value, char up_flag)
{
if (up_flag)
return; /* no action, if this is a key release */
if (diacr)
value = handle_diacr(vc, value);
if (dead_key_next) {
dead_key_next = false;
diacr = value;
return;
}
if (kbd->kbdmode == VC_UNICODE)
to_utf8(vc, value);
else {
int c = conv_uni_to_8bit(value);
if (c != -1)
put_queue(vc, c);
}
}
/*
* Handle dead key. Note that we now may have several
* dead keys modifying the same character. Very useful
* for Vietnamese.
*/
/*
 * Dead key (Unicode form): on press, combine with any already-pending
 * diacritic so several dead keys can stack on one character.
 */
static void k_deadunicode(struct vc_data *vc, unsigned int value, char up_flag)
{
	if (up_flag)
		return;

	diacr = (diacr ? handle_diacr(vc, value) : value);
}
/* KT_LATIN: the 8-bit keymap value maps to itself, routed via Unicode. */
static void k_self(struct vc_data *vc, unsigned char value, char up_flag)
{
	k_unicode(vc, conv_8bit_to_uni(value), up_flag);
}
/* KT_DEAD2: the keymap value is the diacritic code point itself. */
static void k_dead2(struct vc_data *vc, unsigned char value, char up_flag)
{
	k_deadunicode(vc, value, up_flag);
}
/*
* Obsolete - for backwards compatibility only
*/
/*
 * KT_DEAD (legacy): value is an index into a fixed table of ASCII
 * diacritics.  value is bounded by max_vals[] during keymap dispatch —
 * presumably < NR_DEAD; TODO confirm against max_vals definition.
 */
static void k_dead(struct vc_data *vc, unsigned char value, char up_flag)
{
	static const unsigned char ret_diacr[NR_DEAD] = {'`', '\'', '^', '~', '"', ',' };

	k_deadunicode(vc, ret_diacr[value], up_flag);
}
/* KT_CONS: switch the foreground console to VT number @value on press. */
static void k_cons(struct vc_data *vc, unsigned char value, char up_flag)
{
	if (!up_flag)
		set_console(value);
}
/*
 * KT_FN: on key press, queue the programmable string bound to function
 * key @value (set via KDSKBSENT); complain about out-of-range values.
 */
static void k_fn(struct vc_data *vc, unsigned char value, char up_flag)
{
	if (up_flag)
		return;

	if ((unsigned)value >= ARRAY_SIZE(func_table)) {
		pr_err("k_fn called with value=%d\n", value);
		return;
	}

	if (func_table[value])
		puts_queue(vc, func_table[value]);
}
/*
 * KT_CUR: cursor keys, emitted as an escape/application sequence.
 * value indexes "BDCA" (down/left/right/up); it is bounded by max_vals[]
 * during keymap dispatch — presumably <= 3; TODO confirm.
 */
static void k_cur(struct vc_data *vc, unsigned char value, char up_flag)
{
	static const char cur_chars[] = "BDCA";

	if (up_flag)
		return;

	applkey(vc, cur_chars[value], vc_kbd_mode(kbd, VC_CKMODE));
}
/*
 * KT_PAD: numeric keypad.  Depending on application mode, NumLock and
 * Shift, a keypad key produces an application escape sequence, a
 * cursor/editing function, or its literal character.
 */
static void k_pad(struct vc_data *vc, unsigned char value, char up_flag)
{
	static const char pad_chars[] = "0123456789+-*/\015,.?()#";
	static const char app_map[] = "pqrstuvwxylSRQMnnmPQS";

	if (up_flag)
		return;		/* no action, if this is a key release */

	/* kludge... shift forces cursor/number keys */
	if (vc_kbd_mode(kbd, VC_APPLIC) && !shift_down[KG_SHIFT]) {
		applkey(vc, app_map[value], 1);
		return;
	}

	/* NumLock off: keypad acts as cursor/editing keys */
	if (!vc_kbd_led(kbd, VC_NUMLOCK)) {
		switch (value) {
		case KVAL(K_PCOMMA):
		case KVAL(K_PDOT):
			k_fn(vc, KVAL(K_REMOVE), 0);
			return;
		case KVAL(K_P0):
			k_fn(vc, KVAL(K_INSERT), 0);
			return;
		case KVAL(K_P1):
			k_fn(vc, KVAL(K_SELECT), 0);
			return;
		case KVAL(K_P2):
			k_cur(vc, KVAL(K_DOWN), 0);
			return;
		case KVAL(K_P3):
			k_fn(vc, KVAL(K_PGDN), 0);
			return;
		case KVAL(K_P4):
			k_cur(vc, KVAL(K_LEFT), 0);
			return;
		case KVAL(K_P6):
			k_cur(vc, KVAL(K_RIGHT), 0);
			return;
		case KVAL(K_P7):
			k_fn(vc, KVAL(K_FIND), 0);
			return;
		case KVAL(K_P8):
			k_cur(vc, KVAL(K_UP), 0);
			return;
		case KVAL(K_P9):
			k_fn(vc, KVAL(K_PGUP), 0);
			return;
		case KVAL(K_P5):
			applkey(vc, 'G', vc_kbd_mode(kbd, VC_APPLIC));
			return;
		}
	}

	/* NumLock on (or K_PPLUS etc.): emit the literal character */
	put_queue(vc, pad_chars[value]);

	/* keypad Enter also sends LF in CRLF mode */
	if (value == KVAL(K_PENTER) && vc_kbd_mode(kbd, VC_CRLF))
		put_queue(vc, 10);
}
/*
 * KT_SHIFT: track press/release depth of each modifier in shift_down[]
 * and fold that into the shift_state bitmask.  Also flushes a pending
 * Alt-numpad character (npadch) when the modifier is released.
 */
static void k_shift(struct vc_data *vc, unsigned char value, char up_flag)
{
	int old_state = shift_state;

	if (rep)
		return;		/* autorepeat must not bump the counters */
	/*
	 * Mimic typewriter:
	 * a CapsShift key acts like Shift but undoes CapsLock
	 */
	if (value == KVAL(K_CAPSSHIFT)) {
		value = KVAL(K_SHIFT);
		if (!up_flag)
			clr_vc_kbd_led(kbd, VC_CAPSLOCK);
	}

	if (up_flag) {
		/*
		 * handle the case that two shift or control
		 * keys are depressed simultaneously
		 */
		if (shift_down[value])
			shift_down[value]--;
	} else
		shift_down[value]++;

	if (shift_down[value])
		shift_state |= (1 << value);
	else
		shift_state &= ~(1 << value);

	/* kludge: releasing Alt commits an Alt-numpad entered code */
	if (up_flag && shift_state != old_state && npadch != -1) {
		if (kbd->kbdmode == VC_UNICODE)
			to_utf8(vc, npadch);
		else
			put_queue(vc, npadch & 0xff);
		npadch = -1;
	}
}
/*
 * KT_META: emit the character with the meta modifier, either as an ESC
 * prefix (VC_META mode) or by setting the high bit.
 */
static void k_meta(struct vc_data *vc, unsigned char value, char up_flag)
{
	if (up_flag)
		return;

	if (!vc_kbd_mode(kbd, VC_META)) {
		put_queue(vc, value | 0x80);
		return;
	}

	put_queue(vc, '\033');
	put_queue(vc, value);
}
/*
 * KT_ASCII: Alt-numpad direct character entry.  Each digit pressed
 * while Alt (decimal) or AltGr (hex) is held is accumulated into
 * npadch; k_shift() emits the result when the modifier is released.
 *
 * Fix: the accumulation is done in unsigned arithmetic.  The previous
 * signed "npadch * base + value" overflowed (undefined behavior) when
 * the user kept typing digits; unsigned arithmetic wraps instead and
 * the emitted value is masked to 8 bits anyway in non-unicode mode.
 */
static void k_ascii(struct vc_data *vc, unsigned char value, char up_flag)
{
	unsigned int base;

	if (up_flag)
		return;

	if (value < 10) {
		/* decimal input of code, while Alt depressed */
		base = 10;
	} else {
		/* hexadecimal input of code, while AltGr depressed */
		value -= 10;
		base = 16;
	}

	if (npadch == -1)
		npadch = value;
	else
		/* accumulate unsigned: defined wraparound, no signed UB */
		npadch = (int)((unsigned int)npadch * base + value);
}
/* KT_LOCK: toggle a keyboard lock flag on initial key press only. */
static void k_lock(struct vc_data *vc, unsigned char value, char up_flag)
{
	if (!up_flag && !rep)
		chg_vc_kbd_lock(kbd, value);
}
/*
 * KT_SLOCK: sticky lock.  Acts as a shift for this press, then toggles
 * the sticky-lock flag; if the resulting combined map does not exist,
 * re-toggle so AltGr-style combinations keep working.
 */
static void k_slock(struct vc_data *vc, unsigned char value, char up_flag)
{
	k_shift(vc, value, up_flag);
	if (up_flag || rep)
		return;

	chg_vc_kbd_slock(kbd, value);
	/* try to make Alt, oops, AltGr and such work */
	if (!key_maps[kbd->lockstate ^ kbd->slockstate]) {
		kbd->slockstate = 0;
		chg_vc_kbd_slock(kbd, value);
	}
}
/* by default, 300ms interval for combination release */
static unsigned brl_timeout = 300;
MODULE_PARM_DESC(brl_timeout, "Braille keys release delay in ms (0 for commit on first key release)");
module_param(brl_timeout, uint, 0644);

/* how many chords make up one braille pattern (0 = emit as dead key) */
static unsigned brl_nbchords = 1;
MODULE_PARM_DESC(brl_nbchords, "Number of chords that produce a braille pattern (0 for dead chords)");
module_param(brl_nbchords, uint, 0644);
/*
 * Commit a braille chord.  With brl_nbchords == 0 the pattern behaves
 * as a dead key; otherwise patterns are OR-accumulated until the
 * configured number of chords has been entered, then emitted as one
 * character in the Unicode braille row.
 */
static void k_brlcommit(struct vc_data *vc, unsigned int pattern, char up_flag)
{
	static unsigned long chords;	/* chords accumulated so far */
	static unsigned committed;	/* OR of their dot patterns */

	if (!brl_nbchords)
		k_deadunicode(vc, BRL_UC_ROW | pattern, up_flag);
	else {
		committed |= pattern;
		chords++;
		if (chords == brl_nbchords) {
			k_unicode(vc, BRL_UC_ROW | committed, up_flag);
			chords = 0;
			committed = 0;
		}
	}
}
/*
 * KT_BRL: braille dot keys 1-8 (value 0 = space).  Dots held together
 * form a chord; with brl_timeout set, releases within the timeout
 * window are treated as part of the same chord, and the chord commits
 * once all dots are released.
 */
static void k_brl(struct vc_data *vc, unsigned char value, char up_flag)
{
	static unsigned pressed, committing;
	static unsigned long releasestart;	/* jiffies of first release */

	if (kbd->kbdmode != VC_UNICODE) {
		if (!up_flag)
			pr_warning("keyboard mode must be unicode for braille patterns\n");
		return;
	}

	if (!value) {
		/* dot value 0: plain braille space */
		k_unicode(vc, BRL_UC_ROW, up_flag);
		return;
	}

	if (value > 8)
		return;

	if (!up_flag) {
		pressed |= 1 << (value - 1);
		if (!brl_timeout)
			committing = pressed;
	} else if (brl_timeout) {
		/* start (or restart) the release window on a stale commit */
		if (!committing ||
		    time_after(jiffies,
			       releasestart + msecs_to_jiffies(brl_timeout))) {
			committing = pressed;
			releasestart = jiffies;
		}
		pressed &= ~(1 << (value - 1));
		if (!pressed && committing) {
			/* all dots released: emit the chord */
			k_brlcommit(vc, committing, 0);
			committing = 0;
		}
	} else {
		if (committing) {
			k_brlcommit(vc, committing, 0);
			committing = 0;
		}
		pressed &= ~(1 << (value - 1));
	}
}
/*
* The leds display either (i) the status of NumLock, CapsLock, ScrollLock,
* or (ii) whatever pattern of lights people want to show using KDSETLED,
* or (iii) specified bits of specified words in kernel memory.
*/
/* Return the LED pattern last pushed to the input handlers by kbd_bh(). */
unsigned char getledstate(void)
{
	return ledstate;
}
/*
 * KDSETLED backend: a value in 0..7 pins the LEDs to that pattern
 * (LED_SHOW_IOCTL); any other value reverts to showing the lock flags.
 */
void setledstate(struct kbd_struct *kbd, unsigned int led)
{
	unsigned long flags;

	spin_lock_irqsave(&kbd_event_lock, flags);
	if (!(led & ~7)) {
		ledioctl = led;
		kbd->ledmode = LED_SHOW_IOCTL;
	} else
		kbd->ledmode = LED_SHOW_FLAGS;

	set_leds();
	spin_unlock_irqrestore(&kbd_event_lock, flags);
}
/*
 * Compute the LED pattern the foreground console wants shown: the
 * KDSETLED override, the lock flags, or (LED_SHOW_MEM) bits sampled
 * from arbitrary kernel memory via ledptrs[].
 */
static inline unsigned char getleds(void)
{
	struct kbd_struct *kbd = kbd_table + fg_console;
	unsigned char leds;
	int i;

	if (kbd->ledmode == LED_SHOW_IOCTL)
		return ledioctl;

	leds = kbd->ledflagstate;

	if (kbd->ledmode == LED_SHOW_MEM) {
		for (i = 0; i < 3; i++)
			if (ledptrs[i].valid) {
				if (*ledptrs[i].addr & ledptrs[i].mask)
					leds |= (1 << i);
				else
					leds &= ~(1 << i);
			}
	}
	return leds;
}
/*
 * Per-handle callback for input_handler_for_each_handle(): push the
 * packed LED byte (bit0=ScrollLock, bit1=NumLock, bit2=CapsLock) to a
 * device that has LED capability.  Always returns 0 to keep iterating.
 */
static int kbd_update_leds_helper(struct input_handle *handle, void *data)
{
	unsigned char leds = *(unsigned char *)data;

	if (test_bit(EV_LED, handle->dev->evbit)) {
		input_inject_event(handle, EV_LED, LED_SCROLLL, !!(leds & 0x01));
		input_inject_event(handle, EV_LED, LED_NUML, !!(leds & 0x02));
		input_inject_event(handle, EV_LED, LED_CAPSL, !!(leds & 0x04));
		input_inject_event(handle, EV_SYN, SYN_REPORT, 0);
	}

	return 0;
}
/**
* vt_get_leds - helper for braille console
* @console: console to read
* @flag: flag we want to check
*
* Check the status of a keyboard led flag and report it back
*/
/* Read one LED lock flag of @console under kbd_event_lock. */
int vt_get_leds(int console, int flag)
{
	unsigned long flags;
	struct kbd_struct * kbd = kbd_table + console;
	int ret;

	spin_lock_irqsave(&kbd_event_lock, flags);
	ret = vc_kbd_led(kbd, flag);
	spin_unlock_irqrestore(&kbd_event_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(vt_get_leds);
/**
* vt_set_led_state - set LED state of a console
* @console: console to set
* @leds: LED bits
*
* Set the LEDs on a console. This is a wrapper for the VT layer
* so that we can keep kbd knowledge internal
*/
/* VT-layer wrapper: set the LED state of @console (see setledstate()). */
void vt_set_led_state(int console, int leds)
{
	struct kbd_struct * kbd = kbd_table + console;
	setledstate(kbd, leds);
}
/**
* vt_kbd_con_start - Keyboard side of console start
* @console: console
*
* Handle console start. This is a wrapper for the VT layer
* so that we can keep kbd knowledge internal
*
* FIXME: We eventually need to hold the kbd lock here to protect
* the LED updating. We can't do it yet because fn_hold calls stop_tty
* and start_tty under the kbd_event_lock, while normal tty paths
* don't hold the lock. We probably need to split out an LED lock
* but not during an -rc release!
*/
/* Console (re)started: clear the ScrollLock LED flag and refresh LEDs. */
void vt_kbd_con_start(int console)
{
	struct kbd_struct * kbd = kbd_table + console;
	/* locking deliberately omitted — see FIXME in the kernel-doc above */
	/* unsigned long flags; */
	/* spin_lock_irqsave(&kbd_event_lock, flags); */
	clr_vc_kbd_led(kbd, VC_SCROLLOCK);
	set_leds();
	/* spin_unlock_irqrestore(&kbd_event_lock, flags); */
}
/**
* vt_kbd_con_stop - Keyboard side of console stop
* @console: console
*
* Handle console stop. This is a wrapper for the VT layer
* so that we can keep kbd knowledge internal
*
* FIXME: We eventually need to hold the kbd lock here to protect
* the LED updating. We can't do it yet because fn_hold calls stop_tty
* and start_tty under the kbd_event_lock, while normal tty paths
* don't hold the lock. We probably need to split out an LED lock
* but not during an -rc release!
*/
/* Console stopped: set the ScrollLock LED flag and refresh LEDs. */
void vt_kbd_con_stop(int console)
{
	struct kbd_struct * kbd = kbd_table + console;
	/* locking deliberately omitted — see FIXME in the kernel-doc above */
	/* unsigned long flags; */
	/* spin_lock_irqsave(&kbd_event_lock, flags); */
	set_vc_kbd_led(kbd, VC_SCROLLOCK);
	set_leds();
	/* spin_unlock_irqrestore(&kbd_event_lock, flags); */
}
/*
* This is the tasklet that updates LED state on all keyboards
* attached to the box. The reason we use tasklet is that we
* need to handle the scenario when keyboard handler is not
* registered yet but we already getting updates from the VT to
* update led state.
*/
/*
 * Tasklet body: if the desired LED pattern changed, push it to every
 * attached keyboard and cache it in ledstate.  Starts disabled; enabled
 * by kbd_init() once the handler is registered.
 */
static void kbd_bh(unsigned long dummy)
{
	unsigned char leds = getleds();

	if (leds != ledstate) {
		input_handler_for_each_handle(&kbd_handler, &leds,
					      kbd_update_leds_helper);
		ledstate = leds;
	}
}

DECLARE_TASKLET_DISABLED(keyboard_tasklet, kbd_bh, 0);
/*
 * AT scancode emulation: on architectures whose console keyboards
 * historically spoke the AT protocol, VC_RAW mode translates input
 * keycodes back to set-1 scancodes.  HW_RAW(dev) detects an i8042
 * keyboard that already delivers raw scancodes via EV_MSC/MSC_RAW.
 */
#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_ALPHA) ||\
    defined(CONFIG_MIPS) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) ||\
    defined(CONFIG_PARISC) || defined(CONFIG_SUPERH) ||\
    (defined(CONFIG_ARM) && defined(CONFIG_KEYBOARD_ATKBD) && !defined(CONFIG_ARCH_RPC)) ||\
    defined(CONFIG_AVR32)

#define HW_RAW(dev) (test_bit(EV_MSC, dev->evbit) && test_bit(MSC_RAW, dev->mscbit) &&\
			((dev)->id.bustype == BUS_I8042) && ((dev)->id.vendor == 0x0001) && ((dev)->id.product == 0x0001))

/* keycode -> AT set-1 scancode; bit 8 set means an 0xe0 prefix is needed */
static const unsigned short x86_keycodes[256] =
	{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
	 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
	 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
	 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
	 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
	 80, 81, 82, 83, 84,118, 86, 87, 88,115,120,119,121,112,123, 92,
	284,285,309,  0,312, 91,327,328,329,331,333,335,336,337,338,339,
	367,288,302,304,350, 89,334,326,267,126,268,269,125,347,348,349,
	360,261,262,263,268,376,100,101,321,316,373,286,289,102,351,355,
	103,104,105,275,287,279,258,106,274,107,294,364,358,363,362,361,
	291,108,381,281,290,272,292,305,280, 99,112,257,306,359,113,114,
	264,117,271,374,379,265,266, 93, 94, 95, 85,259,375,260, 90,116,
	377,109,111,277,278,282,283,295,296,297,299,300,301,293,303,307,
	308,310,313,314,315,317,318,319,320,357,322,323,324,325,276,330,
	332,340,365,342,343,344,345,346,356,270,341,368,369,370,371,372 };

#ifdef CONFIG_SPARC
/* Stop-A (L1-A) break sequence state, see kbd_keycode() */
static int sparc_l1_a_state;
extern void sun_do_break(void);
#endif
/*
 * Emit the AT set-1 scancode sequence for @keycode into the console
 * queue.  @up_flag is 0x80 for key release (ORed into the scancode).
 * Returns 0 on success, -1 if the keycode has no scancode equivalent.
 */
static int emulate_raw(struct vc_data *vc, unsigned int keycode,
		       unsigned char up_flag)
{
	int code;

	switch (keycode) {

	case KEY_PAUSE:
		/* Pause has a unique 0xe1-prefixed make sequence */
		put_queue(vc, 0xe1);
		put_queue(vc, 0x1d | up_flag);
		put_queue(vc, 0x45 | up_flag);
		break;

	case KEY_HANGEUL:
		if (!up_flag)
			put_queue(vc, 0xf2);
		break;

	case KEY_HANJA:
		if (!up_flag)
			put_queue(vc, 0xf1);
		break;

	case KEY_SYSRQ:
		/*
		 * Real AT keyboards (that's what we're trying
		 * to emulate here) emit 0xe0 0x2a 0xe0 0x37 when
		 * pressing PrtSc/SysRq alone, but simply 0x54
		 * when pressing Alt+PrtSc/SysRq.
		 */
		if (test_bit(KEY_LEFTALT, key_down) ||
		    test_bit(KEY_RIGHTALT, key_down)) {
			put_queue(vc, 0x54 | up_flag);
		} else {
			put_queue(vc, 0xe0);
			put_queue(vc, 0x2a | up_flag);
			put_queue(vc, 0xe0);
			put_queue(vc, 0x37 | up_flag);
		}
		break;

	default:
		if (keycode > 255)
			return -1;

		code = x86_keycodes[keycode];
		if (!code)
			return -1;

		/* bit 8 in the table requests an 0xe0 prefix */
		if (code & 0x100)
			put_queue(vc, 0xe0);
		put_queue(vc, (code & 0x7f) | up_flag);

		break;
	}

	return 0;
}
#else

#define HW_RAW(dev)	0

/* No AT scancode table: pass keycodes through when they fit in 7 bits. */
static int emulate_raw(struct vc_data *vc, unsigned int keycode, unsigned char up_flag)
{
	if (keycode > 127)
		return -1;

	put_queue(vc, keycode | up_flag);
	return 0;
}
#endif
/* Queue a hardware raw scancode byte when the foreground VC is in raw mode. */
static void kbd_rawcode(unsigned char data)
{
	struct vc_data *vc = vc_cons[fg_console].d;

	kbd = kbd_table + vc->vc_num;
	if (kbd->kbdmode == VC_RAW)
		put_queue(vc, data);
}
/*
 * Core keyboard event handler: translate an input-layer keycode for the
 * foreground console.  Handles raw/medium-raw modes, key-down state,
 * keymap lookup, the keyboard notifier chain, and finally dispatches to
 * the per-type k_*() handler.  Runs under kbd_event_lock (see
 * kbd_event()).
 */
static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
{
	struct vc_data *vc = vc_cons[fg_console].d;
	unsigned short keysym, *key_map;
	unsigned char type;
	bool raw_mode;
	struct tty_struct *tty;
	int shift_final;
	struct keyboard_notifier_param param = { .vc = vc, .value = keycode, .down = down };
	int rc;

	tty = vc->port.tty;

	if (tty && (!tty->driver_data)) {
		/* No driver data? Strange. Okay we fix it then. */
		tty->driver_data = vc;
	}

	kbd = kbd_table + vc->vc_num;

#ifdef CONFIG_SPARC
	if (keycode == KEY_STOP)
		sparc_l1_a_state = down;
#endif

	rep = (down == 2);	/* down == 2 flags an autorepeat event */

	raw_mode = (kbd->kbdmode == VC_RAW);
	if (raw_mode && !hw_raw)
		if (emulate_raw(vc, keycode, !down << 7))
			if (keycode < BTN_MISC && printk_ratelimit())
				pr_warning("can't emulate rawmode for keycode %d\n",
					   keycode);

#ifdef CONFIG_SPARC
	/* Stop-A: drop to the PROM */
	if (keycode == KEY_A && sparc_l1_a_state) {
		sparc_l1_a_state = false;
		sun_do_break();
	}
#endif

	if (kbd->kbdmode == VC_MEDIUMRAW) {
		/*
		 * This is extended medium raw mode, with keys above 127
		 * encoded as 0, high 7 bits, low 7 bits, with the 0 bearing
		 * the 'up' flag if needed. 0 is reserved, so this shouldn't
		 * interfere with anything else. The two bytes after 0 will
		 * always have the up flag set not to interfere with older
		 * applications. This allows for 16384 different keycodes,
		 * which should be enough.
		 */
		if (keycode < 128) {
			put_queue(vc, keycode | (!down << 7));
		} else {
			put_queue(vc, !down << 7);
			put_queue(vc, (keycode >> 7) | 0x80);
			put_queue(vc, keycode | 0x80);
		}
		raw_mode = true;
	}

	if (down)
		set_bit(keycode, key_down);
	else
		clear_bit(keycode, key_down);

	if (rep &&
	    (!vc_kbd_mode(kbd, VC_REPEAT) ||
	     (tty && !L_ECHO(tty) && tty_chars_in_buffer(tty)))) {
		/*
		 * Don't repeat a key if the input buffers are not empty and the
		 * characters get aren't echoed locally. This makes key repeat
		 * usable with slow applications and under heavy loads.
		 */
		return;
	}

	param.shift = shift_final = (shift_state | kbd->slockstate) ^ kbd->lockstate;
	param.ledstate = kbd->ledflagstate;
	key_map = key_maps[shift_final];

	rc = atomic_notifier_call_chain(&keyboard_notifier_list,
					KBD_KEYCODE, &param);
	if (rc == NOTIFY_STOP || !key_map) {
		/* consumed by a notifier, or no map for this shift combo */
		atomic_notifier_call_chain(&keyboard_notifier_list,
					   KBD_UNBOUND_KEYCODE, &param);
		do_compute_shiftstate();
		kbd->slockstate = 0;
		return;
	}

	if (keycode < NR_KEYS)
		keysym = key_map[keycode];
	else if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
		keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
	else
		return;

	type = KTYP(keysym);

	if (type < 0xf0) {
		/* plain Unicode keymap entry */
		param.value = keysym;
		rc = atomic_notifier_call_chain(&keyboard_notifier_list,
						KBD_UNICODE, &param);
		if (rc != NOTIFY_STOP)
			if (down && !raw_mode)
				to_utf8(vc, keysym);
		return;
	}

	type -= 0xf0;

	if (type == KT_LETTER) {
		/* KT_LETTER honours CapsLock: re-look-up with Shift toggled */
		type = KT_LATIN;
		if (vc_kbd_led(kbd, VC_CAPSLOCK)) {
			key_map = key_maps[shift_final ^ (1 << KG_SHIFT)];
			if (key_map)
				keysym = key_map[keycode];
		}
	}

	param.value = keysym;
	rc = atomic_notifier_call_chain(&keyboard_notifier_list,
					KBD_KEYSYM, &param);
	if (rc == NOTIFY_STOP)
		return;

	/* in raw/off modes only KT_SPEC and KT_SHIFT keys act */
	if ((raw_mode || kbd->kbdmode == VC_OFF) && type != KT_SPEC && type != KT_SHIFT)
		return;

	(*k_handler[type])(vc, keysym & 0xff, !down);

	param.ledstate = kbd->ledflagstate;
	atomic_notifier_call_chain(&keyboard_notifier_list, KBD_POST_KEYSYM, &param);

	/* any non-sticky-lock key cancels sticky locks */
	if (type != KT_SLOCK)
		kbd->slockstate = 0;
}
/*
 * Input-core event callback: route raw scancodes and key events to the
 * handlers, then kick the LED tasklet and unblank the console.
 */
static void kbd_event(struct input_handle *handle, unsigned int event_type,
		      unsigned int event_code, int value)
{
	/* We are called with interrupts disabled, just take the lock */
	spin_lock(&kbd_event_lock);

	if (event_type == EV_MSC && event_code == MSC_RAW && HW_RAW(handle->dev))
		kbd_rawcode(value);
	if (event_type == EV_KEY)
		kbd_keycode(event_code, value, HW_RAW(handle->dev));

	spin_unlock(&kbd_event_lock);

	tasklet_schedule(&keyboard_tasklet);
	do_poke_blanked_console = 1;
	schedule_console_callback();
}
/*
 * Decide whether this input device concerns the console keyboard
 * handler: anything with a beeper (EV_SND), or with at least one
 * keyboard key or braille dot key.
 */
static bool kbd_match(struct input_handler *handler, struct input_dev *dev)
{
	int i;

	if (test_bit(EV_SND, dev->evbit))
		return true;

	if (!test_bit(EV_KEY, dev->evbit))
		return false;

	for (i = KEY_RESERVED; i < BTN_MISC; i++)
		if (test_bit(i, dev->keybit))
			return true;

	for (i = KEY_BRL_DOT1; i <= KEY_BRL_DOT10; i++)
		if (test_bit(i, dev->keybit))
			return true;

	return false;
}
/*
* When a keyboard (or other input device) is found, the kbd_connect
* function is called. The function then looks at the device, and if it
* likes it, it can open it and get events from it. In this (kbd_connect)
* function, we should decide which VT to bind that keyboard to initially.
*/
/*
 * Attach the console keyboard handler to a newly matched input device:
 * allocate a handle, register it with the input core and open the
 * device.  Uses goto-chain cleanup on failure.
 */
static int kbd_connect(struct input_handler *handler, struct input_dev *dev,
			const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "kbd";

	error = input_register_handle(handle);
	if (error)
		goto err_free_handle;

	error = input_open_device(handle);
	if (error)
		goto err_unregister_handle;

	return 0;

 err_unregister_handle:
	input_unregister_handle(handle);
 err_free_handle:
	kfree(handle);
	return error;
}
/* Undo kbd_connect(): close, unregister and free the handle (in order). */
static void kbd_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
/*
* Start keyboard handler on the new keyboard by refreshing LED state to
* match the rest of the system.
*/
/*
 * New keyboard attached: push the current LED state to it.  The tasklet
 * is paused so ledstate cannot change underneath us; 0xff means the
 * state is still "undefined" and there is nothing to sync yet.
 */
static void kbd_start(struct input_handle *handle)
{
	tasklet_disable(&keyboard_tasklet);

	if (ledstate != 0xff)
		kbd_update_leds_helper(handle, &ledstate);

	tasklet_enable(&keyboard_tasklet);
}
/* Match any device with key events (keyboards) or sound events (beepers). */
static const struct input_device_id kbd_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},

	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_SND) },
	},

	{ },    /* Terminating entry */
};

MODULE_DEVICE_TABLE(input, kbd_ids);
/* The console keyboard input handler registered with the input core. */
static struct input_handler kbd_handler = {
	.event		= kbd_event,
	.match		= kbd_match,
	.connect	= kbd_connect,
	.disconnect	= kbd_disconnect,
	.start		= kbd_start,
	.name		= "kbd",
	.id_table	= kbd_ids,
};
/*
 * Initialise per-console keyboard state to the compiled-in defaults,
 * register the input handler, then enable and kick the LED tasklet
 * (declared disabled so early LED updates are deferred until now).
 */
int __init kbd_init(void)
{
	int i;
	int error;

	for (i = 0; i < MAX_NR_CONSOLES; i++) {
		kbd_table[i].ledflagstate = KBD_DEFLEDS;
		kbd_table[i].default_ledflagstate = KBD_DEFLEDS;
		kbd_table[i].ledmode = LED_SHOW_FLAGS;
		kbd_table[i].lockstate = KBD_DEFLOCK;
		kbd_table[i].slockstate = 0;
		kbd_table[i].modeflags = KBD_DEFMODE;
		kbd_table[i].kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE;
	}

	error = input_register_handler(&kbd_handler);
	if (error)
		return error;

	tasklet_enable(&keyboard_tasklet);
	tasklet_schedule(&keyboard_tasklet);

	return 0;
}
/* Ioctl support code */
/**
* vt_do_diacrit - diacritical table updates
* @cmd: ioctl request
* @up: pointer to user data for ioctl
* @perm: permissions check computed by caller
*
* Update the diacritical tables atomically and safely. Lock them
* against simultaneous keypresses
*/
/*
 * Handle the four diacritical-table ioctls.  Reads snapshot the table
 * under kbd_event_lock into a kernel buffer and copy to userspace after
 * unlocking; writes stage the user data first and swap it in under the
 * lock.  Returns 0 or a negative errno.
 */
int vt_do_diacrit(unsigned int cmd, void __user *up, int perm)
{
	struct kbdiacrs __user *a = up;
	unsigned long flags;
	int asize;
	int ret = 0;

	switch (cmd) {
	case KDGKBDIACR:
	{
		struct kbdiacr *diacr;
		int i;

		diacr = kmalloc(MAX_DIACR * sizeof(struct kbdiacr),
								GFP_KERNEL);
		if (diacr == NULL)
			return -ENOMEM;

		/* Lock the diacriticals table, make a copy and then
		   copy it after we unlock */
		spin_lock_irqsave(&kbd_event_lock, flags);

		asize = accent_table_size;
		for (i = 0; i < asize; i++) {
			/* legacy interface: convert entries to 8-bit */
			diacr[i].diacr = conv_uni_to_8bit(
						accent_table[i].diacr);
			diacr[i].base = conv_uni_to_8bit(
						accent_table[i].base);
			diacr[i].result = conv_uni_to_8bit(
						accent_table[i].result);
		}
		spin_unlock_irqrestore(&kbd_event_lock, flags);

		if (put_user(asize, &a->kb_cnt))
			ret = -EFAULT;
		else  if (copy_to_user(a->kbdiacr, diacr,
				asize * sizeof(struct kbdiacr)))
			ret = -EFAULT;
		kfree(diacr);
		return ret;
	}
	case KDGKBDIACRUC:
	{
		struct kbdiacrsuc __user *a = up;
		void *buf;

		buf = kmalloc(MAX_DIACR * sizeof(struct kbdiacruc),
								GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;

		/* Lock the diacriticals table, make a copy and then
		   copy it after we unlock */
		spin_lock_irqsave(&kbd_event_lock, flags);

		asize = accent_table_size;
		memcpy(buf, accent_table, asize * sizeof(struct kbdiacruc));

		spin_unlock_irqrestore(&kbd_event_lock, flags);

		if (put_user(asize, &a->kb_cnt))
			ret = -EFAULT;
		else if (copy_to_user(a->kbdiacruc, buf,
				asize*sizeof(struct kbdiacruc)))
			ret = -EFAULT;
		kfree(buf);
		return ret;
	}

	case KDSKBDIACR:
	{
		struct kbdiacrs __user *a = up;
		struct kbdiacr *diacr = NULL;
		unsigned int ct;
		int i;

		if (!perm)
			return -EPERM;
		if (get_user(ct, &a->kb_cnt))
			return -EFAULT;
		if (ct >= MAX_DIACR)
			return -EINVAL;

		if (ct) {
			/* copy from userspace before taking the spinlock */
			diacr = kmalloc(sizeof(struct kbdiacr) * ct,
								GFP_KERNEL);
			if (diacr == NULL)
				return -ENOMEM;

			if (copy_from_user(diacr, a->kbdiacr,
					sizeof(struct kbdiacr) * ct)) {
				kfree(diacr);
				return -EFAULT;
			}
		}

		spin_lock_irqsave(&kbd_event_lock, flags);
		accent_table_size = ct;
		for (i = 0; i < ct; i++) {
			/* legacy interface: widen 8-bit entries to unicode */
			accent_table[i].diacr =
					conv_8bit_to_uni(diacr[i].diacr);
			accent_table[i].base =
					conv_8bit_to_uni(diacr[i].base);
			accent_table[i].result =
					conv_8bit_to_uni(diacr[i].result);
		}
		spin_unlock_irqrestore(&kbd_event_lock, flags);
		kfree(diacr);
		return 0;
	}

	case KDSKBDIACRUC:
	{
		struct kbdiacrsuc __user *a = up;
		unsigned int ct;
		void *buf = NULL;

		if (!perm)
			return -EPERM;

		if (get_user(ct, &a->kb_cnt))
			return -EFAULT;

		if (ct >= MAX_DIACR)
			return -EINVAL;

		if (ct) {
			/* copy from userspace before taking the spinlock */
			buf = kmalloc(ct * sizeof(struct kbdiacruc),
								GFP_KERNEL);
			if (buf == NULL)
				return -ENOMEM;

			if (copy_from_user(buf, a->kbdiacruc,
					ct * sizeof(struct kbdiacruc))) {
				kfree(buf);
				return -EFAULT;
			}
		}
		spin_lock_irqsave(&kbd_event_lock, flags);
		if (ct)
			memcpy(accent_table, buf,
					ct * sizeof(struct kbdiacruc));
		accent_table_size = ct;
		spin_unlock_irqrestore(&kbd_event_lock, flags);
		kfree(buf);
		return 0;
	}
	}
	return ret;
}
/**
* vt_do_kdskbmode - set keyboard mode ioctl
* @console: the console to use
* @arg: the requested mode
*
* Update the keyboard mode bits while holding the correct locks.
* Return 0 for success or an error code.
*/
/*
 * KDSKBMODE: set the keyboard translation mode of @console under
 * kbd_event_lock.  Leaving a raw mode recomputes the shift state so no
 * stale modifiers stick.  Returns 0 or -EINVAL.
 */
int vt_do_kdskbmode(int console, unsigned int arg)
{
	struct kbd_struct * kbd = kbd_table + console;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&kbd_event_lock, flags);
	switch(arg) {
	case K_RAW:
		kbd->kbdmode = VC_RAW;
		break;
	case K_MEDIUMRAW:
		kbd->kbdmode = VC_MEDIUMRAW;
		break;
	case K_XLATE:
		kbd->kbdmode = VC_XLATE;
		do_compute_shiftstate();
		break;
	case K_UNICODE:
		kbd->kbdmode = VC_UNICODE;
		do_compute_shiftstate();
		break;
	case K_OFF:
		kbd->kbdmode = VC_OFF;
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&kbd_event_lock, flags);
	return ret;
}
/**
* vt_do_kdskbmeta - set keyboard meta state
* @console: the console to use
* @arg: the requested meta state
*
* Update the keyboard meta bits while holding the correct locks.
* Return 0 for success or an error code.
*/
/*
 * KDSKBMETA: choose how the Meta key is delivered — high bit set
 * (K_METABIT) or ESC prefix (K_ESCPREFIX).  Returns 0 or -EINVAL.
 */
int vt_do_kdskbmeta(int console, unsigned int arg)
{
	struct kbd_struct * kbd = kbd_table + console;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&kbd_event_lock, flags);
	switch(arg) {
	case K_METABIT:
		clr_vc_kbd_mode(kbd, VC_META);
		break;
	case K_ESCPREFIX:
		set_vc_kbd_mode(kbd, VC_META);
		break;
	default:
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&kbd_event_lock, flags);
	return ret;
}
/*
 * KDGETKEYCODE / KDSETKEYCODE: read or modify the scancode-to-keycode
 * mapping.  Returns 0 on success or a negative errno (get/setkeycode
 * errors, -EFAULT, -EPERM).
 */
int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc,
								int perm)
{
	struct kbkeycode tmp;
	int kc = 0;

	if (copy_from_user(&tmp, user_kbkc, sizeof(struct kbkeycode)))
		return -EFAULT;
	switch (cmd) {
	case KDGETKEYCODE:
		kc = getkeycode(tmp.scancode);
		if (kc >= 0)
			/* on success, return put_user's 0 / -EFAULT */
			kc = put_user(kc, &user_kbkc->keycode);
		break;
	case KDSETKEYCODE:
		if (!perm)
			return -EPERM;
		kc = setkeycode(tmp.scancode, tmp.keycode);
		break;
	}
	return kc;
}
/* shorthand for the kbentry fields used throughout vt_do_kdsk_ioctl() */
#define i (tmp.kb_index)
#define s (tmp.kb_table)
#define v (tmp.kb_value)

/*
 * KDGKBENT / KDSKBENT: read or modify one keymap entry.  Key maps are
 * allocated lazily; writing K_NOSUCHMAP with index 0 frees a map.
 * The candidate map is allocated before taking kbd_event_lock so the
 * allocation never sleeps under the spinlock.
 */
int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
						int console)
{
	struct kbd_struct * kbd = kbd_table + console;
	struct kbentry tmp;
	ushort *key_map, *new_map, val, ov;
	unsigned long flags;

	if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
		return -EFAULT;

	if (!capable(CAP_SYS_TTY_CONFIG))
		perm = 0;

	switch (cmd) {
	case KDGKBENT:
		/* Ensure another thread doesn't free it under us */
		spin_lock_irqsave(&kbd_event_lock, flags);
		key_map = key_maps[s];
		if (key_map) {
		    val = U(key_map[i]);
		    if (kbd->kbdmode != VC_UNICODE && KTYP(val) >= NR_TYPES)
			val = K_HOLE;
		} else
		    /* index 0 probes for map existence */
		    val = (i ? K_HOLE : K_NOSUCHMAP);
		spin_unlock_irqrestore(&kbd_event_lock, flags);
		return put_user(val, &user_kbe->kb_value);
	case KDSKBENT:
		if (!perm)
			return -EPERM;
		if (!i && v == K_NOSUCHMAP) {
			spin_lock_irqsave(&kbd_event_lock, flags);
			/* deallocate map */
			key_map = key_maps[s];
			if (s && key_map) {
			    key_maps[s] = NULL;
			    /* only kfree maps we allocated ourselves */
			    if (key_map[0] == U(K_ALLOCATED)) {
					kfree(key_map);
					keymap_count--;
			    }
			}
			spin_unlock_irqrestore(&kbd_event_lock, flags);
			break;
		}

		if (KTYP(v) < NR_TYPES) {
		    if (KVAL(v) > max_vals[KTYP(v)])
				return -EINVAL;
		} else
		    if (kbd->kbdmode != VC_UNICODE)
				return -EINVAL;

		/* ++Geert: non-PC keyboards may generate keycode zero */
#if !defined(__mc68000__) && !defined(__powerpc__)
		/* assignment to entry 0 only tests validity of args */
		if (!i)
			break;
#endif

		/* speculative allocation: done outside the spinlock */
		new_map = kmalloc(sizeof(plain_map), GFP_KERNEL);
		if (!new_map)
			return -ENOMEM;
		spin_lock_irqsave(&kbd_event_lock, flags);
		key_map = key_maps[s];
		if (key_map == NULL) {
			int j;

			if (keymap_count >= MAX_NR_OF_USER_KEYMAPS &&
			    !capable(CAP_SYS_RESOURCE)) {
				spin_unlock_irqrestore(&kbd_event_lock, flags);
				kfree(new_map);
				return -EPERM;
			}
			key_maps[s] = new_map;
			key_map = new_map;
			key_map[0] = U(K_ALLOCATED);
			for (j = 1; j < NR_KEYS; j++)
				key_map[j] = U(K_HOLE);
			keymap_count++;
		} else
			kfree(new_map);	/* map appeared meanwhile */

		ov = U(key_map[i]);
		if (v == ov)
			goto out;	/* nothing to do */
		/*
		 * Attention Key.
		 */
		if (((ov == K_SAK) || (v == K_SAK)) && !capable(CAP_SYS_ADMIN)) {
			spin_unlock_irqrestore(&kbd_event_lock, flags);
			return -EPERM;
		}
		key_map[i] = U(v);
		if (!s && (KTYP(ov) == KT_SHIFT || KTYP(v) == KT_SHIFT))
			do_compute_shiftstate();
out:
		spin_unlock_irqrestore(&kbd_event_lock, flags);
		break;
	}
	return 0;
}
#undef i
#undef s
#undef v
/* FIXME: This one needs untangling and locking */
int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
{
struct kbsentry *kbs;
char *p;
u_char *q;
u_char __user *up;
int sz;
int delta;
char *first_free, *fj, *fnw;
int i, j, k;
int ret;
if (!capable(CAP_SYS_TTY_CONFIG))
perm = 0;
kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
if (!kbs) {
ret = -ENOMEM;
goto reterr;
}
/* we mostly copy too much here (512bytes), but who cares ;) */
if (copy_from_user(kbs, user_kdgkb, sizeof(struct kbsentry))) {
ret = -EFAULT;
goto reterr;
}
kbs->kb_string[sizeof(kbs->kb_string)-1] = '\0';
i = kbs->kb_func;
switch (cmd) {
case KDGKBSENT:
sz = sizeof(kbs->kb_string) - 1; /* sz should have been
a struct member */
up = user_kdgkb->kb_string;
p = func_table[i];
if(p)
for ( ; *p && sz; p++, sz--)
if (put_user(*p, up++)) {
ret = -EFAULT;
goto reterr;
}
if (put_user('\0', up)) {
ret = -EFAULT;
goto reterr;
}
kfree(kbs);
return ((p && *p) ? -EOVERFLOW : 0);
case KDSKBSENT:
if (!perm) {
ret = -EPERM;
goto reterr;
}
q = func_table[i];
first_free = funcbufptr + (funcbufsize - funcbufleft);
for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
;
if (j < MAX_NR_FUNC)
fj = func_table[j];
else
fj = first_free;
delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string);
if (delta <= funcbufleft) { /* it fits in current buf */
if (j < MAX_NR_FUNC) {
memmove(fj + delta, fj, first_free - fj);
for (k = j; k < MAX_NR_FUNC; k++)
if (func_table[k])
func_table[k] += delta;
}
if (!q)
func_table[i] = fj;
funcbufleft -= delta;
} else { /* allocate a larger buffer */
sz = 256;
while (sz < funcbufsize - funcbufleft + delta)
sz <<= 1;
fnw = kmalloc(sz, GFP_KERNEL);
if(!fnw) {
ret = -ENOMEM;
goto reterr;
}
if (!q)
func_table[i] = fj;
if (fj > funcbufptr)
memmove(fnw, funcbufptr, fj - funcbufptr);
for (k = 0; k < j; k++)
if (func_table[k])
func_table[k] = fnw + (func_table[k] - funcbufptr);
if (first_free > fj) {
memmove(fnw + (fj - funcbufptr) + delta, fj, first_free - fj);
for (k = j; k < MAX_NR_FUNC; k++)
if (func_table[k])
func_table[k] = fnw + (func_table[k] - funcbufptr) + delta;
}
if (funcbufptr != func_buf)
kfree(funcbufptr);
funcbufptr = fnw;
funcbufleft = funcbufleft - delta + sz - funcbufsize;
funcbufsize = sz;
}
strcpy(func_table[i], kbs->kb_string);
break;
}
ret = 0;
reterr:
kfree(kbs);
return ret;
}
/*
 * LED ioctls: KDGKBLED/KDSKBLED act on the lock *flags* (low nibble
 * current, high nibble default), while KDGETLED/KDSETLED act on the
 * physical lights only.  Returns 0, a put_user result, or an errno.
 */
int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm)
{
	struct kbd_struct * kbd = kbd_table + console;
        unsigned long flags;
	unsigned char ucval;

        switch(cmd) {
	/* the ioctls below read/set the flags usually shown in the leds */
	/* don't use them - they will go away without warning */
	case KDGKBLED:
                spin_lock_irqsave(&kbd_event_lock, flags);
		ucval = kbd->ledflagstate | (kbd->default_ledflagstate << 4);
                spin_unlock_irqrestore(&kbd_event_lock, flags);
		return put_user(ucval, (char __user *)arg);

	case KDSKBLED:
		if (!perm)
			return -EPERM;
		if (arg & ~0x77)	/* only bits 0-2 of each nibble */
			return -EINVAL;
                spin_lock_irqsave(&kbd_event_lock, flags);
		kbd->ledflagstate = (arg & 7);
		kbd->default_ledflagstate = ((arg >> 4) & 7);
		set_leds();
                spin_unlock_irqrestore(&kbd_event_lock, flags);
		return 0;

	/* the ioctls below only set the lights, not the functions */
	/* for those, see KDGKBLED and KDSKBLED above */
	case KDGETLED:
		ucval = getledstate();
		return put_user(ucval, (char __user *)arg);

	case KDSETLED:
		if (!perm)
			return -EPERM;
		setledstate(kbd, arg);
		return 0;
	}
	return -ENOIOCTLCMD;
}
/*
 * KDGKBMODE: translate the console's internal keyboard mode into the
 * K_* constant reported to userspace.  Unknown modes read as K_XLATE.
 */
int vt_do_kdgkbmode(int console)
{
	struct kbd_struct *kb = kbd_table + console;

	/* This is a spot read so needs no locking */
	if (kb->kbdmode == VC_RAW)
		return K_RAW;
	if (kb->kbdmode == VC_MEDIUMRAW)
		return K_MEDIUMRAW;
	if (kb->kbdmode == VC_UNICODE)
		return K_UNICODE;
	if (kb->kbdmode == VC_OFF)
		return K_OFF;
	return K_XLATE;
}
/**
* vt_do_kdgkbmeta - report meta status
* @console: console to report
*
* Report the meta flag status of this console
*/
/* KDGKBMETA: report whether Meta is ESC-prefix or high-bit on @console. */
int vt_do_kdgkbmeta(int console)
{
	struct kbd_struct * kbd = kbd_table + console;
        /* Again a spot read so no locking */
	return vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT;
}
/**
* vt_reset_unicode - reset the unicode status
* @console: console being reset
*
* Restore the unicode console state to its default
*/
/* Reset @console's keyboard mode to the system default (utf8 or xlate). */
void vt_reset_unicode(int console)
{
	unsigned long flags;

	spin_lock_irqsave(&kbd_event_lock, flags);
	kbd_table[console].kbdmode = default_utf8 ? VC_UNICODE : VC_XLATE;
	spin_unlock_irqrestore(&kbd_event_lock, flags);
}
/**
 * vt_get_shift_state - shift bit state
 *
 * Report the shift bits from the keyboard state. We have to export
 * this to support some oddities in the vt layer.
 */
/* Snapshot of the global modifier bitmask (may be stale by the time it's used). */
int vt_get_shift_state(void)
{
        /* Don't lock as this is a transient report */
        return shift_state;
}
/**
* vt_reset_keyboard - reset keyboard state
* @console: console to reset
*
* Reset the keyboard bits for a console as part of a general console
* reset event
*/
/* Restore @console's keyboard mode flags, locks and LED flags to defaults. */
void vt_reset_keyboard(int console)
{
	struct kbd_struct * kbd = kbd_table + console;
	unsigned long flags;

	spin_lock_irqsave(&kbd_event_lock, flags);
	set_vc_kbd_mode(kbd, VC_REPEAT);
	clr_vc_kbd_mode(kbd, VC_CKMODE);
	clr_vc_kbd_mode(kbd, VC_APPLIC);
	clr_vc_kbd_mode(kbd, VC_CRLF);
	kbd->lockstate = 0;
	kbd->slockstate = 0;
	kbd->ledmode = LED_SHOW_FLAGS;
	kbd->ledflagstate = kbd->default_ledflagstate;
	/* do not do set_leds here because this causes an endless tasklet loop
	   when the keyboard hasn't been initialized yet */
	spin_unlock_irqrestore(&kbd_event_lock, flags);
}
/**
 * vt_get_kbd_mode_bit - read keyboard status bits
 * @console: console to read from
 * @bit: mode bit to read
 *
 * Report back a vt mode bit. We do this without locking so the
 * caller must be sure that there are no synchronization needs
 */
int vt_get_kbd_mode_bit(int console, int bit)
{
	struct kbd_struct * kbd = kbd_table + console;

	return vc_kbd_mode(kbd, bit);
}
/**
 * vt_set_kbd_mode_bit - set keyboard status bit
 * @console: console to write to
 * @bit: mode bit to set
 *
 * Set a vt mode bit. The keyboard event lock is taken so the bit
 * update is atomic with respect to the key handlers.
 */
void vt_set_kbd_mode_bit(int console, int bit)
{
	struct kbd_struct * kbd = kbd_table + console;
	unsigned long flags;

	spin_lock_irqsave(&kbd_event_lock, flags);
	set_vc_kbd_mode(kbd, bit);
	spin_unlock_irqrestore(&kbd_event_lock, flags);
}
/**
 * vt_clr_kbd_mode_bit - clear keyboard status bit
 * @console: console to write to
 * @bit: mode bit to clear
 *
 * Clear a vt mode bit. The keyboard event lock is taken so the bit
 * update is atomic with respect to the key handlers.
 */
void vt_clr_kbd_mode_bit(int console, int bit)
{
	struct kbd_struct * kbd = kbd_table + console;
	unsigned long flags;

	spin_lock_irqsave(&kbd_event_lock, flags);
	clr_vc_kbd_mode(kbd, bit);
	spin_unlock_irqrestore(&kbd_event_lock, flags);
}
| gpl-2.0 |
SlimRoms/kernel_lge_hammerhead | fs/cifs/inode.c | 3292 | 61057 | /*
* fs/cifs/inode.c
*
* Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
/*
 * Install the inode/file/address-space operation tables for an inode,
 * chosen from its file type (i_mode) and the mount options (direct I/O,
 * strict I/O, byte-range-lock support, DFS automount).
 */
static void cifs_set_ops(struct inode *inode)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_op = &cifs_file_inode_ops;
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
				inode->i_fop = &cifs_file_direct_nobrl_ops;
			else
				inode->i_fop = &cifs_file_direct_ops;
		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
			if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
				inode->i_fop = &cifs_file_strict_nobrl_ops;
			else
				inode->i_fop = &cifs_file_strict_ops;
		} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			inode->i_fop = &cifs_file_nobrl_ops;
		else { /* not direct, send byte range locks */
			inode->i_fop = &cifs_file_ops;
		}

		/* check if server can support readpages */
		if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf <
				PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
			inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
		else
			inode->i_data.a_ops = &cifs_addr_ops;
		break;
	case S_IFDIR:
#ifdef CONFIG_CIFS_DFS_UPCALL
		if (IS_AUTOMOUNT(inode)) {
			inode->i_op = &cifs_dfs_referral_inode_operations;
		} else {
#else /* NO DFS support, treat as a directory */
		{
#endif
			inode->i_op = &cifs_dir_inode_ops;
			inode->i_fop = &cifs_dir_ops;
		}
		break;
	case S_IFLNK:
		inode->i_op = &cifs_symlink_inode_ops;
		break;
	default:
		/* devices, FIFOs, sockets: generic special-inode setup */
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		break;
	}
}
/* check inode attributes against fattr. If they don't match, tag the
 * inode for cache invalidation
 */
static void
cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
{
	struct cifsInodeInfo *cifs_i = CIFS_I(inode);

	cFYI(1, "%s: revalidating inode %llu", __func__, cifs_i->uniqueid);

	/* brand-new inodes have no stale pagecache to invalidate */
	if (inode->i_state & I_NEW) {
		cFYI(1, "%s: inode %llu is new", __func__, cifs_i->uniqueid);
		return;
	}

	/* don't bother with revalidation if we have an oplock */
	if (cifs_i->clientCanCacheRead) {
		cFYI(1, "%s: inode %llu is oplocked", __func__,
			 cifs_i->uniqueid);
		return;
	}

	/* revalidate if mtime or size have changed */
	if (timespec_equal(&inode->i_mtime, &fattr->cf_mtime) &&
	    cifs_i->server_eof == fattr->cf_eof) {
		cFYI(1, "%s: inode %llu is unchanged", __func__,
			 cifs_i->uniqueid);
		return;
	}

	cFYI(1, "%s: invalidating inode %llu mapping", __func__,
		 cifs_i->uniqueid);
	/* flag consumed later: pagecache will be dropped on next reval */
	cifs_i->invalid_mapping = true;
}
/* populate an inode with info from a cifs_fattr struct */
void
cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
{
	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	unsigned long oldtime = cifs_i->time;

	/* may set cifs_i->invalid_mapping if attrs show a change */
	cifs_revalidate_cache(inode, fattr);

	inode->i_atime = fattr->cf_atime;
	inode->i_mtime = fattr->cf_mtime;
	inode->i_ctime = fattr->cf_ctime;
	inode->i_rdev = fattr->cf_rdev;
	set_nlink(inode, fattr->cf_nlink);
	inode->i_uid = fattr->cf_uid;
	inode->i_gid = fattr->cf_gid;

	/* if dynperm is set, don't clobber existing mode */
	if (inode->i_state & I_NEW ||
	    !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM))
		inode->i_mode = fattr->cf_mode;

	cifs_i->cifsAttrs = fattr->cf_cifsattrs;

	/* time == 0 forces a fresh attribute query on the next lookup */
	if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
		cifs_i->time = 0;
	else
		cifs_i->time = jiffies;

	cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode,
		 oldtime, cifs_i->time);

	cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;

	cifs_i->server_eof = fattr->cf_eof;
	/*
	 * Can't safely change the file size here if the client is writing to
	 * it due to potential races.
	 */
	spin_lock(&inode->i_lock);
	if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
		i_size_write(inode, fattr->cf_eof);

		/*
		 * i_blocks is not related to (i_size / i_blksize),
		 * but instead 512 byte (2**9) size is required for
		 * calculating num blocks.
		 */
		inode->i_blocks = (512 - 1 + fattr->cf_bytes) >> 9;
	}
	spin_unlock(&inode->i_lock);

	if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
		inode->i_flags |= S_AUTOMOUNT;
	cifs_set_ops(inode);
}
void
cifs_fill_uniqueid(struct super_block *sb, struct cifs_fattr *fattr)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
return;
fattr->cf_uniqueid = iunique(sb, ROOT_I);
}
/* Fill a cifs_fattr struct with info from FILE_UNIX_BASIC_INFO. */
void
cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, FILE_UNIX_BASIC_INFO *info,
			 struct cifs_sb_info *cifs_sb)
{
	memset(fattr, 0, sizeof(*fattr));
	fattr->cf_uniqueid = le64_to_cpu(info->UniqueId);
	fattr->cf_bytes = le64_to_cpu(info->NumOfBytes);
	fattr->cf_eof = le64_to_cpu(info->EndOfFile);

	fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastModificationTime);
	fattr->cf_ctime = cifs_NTtimeToUnix(info->LastStatusChange);
	fattr->cf_mode = le64_to_cpu(info->Permissions);

	/*
	 * Since we set the inode type below we need to mask off
	 * to avoid strange results if bits set above.
	 */
	fattr->cf_mode &= ~S_IFMT;
	switch (le32_to_cpu(info->Type)) {
	case UNIX_FILE:
		fattr->cf_mode |= S_IFREG;
		fattr->cf_dtype = DT_REG;
		break;
	case UNIX_SYMLINK:
		fattr->cf_mode |= S_IFLNK;
		fattr->cf_dtype = DT_LNK;
		break;
	case UNIX_DIR:
		fattr->cf_mode |= S_IFDIR;
		fattr->cf_dtype = DT_DIR;
		break;
	case UNIX_CHARDEV:
		fattr->cf_mode |= S_IFCHR;
		fattr->cf_dtype = DT_CHR;
		fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
				       le64_to_cpu(info->DevMinor) & MINORMASK);
		break;
	case UNIX_BLOCKDEV:
		fattr->cf_mode |= S_IFBLK;
		fattr->cf_dtype = DT_BLK;
		fattr->cf_rdev = MKDEV(le64_to_cpu(info->DevMajor),
				       le64_to_cpu(info->DevMinor) & MINORMASK);
		break;
	case UNIX_FIFO:
		fattr->cf_mode |= S_IFIFO;
		fattr->cf_dtype = DT_FIFO;
		break;
	case UNIX_SOCKET:
		fattr->cf_mode |= S_IFSOCK;
		fattr->cf_dtype = DT_SOCK;
		break;
	default:
		/* safest to call it a file if we do not know */
		fattr->cf_mode |= S_IFREG;
		fattr->cf_dtype = DT_REG;
		cFYI(1, "unknown type %d", le32_to_cpu(info->Type));
		break;
	}

	/* forceuid/forcegid mount options override server-reported owner */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		fattr->cf_uid = cifs_sb->mnt_uid;
	else
		fattr->cf_uid = le64_to_cpu(info->Uid);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		fattr->cf_gid = cifs_sb->mnt_gid;
	else
		fattr->cf_gid = le64_to_cpu(info->Gid);

	fattr->cf_nlink = le64_to_cpu(info->Nlinks);
}
/*
 * Fill a cifs_fattr struct with fake inode info.
 *
 * Needed to setup cifs_fattr data for the directory which is the
 * junction to the new submount (ie to setup the fake directory
 * which represents a DFS referral).
 */
static void
cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	cFYI(1, "creating fake fattr for DFS referral");

	memset(fattr, 0, sizeof(*fattr));
	/* traversable directory owned by the mount's uid/gid */
	fattr->cf_mode = S_IFDIR | S_IXUGO | S_IRWXU;
	fattr->cf_uid = cifs_sb->mnt_uid;
	fattr->cf_gid = cifs_sb->mnt_gid;
	fattr->cf_atime = CURRENT_TIME;
	fattr->cf_ctime = CURRENT_TIME;
	fattr->cf_mtime = CURRENT_TIME;
	fattr->cf_nlink = 2;
	/* marks the inode S_AUTOMOUNT in cifs_fattr_to_inode() */
	fattr->cf_flags |= CIFS_FATTR_DFS_REFERRAL;
}
/*
 * Refresh the inode backing @filp using a handle-based POSIX (unix
 * extensions) attribute query.
 *
 * Returns 0 on success (a DFS referral is faked as a directory and also
 * returns 0); otherwise the negative error from the query. On any other
 * error the inode is left untouched -- the original code fell through and
 * copied an UNINITIALIZED stack fattr into the inode, corrupting its
 * attributes.
 */
int cifs_get_file_info_unix(struct file *filp)
{
	int rc;
	int xid;
	FILE_UNIX_BASIC_INFO find_data;
	struct cifs_fattr fattr;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	xid = GetXid();
	rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data);
	if (!rc) {
		cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
	} else if (rc == -EREMOTE) {
		/* junction to another share: fake a directory inode */
		cifs_create_dfs_fattr(&fattr, inode->i_sb);
		rc = 0;
	} else
		/* fattr is uninitialized here -- do not touch the inode */
		goto cgfiu_exit;

	cifs_fattr_to_inode(inode, &fattr);
cgfiu_exit:
	FreeXid(xid);
	return rc;
}
/*
 * Fetch POSIX (unix extensions) attributes for @full_path and create
 * (*pinode == NULL) or update the corresponding inode.
 * Returns 0 on success (a DFS referral is faked as a directory),
 * otherwise the negative error from the path query.
 */
int cifs_get_inode_info_unix(struct inode **pinode,
			     const unsigned char *full_path,
			     struct super_block *sb, int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO find_data;
	struct cifs_fattr fattr;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	cFYI(1, "Getting info on %s", full_path);

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	/* could have done a find first instead but this returns more info */
	rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
				  cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);

	if (!rc) {
		cifs_unix_basic_to_fattr(&fattr, &find_data, cifs_sb);
	} else if (rc == -EREMOTE) {
		cifs_create_dfs_fattr(&fattr, sb);
		rc = 0;
	} else {
		return rc;
	}

	/* check for Minshall+French symlinks */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
		int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
		if (tmprc)
			cFYI(1, "CIFSCheckMFSymlink: %d", tmprc);
	}

	if (*pinode == NULL) {
		/* get new inode */
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode)
			rc = -ENOMEM;
	} else {
		/* we already have inode, update it */
		cifs_fattr_to_inode(*pinode, &fattr);
	}

	return rc;
}
/*
 * Determine the real type of a "Services for Unix" (SFU) special file by
 * reading its 24-byte header: an 8-byte magic ("IntxBLK", "IntxCHR",
 * "IntxLNK") optionally followed by two little-endian 64-bit device
 * numbers. Updates fattr->cf_mode/cf_dtype/cf_rdev in place; the file
 * type bits are cleared first.
 */
static int
cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
	      struct cifs_sb_info *cifs_sb, int xid)
{
	int rc;
	int oplock = 0;
	__u16 netfid;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	struct cifs_io_parms io_parms;
	char buf[24];
	unsigned int bytes_read;
	char *pbuf;

	pbuf = buf;

	fattr->cf_mode &= ~S_IFMT;

	/* zero-length SFU special files are FIFOs */
	if (fattr->cf_eof == 0) {
		fattr->cf_mode |= S_IFIFO;
		fattr->cf_dtype = DT_FIFO;
		return 0;
	} else if (fattr->cf_eof < 8) {
		/* too short to hold the magic: treat as a regular file */
		fattr->cf_mode |= S_IFREG;
		fattr->cf_dtype = DT_REG;
		return -EINVAL;	 /* EOPNOTSUPP? */
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, GENERIC_READ,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc == 0) {
		int buf_type = CIFS_NO_BUFFER;
			/* Read header */
		io_parms.netfid = netfid;
		io_parms.pid = current->tgid;
		io_parms.tcon = tcon;
		io_parms.offset = 0;
		io_parms.length = 24;
		rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf,
				 &buf_type);
		if ((rc == 0) && (bytes_read >= 8)) {
			if (memcmp("IntxBLK", pbuf, 8) == 0) {
				cFYI(1, "Block device");
				fattr->cf_mode |= S_IFBLK;
				fattr->cf_dtype = DT_BLK;
				if (bytes_read == 24) {
					/* we have enough to decode dev num */
					__u64 mjr; /* major */
					__u64 mnr; /* minor */
					mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
					mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
					fattr->cf_rdev = MKDEV(mjr, mnr);
				}
			} else if (memcmp("IntxCHR", pbuf, 8) == 0) {
				cFYI(1, "Char device");
				fattr->cf_mode |= S_IFCHR;
				fattr->cf_dtype = DT_CHR;
				if (bytes_read == 24) {
					/* we have enough to decode dev num */
					__u64 mjr; /* major */
					__u64 mnr; /* minor */
					mjr = le64_to_cpu(*(__le64 *)(pbuf+8));
					mnr = le64_to_cpu(*(__le64 *)(pbuf+16));
					fattr->cf_rdev = MKDEV(mjr, mnr);
				}
			} else if (memcmp("IntxLNK", pbuf, 7) == 0) {
				cFYI(1, "Symlink");
				fattr->cf_mode |= S_IFLNK;
				fattr->cf_dtype = DT_LNK;
			} else {
				fattr->cf_mode |= S_IFREG; /* file? */
				fattr->cf_dtype = DT_REG;
				rc = -EOPNOTSUPP;
			}
		} else {
			fattr->cf_mode |= S_IFREG; /* then it is a file */
			fattr->cf_dtype = DT_REG;
			rc = -EOPNOTSUPP; /* or some unknown SFU type */
		}
		CIFSSMBClose(xid, tcon, netfid);
	}
	cifs_put_tlink(tlink);
	return rc;
}
#define SFBITS_MASK (S_ISVTX | S_ISGID | S_ISUID)  /* SETFILEBITS valid bits */

/*
 * Fetch mode bits as provided by SFU.
 *
 * Reads the "SETFILEBITS" extended attribute (a little-endian 32-bit
 * mode value) and merges only the setuid/setgid/sticky bits into
 * fattr->cf_mode. Returns 0 on success or a negative error; without
 * CONFIG_CIFS_XATTR it always returns -EOPNOTSUPP.
 *
 * FIXME: Doesn't this clobber the type bit we got from cifs_sfu_type ?
 */
static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
			 struct cifs_sb_info *cifs_sb, int xid)
{
#ifdef CONFIG_CIFS_XATTR
	ssize_t rc;
	char ea_value[4];
	__u32 mode;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	rc = CIFSSMBQAllEAs(xid, tcon, path, "SETFILEBITS",
			    ea_value, 4 /* size of buf */, cifs_sb->local_nls,
			    cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	cifs_put_tlink(tlink);
	if (rc < 0)
		return (int)rc;
	else if (rc > 3) {
		/* got a full 4-byte EA: merge the special bits only */
		mode = le32_to_cpu(*((__le32 *)ea_value));
		fattr->cf_mode &= ~SFBITS_MASK;
		cFYI(1, "special bits 0%o org mode 0%o", mode,
			 fattr->cf_mode);
		fattr->cf_mode = (mode & SFBITS_MASK) | fattr->cf_mode;
		cFYI(1, "special mode bits 0%o", mode);
	}

	return 0;
#else
	return -EOPNOTSUPP;
#endif
}
/* Fill a cifs_fattr struct with info from FILE_ALL_INFO */
static void
cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
		       struct cifs_sb_info *cifs_sb, bool adjust_tz)
{
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	memset(fattr, 0, sizeof(*fattr));
	fattr->cf_cifsattrs = le32_to_cpu(info->Attributes);
	if (info->DeletePending)
		fattr->cf_flags |= CIFS_FATTR_DELETE_PENDING;

	/* some servers report a zero (invalid) access time */
	if (info->LastAccessTime)
		fattr->cf_atime = cifs_NTtimeToUnix(info->LastAccessTime);
	else
		fattr->cf_atime = CURRENT_TIME;

	fattr->cf_ctime = cifs_NTtimeToUnix(info->ChangeTime);
	fattr->cf_mtime = cifs_NTtimeToUnix(info->LastWriteTime);

	/* adjust_tz: legacy server reported local time, add its offset */
	if (adjust_tz) {
		fattr->cf_ctime.tv_sec += tcon->ses->server->timeAdj;
		fattr->cf_mtime.tv_sec += tcon->ses->server->timeAdj;
	}

	fattr->cf_eof = le64_to_cpu(info->EndOfFile);
	fattr->cf_bytes = le64_to_cpu(info->AllocationSize);
	fattr->cf_createtime = le64_to_cpu(info->CreationTime);

	if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
		fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
		fattr->cf_dtype = DT_DIR;
		/*
		 * Server can return wrong NumberOfLinks value for directories
		 * when Unix extensions are disabled - fake it.
		 */
		fattr->cf_nlink = 2;
	} else {
		fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
		fattr->cf_dtype = DT_REG;

		/* clear write bits if ATTR_READONLY is set */
		if (fattr->cf_cifsattrs & ATTR_READONLY)
			fattr->cf_mode &= ~(S_IWUGO);

		fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
	}

	/* FILE_ALL_INFO carries no owner info; use the mount's uid/gid */
	fattr->cf_uid = cifs_sb->mnt_uid;
	fattr->cf_gid = cifs_sb->mnt_gid;
}
/*
 * Refresh the inode backing @filp using a handle-based (non-unix)
 * FILE_ALL_INFO query. Legacy servers that reject the call just get
 * their inode flagged for immediate path-based revalidation.
 */
int cifs_get_file_info(struct file *filp)
{
	int rc;
	int xid;
	FILE_ALL_INFO find_data;
	struct cifs_fattr fattr;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsFileInfo *cfile = filp->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);

	xid = GetXid();
	rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
	switch (rc) {
	case 0:
		cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
		break;
	case -EREMOTE:
		cifs_create_dfs_fattr(&fattr, inode->i_sb);
		rc = 0;
		break;
	case -EOPNOTSUPP:
	case -EINVAL:
		/*
		 * FIXME: legacy server -- fall back to path-based call?
		 * for now, just skip revalidating and mark inode for
		 * immediate reval.
		 */
		rc = 0;
		CIFS_I(inode)->time = 0;
		/* intentional fallthrough into default: exit without
		   touching the (uninitialized) fattr */
	default:
		goto cgfi_exit;
	}

	/*
	 * don't bother with SFU junk here -- just mark inode as needing
	 * revalidation.
	 */
	fattr.cf_uniqueid = CIFS_I(inode)->uniqueid;
	fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
	cifs_fattr_to_inode(inode, &fattr);
cgfi_exit:
	FreeXid(xid);
	return rc;
}
/*
 * Fetch non-unix (FILE_ALL_INFO) attributes for @full_path and create
 * (*pinode == NULL) or update the corresponding inode.
 *
 * @pfindData: optional pre-fetched attributes; when NULL they are
 *	       queried from the server (falling back to the legacy
 *	       SMBQueryInformation call for old servers).
 * @pfid:      optional open-file id, passed through to the ACL fetch.
 */
int cifs_get_inode_info(struct inode **pinode,
	const unsigned char *full_path, FILE_ALL_INFO *pfindData,
	struct super_block *sb, int xid, const __u16 *pfid)
{
	int rc = 0, tmprc;
	struct cifs_tcon *pTcon;
	struct tcon_link *tlink;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *buf = NULL;
	bool adjustTZ = false;
	struct cifs_fattr fattr;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	pTcon = tlink_tcon(tlink);

	cFYI(1, "Getting info on %s", full_path);

	/* oplocked inode: cached attributes are still authoritative */
	if ((pfindData == NULL) && (*pinode != NULL)) {
		if (CIFS_I(*pinode)->clientCanCacheRead) {
			cFYI(1, "No need to revalidate cached inode sizes");
			goto cgii_exit;
		}
	}

	/* if file info not passed in then get it from server */
	if (pfindData == NULL) {
		buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
		if (buf == NULL) {
			rc = -ENOMEM;
			goto cgii_exit;
		}
		pfindData = (FILE_ALL_INFO *)buf;

		/* could do find first instead but this returns more info */
		rc = CIFSSMBQPathInfo(xid, pTcon, full_path, pfindData,
			      0 /* not legacy */,
			      cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);

		/* BB optimize code so we do not make the above call
		   when server claims no NT SMB support and the above call
		   failed at least once - set flag in tcon or mount */
		if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
			rc = SMBQueryInformation(xid, pTcon, full_path,
					pfindData, cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
					  CIFS_MOUNT_MAP_SPECIAL_CHR);
			/* legacy call reports server-local time */
			adjustTZ = true;
		}
	}

	if (!rc) {
		cifs_all_info_to_fattr(&fattr, (FILE_ALL_INFO *) pfindData,
				       cifs_sb, adjustTZ);
	} else if (rc == -EREMOTE) {
		cifs_create_dfs_fattr(&fattr, sb);
		rc = 0;
	} else {
		goto cgii_exit;
	}

	/*
	 * If an inode wasn't passed in, then get the inode number
	 *
	 * Is an i_ino of zero legal? Can we use that to check if the server
	 * supports returning inode numbers?  Are there other sanity checks we
	 * can use to ensure that the server is really filling in that field?
	 *
	 * We can not use the IndexNumber field by default from Windows or
	 * Samba (in ALL_INFO buf) but we can request it explicitly. The SNIA
	 * CIFS spec claims that this value is unique within the scope of a
	 * share, and the windows docs hint that it's actually unique
	 * per-machine.
	 *
	 * There may be higher info levels that work but are there Windows
	 * server or network appliances for which IndexNumber field is not
	 * guaranteed unique?
	 */
	if (*pinode == NULL) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
			int rc1 = 0;

			rc1 = CIFSGetSrvInodeNumber(xid, pTcon,
					full_path, &fattr.cf_uniqueid,
					cifs_sb->local_nls,
					cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
			if (rc1 || !fattr.cf_uniqueid) {
				cFYI(1, "GetSrvInodeNum rc %d", rc1);
				fattr.cf_uniqueid = iunique(sb, ROOT_I);
				/* stop asking a server that can't answer */
				cifs_autodisable_serverino(cifs_sb);
			}
		} else {
			fattr.cf_uniqueid = iunique(sb, ROOT_I);
		}
	} else {
		fattr.cf_uniqueid = CIFS_I(*pinode)->uniqueid;
	}

	/* query for SFU type info if supported and needed */
	if (fattr.cf_cifsattrs & ATTR_SYSTEM &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
		tmprc = cifs_sfu_type(&fattr, full_path, cifs_sb, xid);
		if (tmprc)
			cFYI(1, "cifs_sfu_type failed: %d", tmprc);
	}

#ifdef CONFIG_CIFS_ACL
	/* fill in 0777 bits from ACL */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
		rc = cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path,
						pfid);
		if (rc) {
			cFYI(1, "%s: Getting ACL failed with error: %d",
				__func__, rc);
			goto cgii_exit;
		}
	}
#endif /* CONFIG_CIFS_ACL */

	/* fill in remaining high mode bits e.g. SUID, VTX */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		cifs_sfu_mode(&fattr, full_path, cifs_sb, xid);

	/* check for Minshall+French symlinks */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
		tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
		if (tmprc)
			cFYI(1, "CIFSCheckMFSymlink: %d", tmprc);
	}

	if (!*pinode) {
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode)
			rc = -ENOMEM;
	} else {
		cifs_fattr_to_inode(*pinode, &fattr);
	}

cgii_exit:
	kfree(buf);
	cifs_put_tlink(tlink);
	return rc;
}
/* inode ops for the fake IPC$ root directory: only lookup is supported */
static const struct inode_operations cifs_ipc_inode_ops = {
	.lookup = cifs_lookup,
};
/*
 * Build the path of the share root ("" when no prefix path was given,
 * otherwise the DFS tree name, if any, followed by the prefix path with
 * delimiters converted to the mount's separator).
 * Returns a kmalloc'd string the caller must kfree, or NULL on OOM.
 */
char *cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
			      struct cifs_tcon *tcon)
{
	int pplen = vol->prepath ? strlen(vol->prepath) : 0;
	int dfsplen;
	char *full_path = NULL;

	/* if no prefix path, simply set path to the root of share to "" */
	if (pplen == 0) {
		full_path = kmalloc(1, GFP_KERNEL);
		if (full_path)
			full_path[0] = 0;
		return full_path;
	}

	if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
		dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
	else
		dfsplen = 0;

	full_path = kmalloc(dfsplen + pplen + 1, GFP_KERNEL);
	if (full_path == NULL)
		return full_path;

	if (dfsplen)
		strncpy(full_path, tcon->treeName, dfsplen);
	strncpy(full_path + dfsplen, vol->prepath, pplen);
	convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
	full_path[dfsplen + pplen] = 0; /* add trailing null */
	return full_path;
}
/*
 * iget5_locked() match callback: decide whether an existing inode
 * corresponds to @opaque (a cifs_fattr). Returns 1 on match, 0 otherwise.
 */
static int
cifs_find_inode(struct inode *inode, void *opaque)
{
	struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;

	/* don't match inode with different uniqueid */
	if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)
		return 0;

	/* use createtime like an i_generation field */
	if (CIFS_I(inode)->createtime != fattr->cf_createtime)
		return 0;

	/* don't match inode of different type */
	if ((inode->i_mode & S_IFMT) != (fattr->cf_mode & S_IFMT))
		return 0;

	/* if it's not a directory or has no dentries, then flag it */
	if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry))
		fattr->cf_flags |= CIFS_FATTR_INO_COLLISION;

	return 1;
}
static int
cifs_init_inode(struct inode *inode, void *opaque)
{
struct cifs_fattr *fattr = (struct cifs_fattr *) opaque;
CIFS_I(inode)->uniqueid = fattr->cf_uniqueid;
CIFS_I(inode)->createtime = fattr->cf_createtime;
return 0;
}
/*
 * walk dentry list for an inode and report whether it has aliases that
 * are hashed. We use this to determine if a directory inode can actually
 * be used.
 */
static bool
inode_has_hashed_dentries(struct inode *inode)
{
	struct dentry *dentry;

	/* i_lock protects the i_dentry alias list during the walk */
	spin_lock(&inode->i_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
			spin_unlock(&inode->i_lock);
			return true;
		}
	}
	spin_unlock(&inode->i_lock);
	return false;
}
/* Given fattrs, get a corresponding inode */
struct inode *
cifs_iget(struct super_block *sb, struct cifs_fattr *fattr)
{
	unsigned long hash;
	struct inode *inode;

retry_iget5_locked:
	cFYI(1, "looking for uniqueid=%llu", fattr->cf_uniqueid);

	/* hash down to 32-bits on 32-bit arch */
	hash = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
	inode = iget5_locked(sb, hash, cifs_find_inode, cifs_init_inode, fattr);
	if (inode) {
		/* was there a potentially problematic inode collision? */
		if (fattr->cf_flags & CIFS_FATTR_INO_COLLISION) {
			fattr->cf_flags &= ~CIFS_FATTR_INO_COLLISION;

			if (inode_has_hashed_dentries(inode)) {
				/* server inode numbers unreliable: fall
				   back to locally generated ones and retry */
				cifs_autodisable_serverino(CIFS_SB(sb));
				iput(inode);
				fattr->cf_uniqueid = iunique(sb, ROOT_I);
				goto retry_iget5_locked;
			}
		}

		cifs_fattr_to_inode(inode, fattr);
		if (sb->s_flags & MS_NOATIME)
			inode->i_flags |= S_NOATIME | S_NOCMTIME;
		if (inode->i_state & I_NEW) {
			inode->i_ino = hash;
			if (S_ISREG(inode->i_mode))
				inode->i_data.backing_dev_info = sb->s_bdi;
#ifdef CONFIG_CIFS_FSCACHE
			/* initialize per-inode cache cookie pointer */
			CIFS_I(inode)->fscache = NULL;
#endif
			unlock_new_inode(inode);
		}
	}

	return inode;
}
/* gets root inode */
struct inode *cifs_root_iget(struct super_block *sb)
{
	int xid;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct inode *inode = NULL;
	long rc;
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	xid = GetXid();
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
	else
		rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);

	if (!inode) {
		inode = ERR_PTR(rc);
		goto out;
	}

#ifdef CONFIG_CIFS_FSCACHE
	/* populate tcon->resource_id */
	tcon->resource_id = CIFS_I(inode)->uniqueid;
#endif

	/* IPC$ shares have no real root: fabricate a directory inode */
	if (rc && tcon->ipc) {
		cFYI(1, "ipc connection - fake read inode");
		inode->i_mode |= S_IFDIR;
		set_nlink(inode, 2);
		inode->i_op = &cifs_ipc_inode_ops;
		inode->i_fop = &simple_dir_operations;
		inode->i_uid = cifs_sb->mnt_uid;
		inode->i_gid = cifs_sb->mnt_gid;
	} else if (rc) {
		iget_failed(inode);
		inode = ERR_PTR(rc);
	}

out:
	/* can not call macro FreeXid here since in a void func
	 * TODO: This is no longer true
	 */
	_FreeXid(xid);
	return inode;
}
/*
 * Push timestamps (from @attrs) and DOS attributes (@dosattr) for a file
 * to the server. Prefers an already-open writable handle, then a
 * path-based set, and finally opens a handle just for the set call.
 */
static int
cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
		   char *full_path, __u32 dosattr)
{
	int rc;
	int oplock = 0;
	__u16 netfid;
	__u32 netpid;
	bool set_time = false;
	struct cifsFileInfo *open_file;
	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink = NULL;
	struct cifs_tcon *pTcon;
	FILE_BASIC_INFO	info_buf;

	if (attrs == NULL)
		return -EINVAL;

	/* a zero time field means "leave unchanged" on the wire */
	if (attrs->ia_valid & ATTR_ATIME) {
		set_time = true;
		info_buf.LastAccessTime =
			cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_atime));
	} else
		info_buf.LastAccessTime = 0;

	if (attrs->ia_valid & ATTR_MTIME) {
		set_time = true;
		info_buf.LastWriteTime =
		    cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_mtime));
	} else
		info_buf.LastWriteTime = 0;

	/*
	 * Samba throws this field away, but windows may actually use it.
	 * Do not set ctime unless other time stamps are changed explicitly
	 * (i.e. by utimes()) since we would then have a mix of client and
	 * server times.
	 */
	if (set_time && (attrs->ia_valid & ATTR_CTIME)) {
		cFYI(1, "CIFS - CTIME changed");
		info_buf.ChangeTime =
		    cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime));
	} else
		info_buf.ChangeTime = 0;

	info_buf.CreationTime = 0;	/* don't change */
	info_buf.Attributes = cpu_to_le32(dosattr);

	/*
	 * If the file is already open for write, just use that fileid
	 */
	open_file = find_writable_file(cifsInode, true);
	if (open_file) {
		netfid = open_file->netfid;
		netpid = open_file->pid;
		pTcon = tlink_tcon(open_file->tlink);
		goto set_via_filehandle;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		tlink = NULL;
		goto out;
	}
	pTcon = tlink_tcon(tlink);

	/*
	 * NT4 apparently returns success on this call, but it doesn't
	 * really work.
	 */
	if (!(pTcon->ses->flags & CIFS_SES_NT4)) {
		rc = CIFSSMBSetPathInfo(xid, pTcon, full_path,
				     &info_buf, cifs_sb->local_nls,
				     cifs_sb->mnt_cifs_flags &
					CIFS_MOUNT_MAP_SPECIAL_CHR);
		if (rc == 0) {
			cifsInode->cifsAttrs = dosattr;
			goto out;
		} else if (rc != -EOPNOTSUPP && rc != -EINVAL)
			goto out;
	}

	cFYI(1, "calling SetFileInfo since SetPathInfo for "
		 "times not supported by this server");
	/* path-based set unsupported: open a handle just for the set */
	rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN,
			 SYNCHRONIZE | FILE_WRITE_ATTRIBUTES,
			 CREATE_NOT_DIR, &netfid, &oplock,
			 NULL, cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);

	if (rc != 0) {
		if (rc == -EIO)
			rc = -EINVAL;
		goto out;
	}

	netpid = current->tgid;

set_via_filehandle:
	rc = CIFSSMBSetFileInfo(xid, pTcon, &info_buf, netfid, netpid);
	if (!rc)
		cifsInode->cifsAttrs = dosattr;

	if (open_file == NULL)
		CIFSSMBClose(xid, pTcon, netfid);
	else
		cifsFileInfo_put(open_file);
out:
	if (tlink != NULL)
		cifs_put_tlink(tlink);
	return rc;
}
/*
 * open the given file (if it isn't already), set the DELETE_ON_CLOSE bit
 * and rename it to a random name that hopefully won't conflict with
 * anything else.
 *
 * Returns 0 on success; -ETXTBSY when the rename or the disposition set
 * fails (the caller treats the file as busy); other negative errors from
 * the open. On failure after the rename, the original name and, where
 * possible, the original attributes are restored.
 */
static int
cifs_rename_pending_delete(char *full_path, struct dentry *dentry, int xid)
{
	int oplock = 0;
	int rc;
	__u16 netfid;
	struct inode *inode = dentry->d_inode;
	struct cifsInodeInfo *cifsInode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;
	__u32 dosattr, origattr;
	FILE_BASIC_INFO *info_buf = NULL;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	rc = CIFSSMBOpen(xid, tcon, full_path, FILE_OPEN,
			 DELETE|FILE_WRITE_ATTRIBUTES, CREATE_NOT_DIR,
			 &netfid, &oplock, NULL, cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc != 0)
		goto out;

	origattr = cifsInode->cifsAttrs;
	if (origattr == 0)
		origattr |= ATTR_NORMAL;

	dosattr = origattr & ~ATTR_READONLY;
	if (dosattr == 0)
		dosattr |= ATTR_NORMAL;
	dosattr |= ATTR_HIDDEN;

	/* set ATTR_HIDDEN and clear ATTR_READONLY, but only if needed */
	if (dosattr != origattr) {
		info_buf = kzalloc(sizeof(*info_buf), GFP_KERNEL);
		if (info_buf == NULL) {
			rc = -ENOMEM;
			goto out_close;
		}
		info_buf->Attributes = cpu_to_le32(dosattr);
		rc = CIFSSMBSetFileInfo(xid, tcon, info_buf, netfid,
					current->tgid);
		/* although we would like to mark the file hidden
 		   if that fails we will still try to rename it */
		if (!rc)
			/* cache the attributes we just set on the server */
			cifsInode->cifsAttrs = dosattr;
		else
			/* attrs unchanged on server; matching dosattr to
			   origattr also disables the undo_setattr path */
			dosattr = origattr; /* since not able to change them */
	}

	/* rename the file */
	rc = CIFSSMBRenameOpenFile(xid, tcon, netfid, NULL, cifs_sb->local_nls,
				   cifs_sb->mnt_cifs_flags &
					    CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc != 0) {
		rc = -ETXTBSY;
		goto undo_setattr;
	}

	/* try to set DELETE_ON_CLOSE */
	if (!cifsInode->delete_pending) {
		rc = CIFSSMBSetFileDisposition(xid, tcon, true, netfid,
					       current->tgid);
		/*
		 * some samba versions return -ENOENT when we try to set the
		 * file disposition here. Likely a samba bug, but work around
		 * it for now. This means that some cifsXXX files may hang
		 * around after they shouldn't.
		 *
		 * BB: remove this hack after more servers have the fix
		 */
		if (rc == -ENOENT)
			rc = 0;
		else if (rc != 0) {
			rc = -ETXTBSY;
			goto undo_rename;
		}
		cifsInode->delete_pending = true;
	}

out_close:
	CIFSSMBClose(xid, tcon, netfid);
out:
	kfree(info_buf);
	cifs_put_tlink(tlink);
	return rc;

	/*
	 * reset everything back to the original state. Don't bother
	 * dealing with errors here since we can't do anything about
	 * them anyway.
	 */
undo_rename:
	CIFSSMBRenameOpenFile(xid, tcon, netfid, dentry->d_name.name,
				cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
					    CIFS_MOUNT_MAP_SPECIAL_CHR);
undo_setattr:
	if (dosattr != origattr) {
		info_buf->Attributes = cpu_to_le32(origattr);
		if (!CIFSSMBSetFileInfo(xid, tcon, info_buf, netfid,
					current->tgid))
			cifsInode->cifsAttrs = origattr;
	}

	goto out_close;
}
/*
* If dentry->d_inode is null (usually meaning the cached dentry
* is a negative dentry) then we would attempt a standard SMB delete, but
* if that fails we can not attempt the fall back mechanisms on EACCESS
* but will return the EACCESS to the caller. Note that the VFS does not call
* unlink on negative dentries currently.
*/
/*
 * cifs_unlink - VFS ->unlink() for CIFS
 * @dir: parent directory inode
 * @dentry: dentry of the file being removed
 *
 * Tries a POSIX delete first when the server advertises the unix
 * extensions, then falls back to a standard SMB delete. -ETXTBSY
 * (file still open on the server) triggers the silly-rename fallback;
 * -EACCES is retried once after clearing the DOS read-only attribute.
 * Returns 0 or a negative errno.
 */
int cifs_unlink(struct inode *dir, struct dentry *dentry)
{
int rc = 0;
int xid;
char *full_path = NULL;
struct inode *inode = dentry->d_inode;
struct cifsInodeInfo *cifs_inode;
struct super_block *sb = dir->i_sb;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct tcon_link *tlink;
struct cifs_tcon *tcon;
struct iattr *attrs = NULL;
__u32 dosattr = 0, origattr = 0;
cFYI(1, "cifs_unlink, dir=0x%p, dentry=0x%p", dir, dentry);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
tcon = tlink_tcon(tlink);
xid = GetXid();
/* Unlink can be called from rename so we can not take the
 * sb->s_vfs_rename_mutex here */
full_path = build_path_from_dentry(dentry);
if (full_path == NULL) {
rc = -ENOMEM;
goto unlink_out;
}
/* prefer the POSIX (unix extensions) delete when the server offers it */
if ((tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = CIFSPOSIXDelFile(xid, tcon, full_path,
SMB_POSIX_UNLINK_FILE_TARGET, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
cFYI(1, "posix del rc %d", rc);
/* only fall through to the standard delete on other errors */
if ((rc == 0) || (rc == -ENOENT))
goto psx_del_no_retry;
}
retry_std_delete:
rc = CIFSSMBDelFile(xid, tcon, full_path, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
psx_del_no_retry:
if (!rc) {
if (inode)
drop_nlink(inode);
} else if (rc == -ENOENT) {
/* already gone on the server; drop the stale dentry */
d_drop(dentry);
} else if (rc == -ETXTBSY) {
/* file is open on the server: silly-rename it instead */
rc = cifs_rename_pending_delete(full_path, dentry, xid);
if (rc == 0)
drop_nlink(inode);
} else if ((rc == -EACCES) && (dosattr == 0) && inode) {
/* dosattr == 0 guarantees we attempt this only once */
attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
if (attrs == NULL) {
rc = -ENOMEM;
goto out_reval;
}
/* try to reset dos attributes */
cifs_inode = CIFS_I(inode);
origattr = cifs_inode->cifsAttrs;
if (origattr == 0)
origattr |= ATTR_NORMAL;
/* clear read-only; ATTR_HIDDEN keeps attrs non-zero on the wire */
dosattr = origattr & ~ATTR_READONLY;
if (dosattr == 0)
dosattr |= ATTR_NORMAL;
dosattr |= ATTR_HIDDEN;
rc = cifs_set_file_info(inode, attrs, xid, full_path, dosattr);
if (rc != 0)
goto out_reval;
goto retry_std_delete;
}
/* undo the setattr if we errored out and it's needed */
if (rc != 0 && dosattr != 0)
cifs_set_file_info(inode, attrs, xid, full_path, origattr);
out_reval:
if (inode) {
cifs_inode = CIFS_I(inode);
cifs_inode->time = 0; /* will force revalidate to get info
when needed */
inode->i_ctime = current_fs_time(sb);
}
dir->i_ctime = dir->i_mtime = current_fs_time(sb);
cifs_inode = CIFS_I(dir);
CIFS_I(dir)->time = 0; /* force revalidate of dir as well */
unlink_out:
kfree(full_path);
kfree(attrs);
FreeXid(xid);
cifs_put_tlink(tlink);
return rc;
}
/*
 * cifs_mkdir - VFS ->mkdir() for CIFS
 * @inode: parent directory inode
 * @direntry: dentry for the new directory
 * @mode: requested permission bits
 *
 * Uses the POSIX create path when the server supports the unix
 * extensions (falling back to the classic SMB mkdir on -EOPNOTSUPP),
 * then instantiates the dentry and pushes mode/ownership back to the
 * server as far as the protocol in use allows.
 * Returns 0 or a negative errno.
 */
int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
{
int rc = 0, tmprc;
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
char *full_path = NULL;
struct inode *newinode = NULL;
struct cifs_fattr fattr;
cFYI(1, "In cifs_mkdir, mode = 0x%hx inode = 0x%p", mode, inode);
cifs_sb = CIFS_SB(inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
pTcon = tlink_tcon(tlink);
xid = GetXid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
goto mkdir_out;
}
/* POSIX mkdir path: create and fetch attributes in one round trip */
if ((pTcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(pTcon->fsUnixInfo.Capability))) {
u32 oplock = 0;
FILE_UNIX_BASIC_INFO *pInfo =
kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
if (pInfo == NULL) {
rc = -ENOMEM;
goto mkdir_out;
}
mode &= ~current_umask();
rc = CIFSPOSIXCreate(xid, pTcon, SMB_O_DIRECTORY | SMB_O_CREAT,
mode, NULL /* netfid */, pInfo, &oplock,
full_path, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc == -EOPNOTSUPP) {
/* server claimed but does not really do POSIX ops */
kfree(pInfo);
goto mkdir_retry_old;
} else if (rc) {
cFYI(1, "posix mkdir returned 0x%x", rc);
d_drop(direntry);
} else {
if (pInfo->Type == cpu_to_le32(-1)) {
/* no return info, go query for it */
kfree(pInfo);
goto mkdir_get_info;
}
/*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need
to set uid/gid */
cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
cifs_fill_uniqueid(inode->i_sb, &fattr);
newinode = cifs_iget(inode->i_sb, &fattr);
if (!newinode) {
kfree(pInfo);
goto mkdir_get_info;
}
d_instantiate(direntry, newinode);
#ifdef CONFIG_CIFS_DEBUG2
cFYI(1, "instantiated dentry %p %s to inode %p",
direntry, direntry->d_name.name, newinode);
if (newinode->i_nlink != 2)
cFYI(1, "unexpected number of links %d",
newinode->i_nlink);
#endif
}
kfree(pInfo);
goto mkdir_out;
}
mkdir_retry_old:
/* BB add setting the equivalent of mode via CreateX w/ACLs */
rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
cFYI(1, "cifs_mkdir returned 0x%x", rc);
d_drop(direntry);
} else {
/* also reached via goto when POSIX create returned no info */
mkdir_get_info:
if (pTcon->unix_ext)
rc = cifs_get_inode_info_unix(&newinode, full_path,
inode->i_sb, xid);
else
rc = cifs_get_inode_info(&newinode, full_path, NULL,
inode->i_sb, xid, NULL);
d_instantiate(direntry, newinode);
/* setting nlink not necessary except in cases where we
* failed to get it from the server or was set bogus */
if ((direntry->d_inode) && (direntry->d_inode->i_nlink < 2))
set_nlink(direntry->d_inode, 2);
mode &= ~current_umask();
/* must turn on setgid bit if parent dir has it */
if (inode->i_mode & S_ISGID)
mode |= S_ISGID;
if (pTcon->unix_ext) {
/* push mode (and optionally ownership) via unix extensions */
struct cifs_unix_set_info_args args = {
.mode = mode,
.ctime = NO_CHANGE_64,
.atime = NO_CHANGE_64,
.mtime = NO_CHANGE_64,
.device = 0,
};
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
args.uid = (__u64)current_fsuid();
if (inode->i_mode & S_ISGID)
args.gid = (__u64)inode->i_gid;
else
args.gid = (__u64)current_fsgid();
} else {
args.uid = NO_CHANGE_64;
args.gid = NO_CHANGE_64;
}
CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, &args,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
} else {
/* no unix extensions: approximate mode via the DOS
read-only attribute when the dir is not writable */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) &&
(mode & S_IWUGO) == 0) {
FILE_BASIC_INFO pInfo;
struct cifsInodeInfo *cifsInode;
u32 dosattrs;
memset(&pInfo, 0, sizeof(pInfo));
cifsInode = CIFS_I(newinode);
dosattrs = cifsInode->cifsAttrs|ATTR_READONLY;
pInfo.Attributes = cpu_to_le32(dosattrs);
tmprc = CIFSSMBSetPathInfo(xid, pTcon,
full_path, &pInfo,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (tmprc == 0)
cifsInode->cifsAttrs = dosattrs;
}
/* fake up local mode/ownership per mount options */
if (direntry->d_inode) {
if (cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_DYNPERM)
direntry->d_inode->i_mode =
(mode | S_IFDIR);
if (cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_SET_UID) {
direntry->d_inode->i_uid =
current_fsuid();
if (inode->i_mode & S_ISGID)
direntry->d_inode->i_gid =
inode->i_gid;
else
direntry->d_inode->i_gid =
current_fsgid();
}
}
}
}
mkdir_out:
/*
 * Force revalidate to get parent dir info when needed since cached
 * attributes are invalid now.
 */
CIFS_I(inode)->time = 0;
kfree(full_path);
FreeXid(xid);
cifs_put_tlink(tlink);
return rc;
}
/*
 * cifs_rmdir - VFS ->rmdir() for CIFS
 * @inode: parent directory inode
 * @direntry: dentry of the directory to remove
 *
 * Issues an SMB RmDir and, on success, zeroes the victim's size and
 * link count locally. Cached attributes of both the victim and the
 * parent are invalidated so the next lookup refetches them.
 * Returns 0 or a negative errno.
 */
int cifs_rmdir(struct inode *inode, struct dentry *direntry)
{
int rc = 0;
int xid;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
char *full_path = NULL;
struct cifsInodeInfo *cifsInode;
cFYI(1, "cifs_rmdir, inode = 0x%p", inode);
xid = GetXid();
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
goto rmdir_exit;
}
cifs_sb = CIFS_SB(inode->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
rc = PTR_ERR(tlink);
goto rmdir_exit;
}
pTcon = tlink_tcon(tlink);
rc = CIFSSMBRmDir(xid, pTcon, full_path, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
cifs_put_tlink(tlink);
if (!rc) {
/* reflect the deletion in the local inode under i_lock */
spin_lock(&direntry->d_inode->i_lock);
i_size_write(direntry->d_inode, 0);
clear_nlink(direntry->d_inode);
spin_unlock(&direntry->d_inode->i_lock);
}
cifsInode = CIFS_I(direntry->d_inode);
/* force revalidate to go get info when needed */
cifsInode->time = 0;
cifsInode = CIFS_I(inode);
/*
 * Force revalidate to get parent dir info when needed since cached
 * attributes are invalid now.
 */
cifsInode->time = 0;
direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime =
current_fs_time(inode->i_sb);
rmdir_exit:
kfree(full_path);
FreeXid(xid);
return rc;
}
/*
 * cifs_do_rename - perform a single server-side rename attempt
 * @xid: transaction id for this operation
 * @from_dentry: source dentry
 * @fromPath: full source path on the share
 * @to_dentry: target dentry
 * @toPath: full target path on the share
 *
 * Tries a path-based SMB rename first. If the source is busy
 * (-ETXTBSY) and source/target share a parent, retries via an
 * open-by-handle rename (open with DELETE access, rename, close).
 * Returns 0 or a negative errno from the last attempt.
 */
static int
cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
struct dentry *to_dentry, const char *toPath)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb);
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
__u16 srcfid;
int oplock, rc;
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
pTcon = tlink_tcon(tlink);
/* try path-based rename first */
rc = CIFSSMBRename(xid, pTcon, fromPath, toPath, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
/*
 * don't bother with rename by filehandle unless file is busy and
 * source Note that cross directory moves do not work with
 * rename by filehandle to various Windows servers.
 */
if (rc == 0 || rc != -ETXTBSY)
goto do_rename_exit;
/* open-file renames don't work across directories */
if (to_dentry->d_parent != from_dentry->d_parent)
goto do_rename_exit;
/* open the file to be renamed -- we need DELETE perms */
rc = CIFSSMBOpen(xid, pTcon, fromPath, FILE_OPEN, DELETE,
CREATE_NOT_DIR, &srcfid, &oplock, NULL,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc == 0) {
rc = CIFSSMBRenameOpenFile(xid, pTcon, srcfid,
(const char *) to_dentry->d_name.name,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
CIFSSMBClose(xid, pTcon, srcfid);
}
do_rename_exit:
cifs_put_tlink(tlink);
return rc;
}
/*
 * cifs_rename - VFS ->rename() for CIFS
 * @source_dir: source parent directory
 * @source_dentry: dentry being renamed
 * @target_dir: target parent directory
 * @target_dentry: destination dentry (may already exist)
 *
 * Attempts the rename; on -EEXIST with unix extensions it first checks
 * whether source and target are hardlinks of the same inode (a no-op
 * per POSIX). Otherwise, on -EEXIST/-EACCES it unlinks the existing
 * target and retries once. Returns 0 or a negative errno.
 */
int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
struct inode *target_dir, struct dentry *target_dentry)
{
char *fromName = NULL;
char *toName = NULL;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *tcon;
FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
FILE_UNIX_BASIC_INFO *info_buf_target;
int xid, rc, tmprc;
cifs_sb = CIFS_SB(source_dir->i_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
tcon = tlink_tcon(tlink);
xid = GetXid();
/*
 * we already have the rename sem so we do not need to
 * grab it again here to protect the path integrity
 */
fromName = build_path_from_dentry(source_dentry);
if (fromName == NULL) {
rc = -ENOMEM;
goto cifs_rename_exit;
}
toName = build_path_from_dentry(target_dentry);
if (toName == NULL) {
rc = -ENOMEM;
goto cifs_rename_exit;
}
rc = cifs_do_rename(xid, source_dentry, fromName,
target_dentry, toName);
if (rc == -EEXIST && tcon->unix_ext) {
/*
 * Are src and dst hardlinks of same inode? We can
 * only tell with unix extensions enabled
 */
/* one allocation holds both source and target info buffers */
info_buf_source =
kmalloc(2 * sizeof(FILE_UNIX_BASIC_INFO),
GFP_KERNEL);
if (info_buf_source == NULL) {
rc = -ENOMEM;
goto cifs_rename_exit;
}
info_buf_target = info_buf_source + 1;
tmprc = CIFSSMBUnixQPathInfo(xid, tcon, fromName,
info_buf_source,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (tmprc != 0)
goto unlink_target;
tmprc = CIFSSMBUnixQPathInfo(xid, tcon, toName,
info_buf_target,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (tmprc == 0 && (info_buf_source->UniqueId ==
info_buf_target->UniqueId)) {
/* same file, POSIX says that this is a noop */
rc = 0;
goto cifs_rename_exit;
}
} /* else ... BB we could add the same check for Windows by
checking the UniqueId via FILE_INTERNAL_INFO */
/* also reached by fallthrough when the hardlink check did not apply */
unlink_target:
/* Try unlinking the target dentry if it's not negative */
if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) {
tmprc = cifs_unlink(target_dir, target_dentry);
if (tmprc)
goto cifs_rename_exit;
rc = cifs_do_rename(xid, source_dentry, fromName,
target_dentry, toName);
}
cifs_rename_exit:
kfree(info_buf_source);
kfree(fromName);
kfree(toName);
FreeXid(xid);
cifs_put_tlink(tlink);
return rc;
}
/*
 * Decide whether the cached attributes of @inode are stale and must be
 * refetched from the server.
 */
static bool
cifs_inode_needs_reval(struct inode *inode)
{
	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	bool expired, hardlink_special;

	/* a read oplock means the server vouches for our cached copy */
	if (cifs_i->clientCanCacheRead)
		return false;

	/* no caching configured, or the cached info was invalidated */
	if (!lookupCacheEnabled || cifs_i->time == 0)
		return true;

	/* cached attributes age out after the actimeo window */
	expired = !time_in_range(jiffies, cifs_i->time,
				 cifs_i->time + cifs_sb->actimeo);

	/* hardlinked files w/ noserverino get "special" treatment */
	hardlink_special =
		!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) &&
		S_ISREG(inode->i_mode) && inode->i_nlink != 1;

	return expired || hardlink_special;
}
/*
 * Zap the page cache for @inode. Called when the invalid_mapping flag is
 * set. On failure the flag is re-armed so a later caller retries.
 */
int
cifs_invalidate_mapping(struct inode *inode)
{
	struct cifsInodeInfo *cifs_i = CIFS_I(inode);
	int rc;

	cifs_i->invalid_mapping = false;

	/* nothing cached -- just reset the fscache cookie */
	if (!inode->i_mapping || inode->i_mapping->nrpages == 0) {
		cifs_fscache_reset_inode_cookie(inode);
		return 0;
	}

	rc = invalidate_inode_pages2(inode->i_mapping);
	if (rc) {
		cERROR(1, "%s: could not invalidate inode %p", __func__,
			inode);
		cifs_i->invalid_mapping = true;
	}

	cifs_fscache_reset_inode_cookie(inode);
	return rc;
}
/*
 * Refresh the attributes backing an open file, querying by handle, but
 * only when the cached copy is considered stale.
 */
int cifs_revalidate_file_attr(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data;

	if (!cifs_inode_needs_reval(inode))
		return 0;

	/* use the unix-extensions query when the tcon supports it */
	if (tlink_tcon(cfile->tlink)->unix_ext)
		return cifs_get_file_info_unix(filp);

	return cifs_get_file_info(filp);
}
/*
 * cifs_revalidate_dentry_attr - refresh a dentry's inode attributes by path
 * @dentry: dentry whose inode attributes may be stale
 *
 * No-op (returns 0) when the cache is still considered fresh; returns
 * -ENOENT for negative dentries, otherwise the result of the server
 * attribute query.
 */
int cifs_revalidate_dentry_attr(struct dentry *dentry)
{
int xid;
int rc = 0;
struct inode *inode = dentry->d_inode;
struct super_block *sb = dentry->d_sb;
char *full_path = NULL;
if (inode == NULL)
return -ENOENT;
if (!cifs_inode_needs_reval(inode))
return rc;
xid = GetXid();
/* can not safely grab the rename sem here if rename calls revalidate
since that would deadlock */
full_path = build_path_from_dentry(dentry);
if (full_path == NULL) {
rc = -ENOMEM;
goto out;
}
cFYI(1, "Update attributes: %s inode 0x%p count %d dentry: 0x%p d_time "
"%ld jiffies %ld", full_path, inode, inode->i_count.counter,
dentry, dentry->d_time, jiffies);
/* path-based query; variant depends on unix extensions support */
if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
else
rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
xid, NULL);
out:
kfree(full_path);
FreeXid(xid);
return rc;
}
/* Revalidate an open file's attributes and, if flagged, its page cache. */
int cifs_revalidate_file(struct file *filp)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	int rc = cifs_revalidate_file_attr(filp);

	if (rc == 0 && CIFS_I(inode)->invalid_mapping)
		rc = cifs_invalidate_mapping(inode);

	return rc;
}
/* Revalidate a dentry's inode attributes and, if flagged, its page cache. */
int cifs_revalidate_dentry(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	int rc = cifs_revalidate_dentry_attr(dentry);

	if (rc == 0 && CIFS_I(inode)->invalid_mapping)
		rc = cifs_invalidate_mapping(inode);

	return rc;
}
/*
 * cifs_getattr - VFS ->getattr() for CIFS
 * @mnt: vfsmount (unused here)
 * @dentry: dentry being stat'ed
 * @stat: kstat to fill in
 *
 * Waits out dirty writeback when we hold no read oplock, revalidates
 * the inode attributes, then fills @stat, overriding blksize, inode
 * number, and (for multiuser non-unix mounts) ownership.
 */
int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
struct inode *inode = dentry->d_inode;
int rc;
/*
 * We need to be sure that all dirty pages are written and the server
 * has actual ctime, mtime and file length.
 */
if (!CIFS_I(inode)->clientCanCacheRead && inode->i_mapping &&
inode->i_mapping->nrpages != 0) {
rc = filemap_fdatawait(inode->i_mapping);
if (rc) {
mapping_set_error(inode->i_mapping, rc);
return rc;
}
}
rc = cifs_revalidate_dentry_attr(dentry);
if (rc)
return rc;
generic_fillattr(inode, stat);
stat->blksize = CIFS_MAX_MSGSIZE;
stat->ino = CIFS_I(inode)->uniqueid;
/*
 * If on a multiuser mount without unix extensions, and the admin hasn't
 * overridden them, set the ownership to the fsuid/fsgid of the current
 * process.
 */
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) &&
!tcon->unix_ext) {
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID))
stat->uid = current_fsuid();
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID))
stat->gid = current_fsgid();
}
return rc;
}
/*
 * Zero the tail of the page containing offset @from (the new EOF) so
 * stale data past EOF is not left in the page cache.
 */
static int cifs_truncate_page(struct address_space *mapping, loff_t from)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	struct page *page = grab_cache_page(mapping, index);

	if (page == NULL)
		return -ENOMEM;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	unlock_page(page);
	page_cache_release(page);
	return 0;
}
/* Update i_size under i_lock, then drop page cache beyond the new EOF. */
static void cifs_setsize(struct inode *inode, loff_t offset)
{
	loff_t oldsize;

	spin_lock(&inode->i_lock);
	oldsize = inode->i_size;
	i_size_write(inode, offset);
	spin_unlock(&inode->i_lock);

	truncate_pagecache(inode, oldsize, offset);
}
/*
 * cifs_set_file_size - set a file's size on the server
 * @inode: inode being resized
 * @attrs: iattr carrying the new size in ia_size
 * @xid: transaction id
 * @full_path: full path of the file on the share
 *
 * Prefers setting the size through an existing writable handle, falling
 * back to a zero-length write at the new EOF, then to path-based
 * SetEOF, then to a legacy open + write. On success updates server_eof,
 * i_size, and zeroes the partial tail page. Returns 0 or negative errno.
 */
static int
cifs_set_file_size(struct inode *inode, struct iattr *attrs,
int xid, char *full_path)
{
int rc;
struct cifsFileInfo *open_file;
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = NULL;
struct cifs_tcon *pTcon = NULL;
struct cifs_io_parms io_parms;
/*
 * To avoid spurious oplock breaks from server, in the case of
 * inodes that we already have open, avoid doing path based
 * setting of file size if we can do it by handle.
 * This keeps our caching token (oplock) and avoids timeouts
 * when the local oplock break takes longer to flush
 * writebehind data than the SMB timeout for the SetPathInfo
 * request would allow
 */
open_file = find_writable_file(cifsInode, true);
if (open_file) {
__u16 nfid = open_file->netfid;
__u32 npid = open_file->pid;
pTcon = tlink_tcon(open_file->tlink);
rc = CIFSSMBSetFileSize(xid, pTcon, attrs->ia_size, nfid,
npid, false);
cifsFileInfo_put(open_file);
cFYI(1, "SetFSize for attrs rc = %d", rc);
if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
/* older servers: emulate via zero-length write at new EOF */
unsigned int bytes_written;
io_parms.netfid = nfid;
io_parms.pid = npid;
io_parms.tcon = pTcon;
io_parms.offset = 0;
io_parms.length = attrs->ia_size;
rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
NULL, NULL, 1);
cFYI(1, "Wrt seteof rc %d", rc);
}
} else
rc = -EINVAL;
if (rc != 0) {
if (pTcon == NULL) {
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
pTcon = tlink_tcon(tlink);
}
/* Set file size by pathname rather than by handle
either because no valid, writeable file handle for
it was found or because there was an error setting
it by handle */
rc = CIFSSMBSetEOF(xid, pTcon, full_path, attrs->ia_size,
false, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
cFYI(1, "SetEOF by path (setattrs) rc = %d", rc);
if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
/* last resort: legacy open + zero-length write + close */
__u16 netfid;
int oplock = 0;
rc = SMBLegacyOpen(xid, pTcon, full_path,
FILE_OPEN, GENERIC_WRITE,
CREATE_NOT_DIR, &netfid, &oplock, NULL,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc == 0) {
unsigned int bytes_written;
io_parms.netfid = netfid;
io_parms.pid = current->tgid;
io_parms.tcon = pTcon;
io_parms.offset = 0;
io_parms.length = attrs->ia_size;
rc = CIFSSMBWrite(xid, &io_parms,
&bytes_written,
NULL, NULL, 1);
cFYI(1, "wrt seteof rc %d", rc);
CIFSSMBClose(xid, pTcon, netfid);
}
}
if (tlink)
cifs_put_tlink(tlink);
}
if (rc == 0) {
/* server accepted the size: update the local view of EOF */
cifsInode->server_eof = attrs->ia_size;
cifs_setsize(inode, attrs->ia_size);
cifs_truncate_page(inode->i_mapping, inode->i_size);
}
return rc;
}
/*
 * cifs_setattr_unix - ->setattr() implementation for unix-extensions mounts
 * @direntry: dentry whose inode is being changed
 * @attrs: requested attribute changes
 *
 * Flushes dirty data, applies a size change (if any), then packs the
 * remaining changes into a cifs_unix_set_info_args and sends them by
 * handle when a writable handle exists, else by path. On success the
 * local inode is updated to match. Returns 0 or a negative errno.
 */
static int
cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
{
int rc;
int xid;
char *full_path = NULL;
struct inode *inode = direntry->d_inode;
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
struct cifs_unix_set_info_args *args = NULL;
struct cifsFileInfo *open_file;
cFYI(1, "setattr_unix on file %s attrs->ia_valid=0x%x",
direntry->d_name.name, attrs->ia_valid);
xid = GetXid();
/* noperm mounts skip local permission checks */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
attrs->ia_valid |= ATTR_FORCE;
rc = inode_change_ok(inode, attrs);
if (rc < 0)
goto out;
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
goto out;
}
/*
 * Attempt to flush data before changing attributes. We need to do
 * this for ATTR_SIZE and ATTR_MTIME for sure, and if we change the
 * ownership or mode then we may also need to do this. Here, we take
 * the safe way out and just do the flush on all setattr requests. If
 * the flush returns error, store it to report later and continue.
 *
 * BB: This should be smarter. Why bother flushing pages that
 * will be truncated anyway? Also, should we error out here if
 * the flush returns error?
 */
rc = filemap_write_and_wait(inode->i_mapping);
mapping_set_error(inode->i_mapping, rc);
rc = 0;
if (attrs->ia_valid & ATTR_SIZE) {
rc = cifs_set_file_size(inode, attrs, xid, full_path);
if (rc != 0)
goto out;
}
/* skip mode change if it's just for clearing setuid/setgid */
if (attrs->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
attrs->ia_valid &= ~ATTR_MODE;
args = kmalloc(sizeof(*args), GFP_KERNEL);
if (args == NULL) {
rc = -ENOMEM;
goto out;
}
/* set up the struct; NO_CHANGE_64 marks fields the server must keep */
if (attrs->ia_valid & ATTR_MODE)
args->mode = attrs->ia_mode;
else
args->mode = NO_CHANGE_64;
if (attrs->ia_valid & ATTR_UID)
args->uid = attrs->ia_uid;
else
args->uid = NO_CHANGE_64;
if (attrs->ia_valid & ATTR_GID)
args->gid = attrs->ia_gid;
else
args->gid = NO_CHANGE_64;
if (attrs->ia_valid & ATTR_ATIME)
args->atime = cifs_UnixTimeToNT(attrs->ia_atime);
else
args->atime = NO_CHANGE_64;
if (attrs->ia_valid & ATTR_MTIME)
args->mtime = cifs_UnixTimeToNT(attrs->ia_mtime);
else
args->mtime = NO_CHANGE_64;
if (attrs->ia_valid & ATTR_CTIME)
args->ctime = cifs_UnixTimeToNT(attrs->ia_ctime);
else
args->ctime = NO_CHANGE_64;
args->device = 0;
/* prefer setting by handle to avoid oplock breaks (see set_file_size) */
open_file = find_writable_file(cifsInode, true);
if (open_file) {
u16 nfid = open_file->netfid;
u32 npid = open_file->pid;
pTcon = tlink_tcon(open_file->tlink);
rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args, nfid, npid);
cifsFileInfo_put(open_file);
} else {
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
rc = PTR_ERR(tlink);
goto out;
}
pTcon = tlink_tcon(tlink);
rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
cifs_put_tlink(tlink);
}
if (rc)
goto out;
/* server accepted the change: mirror it into the local inode */
if ((attrs->ia_valid & ATTR_SIZE) &&
attrs->ia_size != i_size_read(inode))
truncate_setsize(inode, attrs->ia_size);
setattr_copy(inode, attrs);
mark_inode_dirty(inode);
/* force revalidate when any of these times are set since some
of the fs types (eg ext3, fat) do not have fine enough
time granularity to match protocol, and we do not have a
a way (yet) to query the server fs's time granularity (and
whether it rounds times down).
*/
if (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME))
cifsInode->time = 0;
out:
kfree(args);
kfree(full_path);
FreeXid(xid);
return rc;
}
/*
 * cifs_setattr_nounix - ->setattr() implementation for non-unix mounts
 * @direntry: dentry whose inode is being changed
 * @attrs: requested attribute changes
 *
 * Without the unix extensions, ownership changes go through CIFS ACLs
 * (when mounted with cifsacl), mode changes are approximated by
 * toggling the DOS read-only attribute, and times are set via
 * SetFileInfo. Returns 0 or a negative errno.
 */
static int
cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
{
int xid;
uid_t uid = NO_CHANGE_32;
gid_t gid = NO_CHANGE_32;
struct inode *inode = direntry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
char *full_path = NULL;
int rc = -EACCES;
__u32 dosattr = 0;
__u64 mode = NO_CHANGE_64;
xid = GetXid();
cFYI(1, "setattr on file %s attrs->iavalid 0x%x",
direntry->d_name.name, attrs->ia_valid);
/* noperm mounts skip local permission checks */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
attrs->ia_valid |= ATTR_FORCE;
rc = inode_change_ok(inode, attrs);
if (rc < 0) {
FreeXid(xid);
return rc;
}
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
FreeXid(xid);
return rc;
}
/*
 * Attempt to flush data before changing attributes. We need to do
 * this for ATTR_SIZE and ATTR_MTIME for sure, and if we change the
 * ownership or mode then we may also need to do this. Here, we take
 * the safe way out and just do the flush on all setattr requests. If
 * the flush returns error, store it to report later and continue.
 *
 * BB: This should be smarter. Why bother flushing pages that
 * will be truncated anyway? Also, should we error out here if
 * the flush returns error?
 */
rc = filemap_write_and_wait(inode->i_mapping);
mapping_set_error(inode->i_mapping, rc);
rc = 0;
if (attrs->ia_valid & ATTR_SIZE) {
rc = cifs_set_file_size(inode, attrs, xid, full_path);
if (rc != 0)
goto cifs_setattr_exit;
}
if (attrs->ia_valid & ATTR_UID)
uid = attrs->ia_uid;
if (attrs->ia_valid & ATTR_GID)
gid = attrs->ia_gid;
#ifdef CONFIG_CIFS_ACL
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
/* ownership changes are only possible via the CIFS ACL */
if (uid != NO_CHANGE_32 || gid != NO_CHANGE_32) {
rc = id_mode_to_cifs_acl(inode, full_path, NO_CHANGE_64,
uid, gid);
if (rc) {
cFYI(1, "%s: Setting id failed with error: %d",
__func__, rc);
goto cifs_setattr_exit;
}
}
} else
#endif /* CONFIG_CIFS_ACL */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID))
attrs->ia_valid &= ~(ATTR_UID | ATTR_GID);
/* skip mode change if it's just for clearing setuid/setgid */
if (attrs->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
attrs->ia_valid &= ~ATTR_MODE;
if (attrs->ia_valid & ATTR_MODE) {
mode = attrs->ia_mode;
rc = 0;
#ifdef CONFIG_CIFS_ACL
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
rc = id_mode_to_cifs_acl(inode, full_path, mode,
NO_CHANGE_32, NO_CHANGE_32);
if (rc) {
cFYI(1, "%s: Setting ACL failed with error: %d",
__func__, rc);
goto cifs_setattr_exit;
}
} else
#endif /* CONFIG_CIFS_ACL */
/* no ACLs: map mode onto the DOS read-only attribute */
if (((mode & S_IWUGO) == 0) &&
(cifsInode->cifsAttrs & ATTR_READONLY) == 0) {
dosattr = cifsInode->cifsAttrs | ATTR_READONLY;
/* fix up mode if we're not using dynperm */
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0)
attrs->ia_mode = inode->i_mode & ~S_IWUGO;
} else if ((mode & S_IWUGO) &&
(cifsInode->cifsAttrs & ATTR_READONLY)) {
dosattr = cifsInode->cifsAttrs & ~ATTR_READONLY;
/* Attributes of 0 are ignored */
if (dosattr == 0)
dosattr |= ATTR_NORMAL;
/* reset local inode permissions to normal */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) {
attrs->ia_mode &= ~(S_IALLUGO);
if (S_ISDIR(inode->i_mode))
attrs->ia_mode |=
cifs_sb->mnt_dir_mode;
else
attrs->ia_mode |=
cifs_sb->mnt_file_mode;
}
} else if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) {
/* ignore mode change - ATTR_READONLY hasn't changed */
attrs->ia_valid &= ~ATTR_MODE;
}
}
if (attrs->ia_valid & (ATTR_MTIME|ATTR_ATIME|ATTR_CTIME) ||
((attrs->ia_valid & ATTR_MODE) && dosattr)) {
rc = cifs_set_file_info(inode, attrs, xid, full_path, dosattr);
/* BB: check for rc = -EOPNOTSUPP and switch to legacy mode */
/* Even if error on time set, no sense failing the call if
the server would set the time to a reasonable value anyway,
and this check ensures that we are not being called from
sys_utimes in which case we ought to fail the call back to
the user when the server rejects the call */
if ((rc) && (attrs->ia_valid &
(ATTR_MODE | ATTR_GID | ATTR_UID | ATTR_SIZE)))
rc = 0;
}
/* do not need local check to inode_check_ok since the server does
that */
if (rc)
goto cifs_setattr_exit;
/* server accepted the change: mirror it into the local inode */
if ((attrs->ia_valid & ATTR_SIZE) &&
attrs->ia_size != i_size_read(inode))
truncate_setsize(inode, attrs->ia_size);
setattr_copy(inode, attrs);
mark_inode_dirty(inode);
cifs_setattr_exit:
kfree(full_path);
FreeXid(xid);
return rc;
}
/*
 * ->setattr() entry point: dispatch to the unix-extensions or the
 * standard (non-unix) implementation depending on the master tcon.
 */
int
cifs_setattr(struct dentry *direntry, struct iattr *attrs)
{
	struct inode *inode = direntry->d_inode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);

	/* BB: add cifs_setattr_legacy for really old servers */
	if (!pTcon->unix_ext)
		return cifs_setattr_nounix(direntry, attrs);

	return cifs_setattr_unix(direntry, attrs);
}
#if 0 /* dead code: compiled out; kept as a placeholder (see comment below) */
void cifs_delete_inode(struct inode *inode)
{
cFYI(1, "In cifs_delete_inode, inode = 0x%p", inode);
/* may have to add back in if and when safe distributed caching of
directories added e.g. via FindNotify */
}
#endif
| gpl-2.0 |
sub77-bkp/slim_kernel_samsung_matisse | arch/powerpc/platforms/52xx/mpc52xx_pic.c | 4572 | 16971 | /*
*
* Programmable Interrupt Controller functions for the Freescale MPC52xx.
*
* Copyright (C) 2008 Secret Lab Technologies Ltd.
* Copyright (C) 2006 bplan GmbH
* Copyright (C) 2004 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2003 Montavista Software, Inc
*
* Based on the code from the 2.4 kernel by
* Dale Farnsworth <dfarnsworth@mvista.com> and Kent Borg.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*
*/
/*
* This is the device driver for the MPC5200 interrupt controller.
*
* hardware overview
* -----------------
* The MPC5200 interrupt controller groups the all interrupt sources into
* three groups called 'critical', 'main', and 'peripheral'. The critical
* group has 3 irqs, External IRQ0, slice timer 0 irq, and wake from deep
* sleep. Main group include the other 3 external IRQs, slice timer 1, RTC,
* gpios, and the general purpose timers. Peripheral group contains the
* remaining irq sources from all of the on-chip peripherals (PSCs, Ethernet,
* USB, DMA, etc).
*
* virqs
* -----
* The Linux IRQ subsystem requires that each irq source be assigned a
* system wide unique IRQ number starting at 1 (0 means no irq). Since
* systems can have multiple interrupt controllers, the virtual IRQ (virq)
* infrastructure lets each interrupt controller to define a local set
* of IRQ numbers and the virq infrastructure maps those numbers into
* a unique range of the global IRQ# space.
*
* To define a range of virq numbers for this controller, this driver first
* assigns a number to each of the irq groups (called the level 1 or L1
* value). Within each group individual irq sources are also assigned a
* number, as defined by the MPC5200 user guide, and refers to it as the
* level 2 or L2 value. The virq number is determined by shifting up the
* L1 value by MPC52xx_IRQ_L1_OFFSET and ORing it with the L2 value.
*
* For example, the TMR0 interrupt is irq 9 in the main group. The
* virq for TMR0 is calculated by ((1 << MPC52xx_IRQ_L1_OFFSET) | 9).
*
* The observant reader will also notice that this driver defines a 4th
* interrupt group called 'bestcomm'. The bestcomm group isn't physically
* part of the MPC5200 interrupt controller, but it is used here to assign
* a separate virq number for each bestcomm task (since any of the 16
* bestcomm tasks can cause the bestcomm interrupt to be raised). When a
* bestcomm interrupt occurs (peripheral group, irq 0) this driver determines
* which task needs servicing and returns the irq number for that task. This
* allows drivers which use bestcomm to define their own interrupt handlers.
*
* irq_chip structures
* -------------------
* For actually manipulating IRQs (masking, enabling, clearing, etc) this
* driver defines four separate 'irq_chip' structures, one for the main
* group, one for the peripherals group, one for the bestcomm group and one
* for external interrupts. The irq_chip structures provide the hooks needed
 * to manipulate each IRQ source, and since each group has a separate set
* of registers for controlling the irq, it makes sense to divide up the
* hooks along those lines.
*
* You'll notice that there is not an irq_chip for the critical group and
* you'll also notice that there is an irq_chip defined for external
* interrupts even though there is no external interrupt group. The reason
* for this is that the four external interrupts are all managed with the same
* register even though one of the external IRQs is in the critical group and
* the other three are in the main group. For this reason it makes sense for
* the 4 external irqs to be managed using a separate set of hooks. The
 * reason there is no crit irq_chip is that of the 3 irqs in the critical
 * group, only the external interrupt is actually supported at this time by
 * this driver, and since the external interrupt is the only one used, it
 * can just be directed to make use of the external irq irq_chip.
*
* device tree bindings
* --------------------
* The device tree bindings for this controller reflect the two level
* organization of irqs in the device. #interrupt-cells = <3> where the
* first cell is the group number [0..3], the second cell is the irq
* number in the group, and the third cell is the sense type (level/edge).
* For reference, the following is a list of the interrupt property values
* associated with external interrupt sources on the MPC5200 (just because
* it is non-obvious to determine what the interrupts property should be
* when reading the mpc5200 manual and it is a frequently asked question).
*
* External interrupts:
* <0 0 n> external irq0, n is sense (n=0: level high,
* <1 1 n> external irq1, n is sense n=1: edge rising,
* <1 2 n> external irq2, n is sense n=2: edge falling,
* <1 3 n> external irq3, n is sense n=3: level low)
*/
#undef DEBUG
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/mpc52xx.h>
/* HW IRQ mapping */
#define MPC52xx_IRQ_L1_CRIT (0)
#define MPC52xx_IRQ_L1_MAIN (1)
#define MPC52xx_IRQ_L1_PERP (2)
#define MPC52xx_IRQ_L1_SDMA (3)
#define MPC52xx_IRQ_L1_OFFSET (6)
#define MPC52xx_IRQ_L1_MASK (0x00c0)
#define MPC52xx_IRQ_L2_MASK (0x003f)
#define MPC52xx_IRQ_HIGHTESTHWIRQ (0xd0)
/* MPC5200 device tree match tables */
static struct of_device_id mpc52xx_pic_ids[] __initdata = {
{ .compatible = "fsl,mpc5200-pic", },
{ .compatible = "mpc5200-pic", },
{}
};
static struct of_device_id mpc52xx_sdma_ids[] __initdata = {
{ .compatible = "fsl,mpc5200-bestcomm", },
{ .compatible = "mpc5200-bestcomm", },
{}
};
static struct mpc52xx_intr __iomem *intr;
static struct mpc52xx_sdma __iomem *sdma;
static struct irq_domain *mpc52xx_irqhost = NULL;
static unsigned char mpc52xx_map_senses[4] = {
IRQ_TYPE_LEVEL_HIGH,
IRQ_TYPE_EDGE_RISING,
IRQ_TYPE_EDGE_FALLING,
IRQ_TYPE_LEVEL_LOW,
};
/* Utility functions */
static inline void io_be_setbit(u32 __iomem *addr, int bitno)
{
out_be32(addr, in_be32(addr) | (1 << bitno));
}
static inline void io_be_clrbit(u32 __iomem *addr, int bitno)
{
out_be32(addr, in_be32(addr) & ~(1 << bitno));
}
/*
* IRQ[0-3] interrupt irq_chip
*/
static void mpc52xx_extirq_mask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&intr->ctrl, 11 - l2irq);
}
static void mpc52xx_extirq_unmask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->ctrl, 11 - l2irq);
}
static void mpc52xx_extirq_ack(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->ctrl, 27-l2irq);
}
static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type)
{
u32 ctrl_reg, type;
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
void *handler = handle_level_irq;
pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__,
(int) irqd_to_hwirq(d), l2irq, flow_type);
switch (flow_type) {
case IRQF_TRIGGER_HIGH: type = 0; break;
case IRQF_TRIGGER_RISING: type = 1; handler = handle_edge_irq; break;
case IRQF_TRIGGER_FALLING: type = 2; handler = handle_edge_irq; break;
case IRQF_TRIGGER_LOW: type = 3; break;
default:
type = 0;
}
ctrl_reg = in_be32(&intr->ctrl);
ctrl_reg &= ~(0x3 << (22 - (l2irq * 2)));
ctrl_reg |= (type << (22 - (l2irq * 2)));
out_be32(&intr->ctrl, ctrl_reg);
__irq_set_handler_locked(d->irq, handler);
return 0;
}
static struct irq_chip mpc52xx_extirq_irqchip = {
.name = "MPC52xx External",
.irq_mask = mpc52xx_extirq_mask,
.irq_unmask = mpc52xx_extirq_unmask,
.irq_ack = mpc52xx_extirq_ack,
.irq_set_type = mpc52xx_extirq_set_type,
};
/*
* Main interrupt irq_chip
*/
static int mpc52xx_null_set_type(struct irq_data *d, unsigned int flow_type)
{
return 0; /* Do nothing so that the sense mask will get updated */
}
static void mpc52xx_main_mask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->main_mask, 16 - l2irq);
}
static void mpc52xx_main_unmask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&intr->main_mask, 16 - l2irq);
}
static struct irq_chip mpc52xx_main_irqchip = {
.name = "MPC52xx Main",
.irq_mask = mpc52xx_main_mask,
.irq_mask_ack = mpc52xx_main_mask,
.irq_unmask = mpc52xx_main_unmask,
.irq_set_type = mpc52xx_null_set_type,
};
/*
* Peripherals interrupt irq_chip
*/
static void mpc52xx_periph_mask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&intr->per_mask, 31 - l2irq);
}
static void mpc52xx_periph_unmask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&intr->per_mask, 31 - l2irq);
}
static struct irq_chip mpc52xx_periph_irqchip = {
.name = "MPC52xx Peripherals",
.irq_mask = mpc52xx_periph_mask,
.irq_mask_ack = mpc52xx_periph_mask,
.irq_unmask = mpc52xx_periph_unmask,
.irq_set_type = mpc52xx_null_set_type,
};
/*
* SDMA interrupt irq_chip
*/
static void mpc52xx_sdma_mask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_setbit(&sdma->IntMask, l2irq);
}
static void mpc52xx_sdma_unmask(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
io_be_clrbit(&sdma->IntMask, l2irq);
}
static void mpc52xx_sdma_ack(struct irq_data *d)
{
int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK;
out_be32(&sdma->IntPend, 1 << l2irq);
}
static struct irq_chip mpc52xx_sdma_irqchip = {
.name = "MPC52xx SDMA",
.irq_mask = mpc52xx_sdma_mask,
.irq_unmask = mpc52xx_sdma_unmask,
.irq_ack = mpc52xx_sdma_ack,
.irq_set_type = mpc52xx_null_set_type,
};
/**
* mpc52xx_is_extirq - Returns true if hwirq number is for an external IRQ
*/
static int mpc52xx_is_extirq(int l1, int l2)
{
return ((l1 == 0) && (l2 == 0)) ||
((l1 == 1) && (l2 >= 1) && (l2 <= 3));
}
/**
* mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property
*/
static int mpc52xx_irqhost_xlate(struct irq_domain *h, struct device_node *ct,
const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq,
unsigned int *out_flags)
{
int intrvect_l1;
int intrvect_l2;
int intrvect_type;
int intrvect_linux;
if (intsize != 3)
return -1;
intrvect_l1 = (int)intspec[0];
intrvect_l2 = (int)intspec[1];
intrvect_type = (int)intspec[2] & 0x3;
intrvect_linux = (intrvect_l1 << MPC52xx_IRQ_L1_OFFSET) &
MPC52xx_IRQ_L1_MASK;
intrvect_linux |= intrvect_l2 & MPC52xx_IRQ_L2_MASK;
*out_hwirq = intrvect_linux;
*out_flags = IRQ_TYPE_LEVEL_LOW;
if (mpc52xx_is_extirq(intrvect_l1, intrvect_l2))
*out_flags = mpc52xx_map_senses[intrvect_type];
pr_debug("return %x, l1=%d, l2=%d\n", intrvect_linux, intrvect_l1,
intrvect_l2);
return 0;
}
/**
* mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure
*/
static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t irq)
{
int l1irq;
int l2irq;
struct irq_chip *irqchip;
void *hndlr;
int type;
u32 reg;
l1irq = (irq & MPC52xx_IRQ_L1_MASK) >> MPC52xx_IRQ_L1_OFFSET;
l2irq = irq & MPC52xx_IRQ_L2_MASK;
/*
* External IRQs are handled differently by the hardware so they are
* handled by a dedicated irq_chip structure.
*/
if (mpc52xx_is_extirq(l1irq, l2irq)) {
reg = in_be32(&intr->ctrl);
type = mpc52xx_map_senses[(reg >> (22 - l2irq * 2)) & 0x3];
if ((type == IRQ_TYPE_EDGE_FALLING) ||
(type == IRQ_TYPE_EDGE_RISING))
hndlr = handle_edge_irq;
else
hndlr = handle_level_irq;
irq_set_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr);
pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n",
__func__, l2irq, virq, (int)irq, type);
return 0;
}
/* It is an internal SOC irq. Choose the correct irq_chip */
switch (l1irq) {
case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break;
case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
default:
pr_err("%s: invalid irq: virq=%i, l1=%i, l2=%i\n",
__func__, virq, l1irq, l2irq);
return -EINVAL;
}
irq_set_chip_and_handler(virq, irqchip, handle_level_irq);
pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq);
return 0;
}
static const struct irq_domain_ops mpc52xx_irqhost_ops = {
.xlate = mpc52xx_irqhost_xlate,
.map = mpc52xx_irqhost_map,
};
/**
* mpc52xx_init_irq - Initialize and register with the virq subsystem
*
* Hook for setting up IRQs on an mpc5200 system. A pointer to this function
* is to be put into the machine definition structure.
*
* This function searches the device tree for an MPC5200 interrupt controller,
* initializes it, and registers it with the virq subsystem.
*/
void __init mpc52xx_init_irq(void)
{
u32 intr_ctrl;
struct device_node *picnode;
struct device_node *np;
/* Remap the necessary zones */
picnode = of_find_matching_node(NULL, mpc52xx_pic_ids);
intr = of_iomap(picnode, 0);
if (!intr)
panic(__FILE__ ": find_and_map failed on 'mpc5200-pic'. "
"Check node !");
np = of_find_matching_node(NULL, mpc52xx_sdma_ids);
sdma = of_iomap(np, 0);
of_node_put(np);
if (!sdma)
panic(__FILE__ ": find_and_map failed on 'mpc5200-bestcomm'. "
"Check node !");
pr_debug("MPC5200 IRQ controller mapped to 0x%p\n", intr);
/* Disable all interrupt sources. */
out_be32(&sdma->IntPend, 0xffffffff); /* 1 means clear pending */
out_be32(&sdma->IntMask, 0xffffffff); /* 1 means disabled */
out_be32(&intr->per_mask, 0x7ffffc00); /* 1 means disabled */
out_be32(&intr->main_mask, 0x00010fff); /* 1 means disabled */
intr_ctrl = in_be32(&intr->ctrl);
intr_ctrl &= 0x00ff0000; /* Keeps IRQ[0-3] config */
intr_ctrl |= 0x0f000000 | /* clear IRQ 0-3 */
0x00001000 | /* MEE master external enable */
0x00000000 | /* 0 means disable IRQ 0-3 */
0x00000001; /* CEb route critical normally */
out_be32(&intr->ctrl, intr_ctrl);
/* Zero a bunch of the priority settings. */
out_be32(&intr->per_pri1, 0);
out_be32(&intr->per_pri2, 0);
out_be32(&intr->per_pri3, 0);
out_be32(&intr->main_pri1, 0);
out_be32(&intr->main_pri2, 0);
/*
* As last step, add an irq host to translate the real
* hw irq information provided by the ofw to linux virq
*/
mpc52xx_irqhost = irq_domain_add_linear(picnode,
MPC52xx_IRQ_HIGHTESTHWIRQ,
&mpc52xx_irqhost_ops, NULL);
if (!mpc52xx_irqhost)
panic(__FILE__ ": Cannot allocate the IRQ host\n");
irq_set_default_host(mpc52xx_irqhost);
pr_info("MPC52xx PIC is up and running!\n");
}
/**
* mpc52xx_get_irq - Get pending interrupt number hook function
*
* Called by the interrupt handler to determine what IRQ handler needs to be
* executed.
*
* Status of pending interrupts is determined by reading the encoded status
* register. The encoded status register has three fields; one for each of the
* types of interrupts defined by the controller - 'critical', 'main' and
* 'peripheral'. This function reads the status register and returns the IRQ
* number associated with the highest priority pending interrupt. 'Critical'
* interrupts have the highest priority, followed by 'main' interrupts, and
* then 'peripheral'.
*
* The mpc5200 interrupt controller can be configured to boost the priority
* of individual 'peripheral' interrupts. If this is the case then a special
* value will appear in either the crit or main fields indicating a high
* or medium priority peripheral irq has occurred.
*
* This function checks each of the 3 irq request fields and returns the
* first pending interrupt that it finds.
*
* This function also identifies a 4th type of interrupt; 'bestcomm'. Each
* bestcomm DMA task can raise the bestcomm peripheral interrupt. When this
* occurs at task-specific IRQ# is decoded so that each task can have its
* own IRQ handler.
*/
unsigned int mpc52xx_get_irq(void)
{
u32 status;
int irq;
status = in_be32(&intr->enc_status);
if (status & 0x00000400) { /* critical */
irq = (status >> 8) & 0x3;
if (irq == 2) /* high priority peripheral */
goto peripheral;
irq |= (MPC52xx_IRQ_L1_CRIT << MPC52xx_IRQ_L1_OFFSET);
} else if (status & 0x00200000) { /* main */
irq = (status >> 16) & 0x1f;
if (irq == 4) /* low priority peripheral */
goto peripheral;
irq |= (MPC52xx_IRQ_L1_MAIN << MPC52xx_IRQ_L1_OFFSET);
} else if (status & 0x20000000) { /* peripheral */
peripheral:
irq = (status >> 24) & 0x1f;
if (irq == 0) { /* bestcomm */
status = in_be32(&sdma->IntPend);
irq = ffs(status) - 1;
irq |= (MPC52xx_IRQ_L1_SDMA << MPC52xx_IRQ_L1_OFFSET);
} else {
irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET);
}
} else {
return NO_IRQ;
}
return irq_linear_revmap(mpc52xx_irqhost, irq);
}
| gpl-2.0 |
EPDCenter/android_kernel_woxter_nimbus_98q | net/bluetooth/hidp/sock.c | 7644 | 7095 | /*
HIDP implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2003-2004 Marcel Holtmann <marcel@holtmann.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/ioctl.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include "hidp.h"
static int hidp_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
BT_DBG("sock %p sk %p", sock, sk);
if (!sk)
return 0;
sock_orphan(sk);
sock_put(sk);
return 0;
}
static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *) arg;
struct hidp_connadd_req ca;
struct hidp_conndel_req cd;
struct hidp_connlist_req cl;
struct hidp_conninfo ci;
struct socket *csock;
struct socket *isock;
int err;
BT_DBG("cmd %x arg %lx", cmd, arg);
switch (cmd) {
case HIDPCONNADD:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
if (copy_from_user(&ca, argp, sizeof(ca)))
return -EFAULT;
csock = sockfd_lookup(ca.ctrl_sock, &err);
if (!csock)
return err;
isock = sockfd_lookup(ca.intr_sock, &err);
if (!isock) {
sockfd_put(csock);
return err;
}
if (csock->sk->sk_state != BT_CONNECTED ||
isock->sk->sk_state != BT_CONNECTED) {
sockfd_put(csock);
sockfd_put(isock);
return -EBADFD;
}
err = hidp_add_connection(&ca, csock, isock);
if (!err) {
if (copy_to_user(argp, &ca, sizeof(ca)))
err = -EFAULT;
} else {
sockfd_put(csock);
sockfd_put(isock);
}
return err;
case HIDPCONNDEL:
if (!capable(CAP_NET_ADMIN))
return -EACCES;
if (copy_from_user(&cd, argp, sizeof(cd)))
return -EFAULT;
return hidp_del_connection(&cd);
case HIDPGETCONNLIST:
if (copy_from_user(&cl, argp, sizeof(cl)))
return -EFAULT;
if (cl.cnum <= 0)
return -EINVAL;
err = hidp_get_connlist(&cl);
if (!err && copy_to_user(argp, &cl, sizeof(cl)))
return -EFAULT;
return err;
case HIDPGETCONNINFO:
if (copy_from_user(&ci, argp, sizeof(ci)))
return -EFAULT;
err = hidp_get_conninfo(&ci);
if (!err && copy_to_user(argp, &ci, sizeof(ci)))
return -EFAULT;
return err;
}
return -EINVAL;
}
#ifdef CONFIG_COMPAT
struct compat_hidp_connadd_req {
int ctrl_sock; /* Connected control socket */
int intr_sock; /* Connected interrupt socket */
__u16 parser;
__u16 rd_size;
compat_uptr_t rd_data;
__u8 country;
__u8 subclass;
__u16 vendor;
__u16 product;
__u16 version;
__u32 flags;
__u32 idle_to;
char name[128];
};
static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
if (cmd == HIDPGETCONNLIST) {
struct hidp_connlist_req cl;
uint32_t uci;
int err;
if (get_user(cl.cnum, (uint32_t __user *) arg) ||
get_user(uci, (u32 __user *) (arg + 4)))
return -EFAULT;
cl.ci = compat_ptr(uci);
if (cl.cnum <= 0)
return -EINVAL;
err = hidp_get_connlist(&cl);
if (!err && put_user(cl.cnum, (uint32_t __user *) arg))
err = -EFAULT;
return err;
} else if (cmd == HIDPCONNADD) {
struct compat_hidp_connadd_req ca;
struct hidp_connadd_req __user *uca;
uca = compat_alloc_user_space(sizeof(*uca));
if (copy_from_user(&ca, (void __user *) arg, sizeof(ca)))
return -EFAULT;
if (put_user(ca.ctrl_sock, &uca->ctrl_sock) ||
put_user(ca.intr_sock, &uca->intr_sock) ||
put_user(ca.parser, &uca->parser) ||
put_user(ca.rd_size, &uca->rd_size) ||
put_user(compat_ptr(ca.rd_data), &uca->rd_data) ||
put_user(ca.country, &uca->country) ||
put_user(ca.subclass, &uca->subclass) ||
put_user(ca.vendor, &uca->vendor) ||
put_user(ca.product, &uca->product) ||
put_user(ca.version, &uca->version) ||
put_user(ca.flags, &uca->flags) ||
put_user(ca.idle_to, &uca->idle_to) ||
copy_to_user(&uca->name[0], &ca.name[0], 128))
return -EFAULT;
arg = (unsigned long) uca;
/* Fall through. We don't actually write back any _changes_
to the structure anyway, so there's no need to copy back
into the original compat version */
}
return hidp_sock_ioctl(sock, cmd, arg);
}
#endif
static const struct proto_ops hidp_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.release = hidp_sock_release,
.ioctl = hidp_sock_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = hidp_sock_compat_ioctl,
#endif
.bind = sock_no_bind,
.getname = sock_no_getname,
.sendmsg = sock_no_sendmsg,
.recvmsg = sock_no_recvmsg,
.poll = sock_no_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.mmap = sock_no_mmap
};
static struct proto hidp_proto = {
.name = "HIDP",
.owner = THIS_MODULE,
.obj_size = sizeof(struct bt_sock)
};
static int hidp_sock_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
BT_DBG("sock %p", sock);
if (sock->type != SOCK_RAW)
return -ESOCKTNOSUPPORT;
sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto);
if (!sk)
return -ENOMEM;
sock_init_data(sock, sk);
sock->ops = &hidp_sock_ops;
sock->state = SS_UNCONNECTED;
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_protocol = protocol;
sk->sk_state = BT_OPEN;
return 0;
}
static const struct net_proto_family hidp_sock_family_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
.create = hidp_sock_create
};
int __init hidp_init_sockets(void)
{
int err;
err = proto_register(&hidp_proto, 0);
if (err < 0)
return err;
err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops);
if (err < 0)
goto error;
return 0;
error:
BT_ERR("Can't register HIDP socket");
proto_unregister(&hidp_proto);
return err;
}
void __exit hidp_cleanup_sockets(void)
{
if (bt_sock_unregister(BTPROTO_HIDP) < 0)
BT_ERR("Can't unregister HIDP socket");
proto_unregister(&hidp_proto);
}
| gpl-2.0 |
SM-G920P/SM-G920P-Kernel | crypto/async_tx/async_pq.c | 7644 | 12825 | /*
* Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
* Copyright(c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>
/**
* pq_scribble_page - space to hold throwaway P or Q buffer for
* synchronous gen_syndrome
*/
static struct page *pq_scribble_page;
/* the struct page *blocks[] parameter passed to async_gen_syndrome()
* and async_syndrome_val() contains the 'P' destination address at
* blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
*
* note: these are macros as they are used as lvalues
*/
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
/**
* do_async_gen_syndrome - asynchronously calculate P and/or Q
*/
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
const unsigned char *scfs, unsigned int offset, int disks,
size_t len, dma_addr_t *dma_src,
struct async_submit_ctl *submit)
{
struct dma_async_tx_descriptor *tx = NULL;
struct dma_device *dma = chan->device;
enum dma_ctrl_flags dma_flags = 0;
enum async_tx_flags flags_orig = submit->flags;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
dma_async_tx_callback cb_param_orig = submit->cb_param;
int src_cnt = disks - 2;
unsigned char coefs[src_cnt];
unsigned short pq_src_cnt;
dma_addr_t dma_dest[2];
int src_off = 0;
int idx;
int i;
/* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
if (P(blocks, disks))
dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
len, DMA_BIDIRECTIONAL);
else
dma_flags |= DMA_PREP_PQ_DISABLE_P;
if (Q(blocks, disks))
dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
len, DMA_BIDIRECTIONAL);
else
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
/* convert source addresses being careful to collapse 'empty'
* sources and update the coefficients accordingly
*/
for (i = 0, idx = 0; i < src_cnt; i++) {
if (blocks[i] == NULL)
continue;
dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
DMA_TO_DEVICE);
coefs[idx] = scfs[i];
idx++;
}
src_cnt = idx;
while (src_cnt > 0) {
submit->flags = flags_orig;
pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
/* if we are submitting additional pqs, leave the chain open,
* clear the callback parameters, and leave the destination
* buffers mapped
*/
if (src_cnt > pq_src_cnt) {
submit->flags &= ~ASYNC_TX_ACK;
submit->flags |= ASYNC_TX_FENCE;
dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = NULL;
submit->cb_param = NULL;
} else {
dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
if (cb_fn_orig)
dma_flags |= DMA_PREP_INTERRUPT;
}
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
/* Since we have clobbered the src_list we are committed
* to doing this asynchronously. Drivers force forward
* progress in case they can not provide a descriptor
*/
for (;;) {
tx = dma->device_prep_dma_pq(chan, dma_dest,
&dma_src[src_off],
pq_src_cnt,
&coefs[src_off], len,
dma_flags);
if (likely(tx))
break;
async_tx_quiesce(&submit->depend_tx);
dma_async_issue_pending(chan);
}
async_tx_submit(chan, tx, submit);
submit->depend_tx = tx;
/* drop completed sources */
src_cnt -= pq_src_cnt;
src_off += pq_src_cnt;
dma_flags |= DMA_PREP_CONTINUE;
}
return tx;
}
/**
* do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
*/
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
size_t len, struct async_submit_ctl *submit)
{
void **srcs;
int i;
if (submit->scribble)
srcs = submit->scribble;
else
srcs = (void **) blocks;
for (i = 0; i < disks; i++) {
if (blocks[i] == NULL) {
BUG_ON(i > disks - 3); /* P or Q can't be zero */
srcs[i] = (void*)raid6_empty_zero_page;
} else
srcs[i] = page_address(blocks[i]) + offset;
}
raid6_call.gen_syndrome(disks, len, srcs);
async_tx_sync_epilog(submit);
}
/**
* async_gen_syndrome - asynchronously calculate a raid6 syndrome
* @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
* @offset: common offset into each block (src and dest) to start transaction
* @disks: number of blocks (including missing P or Q, see below)
* @len: length of operation in bytes
* @submit: submission/completion modifiers
*
* General note: This routine assumes a field of GF(2^8) with a
* primitive polynomial of 0x11d and a generator of {02}.
*
* 'disks' note: callers can optionally omit either P or Q (but not
* both) from the calculation by setting blocks[disks-2] or
* blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
* PAGE_SIZE as a temporary buffer of this size is used in the
* synchronous path. 'disks' always accounts for both destination
* buffers. If any source buffers (blocks[i] where i < disks - 2) are
* set to NULL those buffers will be replaced with the raid6_zero_page
* in the synchronous path and omitted in the hardware-asynchronous
* path.
*
* 'blocks' note: if submit->scribble is NULL then the contents of
* 'blocks' may be overwritten to perform address conversions
* (dma_map_page() or page_address()).
*/
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
size_t len, struct async_submit_ctl *submit)
{
int src_cnt = disks - 2;
struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
&P(blocks, disks), 2,
blocks, src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
dma_addr_t *dma_src = NULL;
BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
if (submit->scribble)
dma_src = submit->scribble;
else if (sizeof(dma_addr_t) <= sizeof(struct page *))
dma_src = (dma_addr_t *) blocks;
if (dma_src && device &&
(src_cnt <= dma_maxpq(device, 0) ||
dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
/* run the p+q asynchronously */
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
disks, len, dma_src, submit);
}
/* run the pq synchronously */
pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);
/* wait for any prerequisite operations */
async_tx_quiesce(&submit->depend_tx);
if (!P(blocks, disks)) {
P(blocks, disks) = pq_scribble_page;
BUG_ON(len + offset > PAGE_SIZE);
}
if (!Q(blocks, disks)) {
Q(blocks, disks) = pq_scribble_page;
BUG_ON(len + offset > PAGE_SIZE);
}
do_sync_gen_syndrome(blocks, offset, disks, len, submit);
return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
return NULL;
#endif
return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
disks, len);
}
/**
* async_syndrome_val - asynchronously validate a raid6 syndrome
* @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
* @offset: common offset into each block (src and dest) to start transaction
* @disks: number of blocks (including missing P or Q, see below)
* @len: length of operation in bytes
* @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
* @spare: temporary result buffer for the synchronous case
* @submit: submission / completion modifiers
*
* The same notes from async_gen_syndrome apply to the 'blocks',
* and 'disks' parameters of this routine. The synchronous path
* requires a temporary result buffer and submit->scribble to be
* specified.
*/
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
size_t len, enum sum_check_flags *pqres, struct page *spare,
struct async_submit_ctl *submit)
{
struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx;
unsigned char coefs[disks-2];
enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
dma_addr_t *dma_src = NULL;
int src_cnt = 0;
BUG_ON(disks < 4);
if (submit->scribble)
dma_src = submit->scribble;
else if (sizeof(dma_addr_t) <= sizeof(struct page *))
dma_src = (dma_addr_t *) blocks;
if (dma_src && device && disks <= dma_maxpq(device, 0) &&
is_dma_pq_aligned(device, offset, 0, len)) {
struct device *dev = device->dev;
dma_addr_t *pq = &dma_src[disks-2];
int i;
pr_debug("%s: (async) disks: %d len: %zu\n",
__func__, disks, len);
if (!P(blocks, disks))
dma_flags |= DMA_PREP_PQ_DISABLE_P;
else
pq[0] = dma_map_page(dev, P(blocks, disks),
offset, len,
DMA_TO_DEVICE);
if (!Q(blocks, disks))
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
else
pq[1] = dma_map_page(dev, Q(blocks, disks),
offset, len,
DMA_TO_DEVICE);
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
for (i = 0; i < disks-2; i++)
if (likely(blocks[i])) {
dma_src[src_cnt] = dma_map_page(dev, blocks[i],
offset, len,
DMA_TO_DEVICE);
coefs[src_cnt] = raid6_gfexp[i];
src_cnt++;
}
for (;;) {
tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
src_cnt,
coefs,
len, pqres,
dma_flags);
if (likely(tx))
break;
async_tx_quiesce(&submit->depend_tx);
dma_async_issue_pending(chan);
}
async_tx_submit(chan, tx, submit);
return tx;
} else {
struct page *p_src = P(blocks, disks);
struct page *q_src = Q(blocks, disks);
enum async_tx_flags flags_orig = submit->flags;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
void *scribble = submit->scribble;
void *cb_param_orig = submit->cb_param;
void *p, *q, *s;
pr_debug("%s: (sync) disks: %d len: %zu\n",
__func__, disks, len);
/* caller must provide a temporary result buffer and
* allow the input parameters to be preserved
*/
BUG_ON(!spare || !scribble);
/* wait for any prerequisite operations */
async_tx_quiesce(&submit->depend_tx);
/* recompute p and/or q into the temporary buffer and then
* check to see the result matches the current value
*/
tx = NULL;
*pqres = 0;
if (p_src) {
init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
NULL, NULL, scribble);
tx = async_xor(spare, blocks, offset, disks-2, len, submit);
async_tx_quiesce(&tx);
p = page_address(p_src) + offset;
s = page_address(spare) + offset;
*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
}
if (q_src) {
P(blocks, disks) = NULL;
Q(blocks, disks) = spare;
init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
tx = async_gen_syndrome(blocks, offset, disks, len, submit);
async_tx_quiesce(&tx);
q = page_address(q_src) + offset;
s = page_address(spare) + offset;
*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
}
/* restore P, Q and submit */
P(blocks, disks) = p_src;
Q(blocks, disks) = q_src;
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
submit->flags = flags_orig;
async_tx_sync_epilog(submit);
return NULL;
}
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
static int __init async_pq_init(void)
{
pq_scribble_page = alloc_page(GFP_KERNEL);
if (pq_scribble_page)
return 0;
pr_err("%s: failed to allocate required spare page\n", __func__);
return -ENOMEM;
}
static void __exit async_pq_exit(void)
{
put_page(pq_scribble_page);
}
module_init(async_pq_init);
module_exit(async_pq_exit);
MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");
| gpl-2.0 |
noobnl/android_kernel_samsung_d2-jb_2.5.1 | arch/mips/cavium-octeon/executive/cvmx-helper-errata.c | 13532 | 2592 | /***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/**
*
* Fixes and workaround for Octeon chip errata. This file
* contains functions called by cvmx-helper to workaround known
* chip errata. For the most part, code doesn't need to call
* these functions directly.
*
*/
#include <linux/module.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-helper-jtag.h>
/**
* Due to errata G-720, the 2nd order CDR circuit on CN52XX pass
* 1 doesn't work properly. The following code disables 2nd order
* CDR for the specified QLM.
*
* @qlm: QLM to disable 2nd order CDR for.
*/
/*
 * Workaround for errata G-720: the 2nd order CDR circuit on CN52XX
 * pass 1 does not work properly, so force it off for the given QLM
 * via its JTAG chain.
 *
 * @qlm: QLM to disable 2nd order CDR for.
 */
void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm)
{
	int i;

	cvmx_helper_qlm_jtag_init();
	/*
	 * Program all four lanes of the QLM, 4 * 268 = 1072 bits total.
	 * Per lane we set cfg_cdr_incx<67:64> = 3 and
	 * cfg_cdr_secord<77> = 1; every other bit stays zero.  Bits are
	 * shifted in LSB first.
	 */
	for (i = 0; i < 4; i++) {
		/* bits <63:0> are zero */
		cvmx_helper_qlm_jtag_shift_zeros(qlm, 64);
		/* cfg_cdr_incx<67:64> = 3 */
		cvmx_helper_qlm_jtag_shift(qlm, 4, 3);
		/* bits <76:68> are zero */
		cvmx_helper_qlm_jtag_shift_zeros(qlm, 9);
		/* cfg_cdr_secord<77> = 1 */
		cvmx_helper_qlm_jtag_shift(qlm, 1, 1);
		/* bits <267:78> are zero */
		cvmx_helper_qlm_jtag_shift_zeros(qlm, 190);
	}
	cvmx_helper_qlm_jtag_update(qlm);
}
EXPORT_SYMBOL(__cvmx_helper_errata_qlm_disable_2nd_order_cdr);
| gpl-2.0 |
omegamoon/rockchip-rk3188-generic | drivers/scsi/fnic/vnic_wq.c | 14044 | 4356 | /*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_wq.h"
/*
 * vnic_wq_alloc_bufs - allocate and link the WQ buffer-tracking entries.
 *
 * Allocates VNIC_WQ_BUF_BLKS_NEEDED(count) blocks of buf entries and
 * chains them into a circular singly linked list, one entry per ring
 * descriptor; the entry for the last descriptor points back to the
 * first.  Blocks already allocated are NOT freed here on failure --
 * the caller (vnic_wq_alloc) cleans up via vnic_wq_free.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
	struct vnic_wq_buf *buf;
	struct vnic_dev *vdev;
	unsigned int i, j, count = wq->ring.desc_count;
	unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);

	vdev = wq->vdev;

	for (i = 0; i < blks; i++) {
		wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
		if (!wq->bufs[i]) {
			printk(KERN_ERR "Failed to alloc wq_bufs\n");
			return -ENOMEM;
		}
	}

	/* Point each buf at its hardware descriptor and at the next buf */
	for (i = 0; i < blks; i++) {
		buf = wq->bufs[i];
		for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
			buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
			buf->desc = (u8 *)wq->ring.descs +
				wq->ring.desc_size * buf->index;
			if (buf->index + 1 == count) {
				/* last descriptor: close the ring */
				buf->next = wq->bufs[0];
				break;
			} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
				/* end of this block: continue in the next */
				buf->next = wq->bufs[i + 1];
			} else {
				buf->next = buf + 1;
				buf++;
			}
		}
	}

	/* Both cursors start at the first entry of the empty ring */
	wq->to_use = wq->to_clean = wq->bufs[0];

	return 0;
}
/* Release the descriptor ring and every buffer-tracking block of @wq. */
void vnic_wq_free(struct vnic_wq *wq)
{
	unsigned int blk;

	vnic_dev_free_desc_ring(wq->vdev, &wq->ring);

	for (blk = 0; blk < VNIC_WQ_BUF_BLKS_MAX; blk++) {
		kfree(wq->bufs[blk]);
		wq->bufs[blk] = NULL;
	}

	wq->ctrl = NULL;
}
/*
 * Set up a work queue: map its control registers, make sure it is
 * disabled, then allocate the descriptor ring and the matching
 * buffer-tracking entries.  On failure everything acquired so far
 * is released before returning.
 */
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size)
{
	int err;

	wq->index = index;
	wq->vdev = vdev;

	wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
	if (!wq->ctrl) {
		printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
		return -EINVAL;
	}

	vnic_wq_disable(wq);

	err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
	if (err)
		return err;

	err = vnic_wq_alloc_bufs(wq);
	if (!err)
		return 0;

	/* buf allocation failed: release the ring (and partial bufs) too */
	vnic_wq_free(wq);
	return err;
}
/*
 * Program the WQ control registers: ring base and size, reset the
 * fetch/posted indexes, bind the completion queue and error
 * interrupt, and clear any stale error status.  Does not enable
 * the queue.
 */
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;

	/* VNIC_PADDR_TARGET tags the address for the device's DMA engine */
	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &wq->ctrl->ring_base);
	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(cq_index, &wq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
	iowrite32(0, &wq->ctrl->error_status);
}
/* Read the hardware error status register of @wq. */
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
	return ioread32(&wq->ctrl->error_status);
}
/* Tell the hardware to start processing this work queue. */
void vnic_wq_enable(struct vnic_wq *wq)
{
	iowrite32(1, &wq->ctrl->enable);
}
/*
 * Request the hardware to stop the work queue and poll (up to ~100us)
 * until it acknowledges by clearing the running bit.
 *
 * Returns 0 on success, -ETIMEDOUT if the queue never stopped.
 */
int vnic_wq_disable(struct vnic_wq *wq)
{
	unsigned int i;

	iowrite32(0, &wq->ctrl->enable);

	/* Wait for HW to ACK disable request */
	for (i = 0; i < 100; i++) {
		if (!ioread32(&wq->ctrl->running))
			return 0;
		udelay(1);
	}

	printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);

	return -ETIMEDOUT;
}
/*
 * Reclaim every in-flight buffer of a disabled WQ.  @buf_clean is
 * invoked once per used entry, then both software cursors and the
 * hardware indexes are reset so the queue is empty again.  The queue
 * MUST already be disabled (BUG otherwise).
 */
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
	struct vnic_wq_buf *buf;

	BUG_ON(ioread32(&wq->ctrl->enable));

	buf = wq->to_clean;

	while (vnic_wq_desc_used(wq) > 0) {
		(*buf_clean)(wq, buf);
		/* advance the clean cursor around the circular buf list */
		buf = wq->to_clean = buf->next;
		wq->ring.desc_avail++;
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(0, &wq->ctrl->error_status);

	vnic_dev_clear_desc_ring(&wq->ring);
}
| gpl-2.0 |
zhang-xin/kdi | drivers/net/usb/net1080.c | 221 | 15748 | /*
* Net1080 based USB host-to-host cables
* Copyright (C) 2000-2005 by David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
// #define DEBUG // error path messages, extra info
// #define VERBOSE // more; success messages
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>
#include <asm/unaligned.h>
/*
* Netchip 1080 driver ... http://www.netchip.com
* (Sept 2004: End-of-life announcement has been sent.)
* Used in (some) LapLink cables
*/
#define frame_errors data[1]
/*
* NetChip framing of ethernet packets, supporting additional error
* checks for links that may drop bulk packets from inside messages.
* Odd USB length == always short read for last usb packet.
* - nc_header
* - Ethernet header (14 bytes)
* - payload
* - (optional padding byte, if needed so length becomes odd)
* - nc_trailer
*
* This framing is to be avoided for non-NetChip devices.
*/
struct nc_header { // packed:
__le16 hdr_len; // sizeof nc_header (LE, all)
__le16 packet_len; // payload size (including ethhdr)
__le16 packet_id; // detects dropped packets
#define MIN_HEADER 6
// all else is optional, and must start with:
// __le16 vendorId; // from usb-if
// __le16 productId;
} __attribute__((__packed__));
#define PAD_BYTE ((unsigned char)0xAC)
struct nc_trailer {
__le16 packet_id;
} __attribute__((__packed__));
// packets may use FLAG_FRAMING_NC and optional pad
#define FRAMED_SIZE(mtu) (sizeof (struct nc_header) \
+ sizeof (struct ethhdr) \
+ (mtu) \
+ 1 \
+ sizeof (struct nc_trailer))
#define MIN_FRAMED FRAMED_SIZE(0)
/* packets _could_ be up to 64KB... */
#define NC_MAX_PACKET 32767
/*
* Zero means no timeout; else, how long a 64 byte bulk packet may be queued
* before the hardware drops it. If that's done, the driver will need to
* frame network packets to guard against the dropped USB packets. The win32
* driver sets this for both sides of the link.
*/
#define NC_READ_TTL_MS ((u8)255) // ms
/*
* We ignore most registers and EEPROM contents.
*/
#define REG_USBCTL ((u8)0x04)
#define REG_TTL ((u8)0x10)
#define REG_STATUS ((u8)0x11)
/*
* Vendor specific requests to read/write data
*/
#define REQUEST_REGISTER ((u8)0x10)
#define REQUEST_EEPROM ((u8)0x11)
/*
 * Synchronous vendor-specific control-IN read of a 16-bit value.
 * On success the value at *retval_ptr is converted to CPU byte order.
 * Returns 0 on success or a negative usb_control_msg() error.
 */
static int
nc_vendor_read(struct usbnet *dev, u8 req, u8 regnum, u16 *retval_ptr)
{
	int status;

	status = usb_control_msg(dev->udev,
		usb_rcvctrlpipe(dev->udev, 0),
		req,
		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		0, regnum, retval_ptr, sizeof *retval_ptr,
		USB_CTRL_GET_TIMEOUT);

	/* usb_control_msg returns bytes transferred; positive means OK */
	if (status > 0)
		status = 0;
	if (status == 0)
		le16_to_cpus(retval_ptr);
	return status;
}
/* Read a 16-bit NetChip register; thin wrapper over nc_vendor_read(). */
static inline int
nc_register_read(struct usbnet *dev, u8 regnum, u16 *retval_ptr)
{
	return nc_vendor_read(dev, REQUEST_REGISTER, regnum, retval_ptr);
}
// Fire-and-forget vendor control-OUT write; no retval, so it could
// become async and be usable in_interrupt().  The 16-bit value rides
// in the setup packet, so no data stage is needed.
static void
nc_vendor_write(struct usbnet *dev, u8 req, u8 regnum, u16 value)
{
	usb_control_msg(dev->udev,
		usb_sndctrlpipe(dev->udev, 0),
		req,
		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		value, regnum,
		NULL, 0,	// data is in setup packet
		USB_CTRL_SET_TIMEOUT);
}
/* Write a 16-bit NetChip register; thin wrapper over nc_vendor_write(). */
static inline void
nc_register_write(struct usbnet *dev, u8 regnum, u16 value)
{
	nc_vendor_write(dev, REQUEST_REGISTER, regnum, value);
}
#if 0
/*
 * Debug helper: read and print every readable NetChip register.
 * Compiled out by default.  Registers 0x08-0x0f and 0x12-0x1e are
 * skipped because reading them is trouble.
 *
 * Fix: kmalloc() takes a mandatory gfp_t flags argument; the old
 * single-argument call would not compile if this block were enabled.
 */
static void nc_dump_registers(struct usbnet *dev)
{
	u8 reg;
	u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL);

	if (!vp) {
		dbg("no memory?");
		return;
	}

	dbg("%s registers:", dev->net->name);
	for (reg = 0; reg < 0x20; reg++) {
		int retval;

		// reading some registers is trouble
		if (reg >= 0x08 && reg <= 0xf)
			continue;
		if (reg >= 0x12 && reg <= 0x1e)
			continue;

		retval = nc_register_read(dev, reg, vp);
		if (retval < 0)
			dbg("%s reg [0x%x] ==> error %d",
				dev->net->name, reg, retval);
		else
			dbg("%s reg [0x%x] = 0x%x",
				dev->net->name, reg, *vp);
	}
	kfree(vp);
}
#endif
/*-------------------------------------------------------------------------*/
/*
* Control register
*/
#define USBCTL_WRITABLE_MASK 0x1f0f
// bits 15-13 reserved, r/o
#define USBCTL_ENABLE_LANG (1 << 12)
#define USBCTL_ENABLE_MFGR (1 << 11)
#define USBCTL_ENABLE_PROD (1 << 10)
#define USBCTL_ENABLE_SERIAL (1 << 9)
#define USBCTL_ENABLE_DEFAULTS (1 << 8)
// bits 7-4 reserved, r/o
#define USBCTL_FLUSH_OTHER (1 << 3)
#define USBCTL_FLUSH_THIS (1 << 2)
#define USBCTL_DISCONN_OTHER (1 << 1)
#define USBCTL_DISCONN_THIS (1 << 0)
/*
 * Pretty-print the USBCTL register when link debugging is enabled:
 * which descriptor strings are enabled, plus flush/disconnect state
 * for this end and the peer, and the read-only residue bits.
 */
static inline void nc_dump_usbctl(struct usbnet *dev, u16 usbctl)
{
	if (!netif_msg_link(dev))
		return;
	devdbg(dev, "net1080 %s-%s usbctl 0x%x:%s%s%s%s%s;"
			" this%s%s;"
			" other%s%s; r/o 0x%x",
		dev->udev->bus->bus_name, dev->udev->devpath,
		usbctl,
		(usbctl & USBCTL_ENABLE_LANG) ? " lang" : "",
		(usbctl & USBCTL_ENABLE_MFGR) ? " mfgr" : "",
		(usbctl & USBCTL_ENABLE_PROD) ? " prod" : "",
		(usbctl & USBCTL_ENABLE_SERIAL) ? " serial" : "",
		(usbctl & USBCTL_ENABLE_DEFAULTS) ? " defaults" : "",
		(usbctl & USBCTL_FLUSH_OTHER) ? " FLUSH" : "",
		(usbctl & USBCTL_DISCONN_OTHER) ? " DIS" : "",
		(usbctl & USBCTL_FLUSH_THIS) ? " FLUSH" : "",
		(usbctl & USBCTL_DISCONN_THIS) ? " DIS" : "",
		usbctl & ~USBCTL_WRITABLE_MASK
		);
}
/*-------------------------------------------------------------------------*/
/*
* Status register
*/
#define STATUS_PORT_A (1 << 15)
#define STATUS_CONN_OTHER (1 << 14)
#define STATUS_SUSPEND_OTHER (1 << 13)
#define STATUS_MAILBOX_OTHER (1 << 12)
#define STATUS_PACKETS_OTHER(n) (((n) >> 8) & 0x03)
#define STATUS_CONN_THIS (1 << 6)
#define STATUS_SUSPEND_THIS (1 << 5)
#define STATUS_MAILBOX_THIS (1 << 4)
#define STATUS_PACKETS_THIS(n) (((n) >> 0) & 0x03)
#define STATUS_UNSPEC_MASK 0x0c8c
#define STATUS_NOISE_MASK ((u16)~(0x0303|STATUS_UNSPEC_MASK))
/*
 * Pretty-print the STATUS register when link debugging is enabled:
 * which port we are (A/B), per-side queued packet counts, and
 * connect/suspend/mailbox flags for this end and the peer.
 */
static inline void nc_dump_status(struct usbnet *dev, u16 status)
{
	if (!netif_msg_link(dev))
		return;
	devdbg(dev, "net1080 %s-%s status 0x%x:"
			" this (%c) PKT=%d%s%s%s;"
			" other PKT=%d%s%s%s; unspec 0x%x",
		dev->udev->bus->bus_name, dev->udev->devpath,
		status,

		// XXX the packet counts don't seem right
		// (1 at reset, not 0); maybe UNSPEC too

		(status & STATUS_PORT_A) ? 'A' : 'B',
		STATUS_PACKETS_THIS(status),
		(status & STATUS_CONN_THIS) ? " CON" : "",
		(status & STATUS_SUSPEND_THIS) ? " SUS" : "",
		(status & STATUS_MAILBOX_THIS) ? " MBOX" : "",
		STATUS_PACKETS_OTHER(status),
		(status & STATUS_CONN_OTHER) ? " CON" : "",
		(status & STATUS_SUSPEND_OTHER) ? " SUS" : "",
		(status & STATUS_MAILBOX_OTHER) ? " MBOX" : "",
		status & STATUS_UNSPEC_MASK
		);
}
/*-------------------------------------------------------------------------*/
/*
* TTL register
*/
#define TTL_THIS(ttl) (0x00ff & ttl)
#define TTL_OTHER(ttl) (0x00ff & (ttl >> 8))
#define MK_TTL(this,other) ((u16)(((other)<<8)|(0x00ff&(this))))
/* Pretty-print the TTL register (per-side ms values) when link
 * debugging is enabled. */
static inline void nc_dump_ttl(struct usbnet *dev, u16 ttl)
{
	if (netif_msg_link(dev))
		devdbg(dev, "net1080 %s-%s ttl 0x%x this = %d, other = %d",
			dev->udev->bus->bus_name, dev->udev->devpath,
			ttl, TTL_THIS(ttl), TTL_OTHER(ttl));
}
/*-------------------------------------------------------------------------*/
/*
 * Reset the link: read status, flush the FIFOs on both ends, and set
 * this side's read TTL so bulk packets dropped mid-message expire
 * instead of wedging the framing.  Reports peer connect state.
 *
 * Uses a kmalloc'd bounce buffer for the register reads because USB
 * control transfers need DMA-able memory (not the stack).
 * Returns 0 on success or a negative register-read error.
 */
static int net1080_reset(struct usbnet *dev)
{
	u16 usbctl, status, ttl;
	u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL);
	int retval;

	if (!vp)
		return -ENOMEM;

	// nc_dump_registers(dev);

	if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) {
		dbg("can't read %s-%s status: %d",
			dev->udev->bus->bus_name, dev->udev->devpath, retval);
		goto done;
	}
	status = *vp;
	nc_dump_status(dev, status);

	if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) {
		dbg("can't read USBCTL, %d", retval);
		goto done;
	}
	usbctl = *vp;
	nc_dump_usbctl(dev, usbctl);

	/* discard anything queued on either side of the cable */
	nc_register_write(dev, REG_USBCTL,
			USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER);

	if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) {
		dbg("can't read TTL, %d", retval);
		goto done;
	}
	ttl = *vp;
	// nc_dump_ttl(dev, ttl);

	/* set our TTL, keep the peer's half of the register unchanged */
	nc_register_write(dev, REG_TTL,
			MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) );
	dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS);

	if (netif_msg_link(dev))
		devinfo(dev, "port %c, peer %sconnected",
			(status & STATUS_PORT_A) ? 'A' : 'B',
			(status & STATUS_CONN_OTHER) ? "" : "dis"
			);

	retval = 0;

done:
	kfree(vp);
	return retval;
}
/*
 * Poll the STATUS register to see whether a peer is attached to the
 * other end of the cable.  Returns 0 when connected, -ENOLINK when
 * not, or a negative error from the register read.
 */
static int net1080_check_connect(struct usbnet *dev)
{
	u16 status;
	int retval;
	u16 *vp = kmalloc(sizeof (u16), GFP_KERNEL);

	if (!vp)
		return -ENOMEM;

	retval = nc_register_read(dev, REG_STATUS, vp);
	status = *vp;
	kfree(vp);

	if (retval != 0) {
		dbg("%s net1080_check_conn read - %d", dev->net->name, retval);
		return retval;
	}
	if (status & STATUS_CONN_OTHER)
		return 0;
	return -ENOLINK;
}
/* Completion handler for the async flush urb: free the setup packet
 * (stashed in urb->context) and the urb itself. */
static void nc_flush_complete(struct urb *urb)
{
	kfree(urb->context);
	usb_free_urb(urb);
}
/*
 * Called after each framing error.  Once more than 5 consecutive
 * errors accumulate, queue an asynchronous vendor control request
 * that flushes the FIFOs on both ends of the cable to resynchronize
 * the packet stream.  May run in interrupt context, hence GFP_ATOMIC
 * and the fire-and-forget urb (cleanup happens in nc_flush_complete).
 */
static void nc_ensure_sync(struct usbnet *dev)
{
	dev->frame_errors++;
	if (dev->frame_errors > 5) {
		struct urb		*urb;
		struct usb_ctrlrequest	*req;
		int			status;

		/* Send a flush */
		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb)
			return;

		req = kmalloc(sizeof *req, GFP_ATOMIC);
		if (!req) {
			usb_free_urb(urb);
			return;
		}

		req->bRequestType = USB_DIR_OUT
			| USB_TYPE_VENDOR
			| USB_RECIP_DEVICE;
		req->bRequest = REQUEST_REGISTER;
		req->wValue = cpu_to_le16(USBCTL_FLUSH_THIS
				| USBCTL_FLUSH_OTHER);
		req->wIndex = cpu_to_le16(REG_USBCTL);
		req->wLength = cpu_to_le16(0);

		/* queue an async control request, we don't need
		 * to do anything when it finishes except clean up.
		 */
		usb_fill_control_urb(urb, dev->udev,
			usb_sndctrlpipe(dev->udev, 0),
			(unsigned char *) req,
			NULL, 0,
			nc_flush_complete, req);
		status = usb_submit_urb(urb, GFP_ATOMIC);
		if (status) {
			/* submit failed: completion won't run, free here */
			kfree(req);
			usb_free_urb(urb);
			return;
		}

		if (netif_msg_rx_err(dev))
			devdbg(dev, "flush net1080; too many framing errors");
		dev->frame_errors = 0;
	}
}
/*
 * Strip NetChip framing from a received frame: validate the header,
 * drop the optional pad byte and the trailer, and check that header
 * and trailer packet ids agree (a mismatch means at least two frames
 * were dropped on the link).  On framing problems the frame-error
 * counter is bumped and nc_ensure_sync() may flush the link.
 *
 * Returns 1 to pass the skb up the stack, 0 to drop it.
 */
static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	struct nc_header *header;
	struct nc_trailer *trailer;
	u16 hdr_len, packet_len;

	/* Framed USB length is always odd; an even length means the
	 * hardware dropped a short packet somewhere. */
	if (!(skb->len & 0x01)) {
#ifdef DEBUG
		struct net_device *net = dev->net;
		dbg("rx framesize %d range %d..%d mtu %d", skb->len,
			net->hard_header_len, dev->hard_mtu, net->mtu);
#endif
		dev->stats.rx_frame_errors++;
		nc_ensure_sync(dev);
		return 0;
	}

	header = (struct nc_header *) skb->data;
	hdr_len = le16_to_cpup(&header->hdr_len);
	packet_len = le16_to_cpup(&header->packet_len);
	if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) {
		dev->stats.rx_frame_errors++;
		dbg("packet too big, %d", packet_len);
		nc_ensure_sync(dev);
		return 0;
	} else if (hdr_len < MIN_HEADER) {
		dev->stats.rx_frame_errors++;
		dbg("header too short, %d", hdr_len);
		nc_ensure_sync(dev);
		return 0;
	} else if (hdr_len > MIN_HEADER) {
		// out of band data for us?
		dbg("header OOB, %d bytes", hdr_len - MIN_HEADER);
		nc_ensure_sync(dev);
		// switch (vendor/product ids) { ... }
	}

	/* peel off header and trailer around the payload */
	skb_pull(skb, hdr_len);
	trailer = (struct nc_trailer *)
		(skb->data + skb->len - sizeof *trailer);
	skb_trim(skb, skb->len - sizeof *trailer);

	/* an even payload length implies a pad byte was inserted */
	if ((packet_len & 0x01) == 0) {
		if (skb->data [packet_len] != PAD_BYTE) {
			dev->stats.rx_frame_errors++;
			dbg("bad pad");
			return 0;
		}
		skb_trim(skb, skb->len - 1);
	}
	if (skb->len != packet_len) {
		dev->stats.rx_frame_errors++;
		dbg("bad packet len %d (expected %d)",
			skb->len, packet_len);
		nc_ensure_sync(dev);
		return 0;
	}
	if (header->packet_id != get_unaligned(&trailer->packet_id)) {
		dev->stats.rx_fifo_errors++;
		dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x",
			le16_to_cpu(header->packet_id),
			le16_to_cpu(trailer->packet_id));
		return 0;
	}
#if 0
	devdbg(dev, "frame <rx h %d p %d id %d", header->hdr_len,
		header->packet_len, header->packet_id);
#endif
	dev->frame_errors = 0;
	return 1;
}
/*
 * Add NetChip framing around an outgoing packet: nc_header in front,
 * an optional PAD_BYTE so the total USB length ends up odd, and an
 * nc_trailer behind.  The skb is reused when it has enough room
 * (possibly after sliding the payload); otherwise it is copied into
 * a larger one and the original is freed.
 *
 * Returns the framed skb, or NULL if reallocation failed (original
 * skb already freed in that case).
 */
static struct sk_buff *
net1080_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
	struct sk_buff *skb2;
	struct nc_header *header = NULL;
	struct nc_trailer *trailer = NULL;
	int padlen = sizeof (struct nc_trailer);
	int len = skb->len;

	/* pad so that header + payload + pad + trailer is odd */
	if (!((len + padlen + sizeof (struct nc_header)) & 0x01))
		padlen++;
	if (!skb_cloned(skb)) {
		int headroom = skb_headroom(skb);
		int tailroom = skb_tailroom(skb);

		if (padlen <= tailroom &&
			sizeof(struct nc_header) <= headroom)
			/* There's enough head and tail room */
			goto encapsulate;

		if ((sizeof (struct nc_header) + padlen) <
				(headroom + tailroom)) {
			/* There's enough total room, so just readjust */
			skb->data = memmove(skb->head
						+ sizeof (struct nc_header),
					    skb->data, skb->len);
			skb_set_tail_pointer(skb, len);
			goto encapsulate;
		}
	}

	/* Create a new skb to use with the correct size */
	skb2 = skb_copy_expand(skb,
				sizeof (struct nc_header),
				padlen,
				flags);
	dev_kfree_skb_any(skb);
	if (!skb2)
		return skb2;
	skb = skb2;

encapsulate:
	/* header first */
	header = (struct nc_header *) skb_push(skb, sizeof *header);
	header->hdr_len = cpu_to_le16(sizeof (*header));
	header->packet_len = cpu_to_le16(len);
	header->packet_id = cpu_to_le16((u16)dev->xid++);

	/* maybe pad; then trailer */
	if (!((skb->len + sizeof *trailer) & 0x01))
		*skb_put(skb, 1) = PAD_BYTE;
	trailer = (struct nc_trailer *) skb_put(skb, sizeof *trailer);
	put_unaligned(header->packet_id, &trailer->packet_id);
#if 0
	devdbg(dev, "frame >tx h %d p %d id %d",
		header->hdr_len, header->packet_len,
		header->packet_id);
#endif
	return skb;
}
/*
 * Bind: reserve room for the NetChip framing overhead (header,
 * possible pad byte, trailer) in the hard header length and rx urb
 * size, cap hard_mtu at the device limit, then locate the bulk
 * endpoints.
 */
static int net1080_bind(struct usbnet *dev, struct usb_interface *intf)
{
	unsigned overhead = sizeof (struct nc_header)
			+ 1		/* optional pad byte */
			+ sizeof (struct nc_trailer);

	dev->net->hard_header_len += overhead;
	dev->rx_urb_size = dev->net->hard_header_len + dev->net->mtu;
	dev->hard_mtu = NC_MAX_PACKET;
	return usbnet_get_endpoints (dev, intf);
}
static const struct driver_info net1080_info = {
.description = "NetChip TurboCONNECT",
.flags = FLAG_FRAMING_NC,
.bind = net1080_bind,
.reset = net1080_reset,
.check_connect = net1080_check_connect,
.rx_fixup = net1080_rx_fixup,
.tx_fixup = net1080_tx_fixup,
};
static const struct usb_device_id products [] = {
{
USB_DEVICE(0x0525, 0x1080), // NetChip ref design
.driver_info = (unsigned long) &net1080_info,
}, {
USB_DEVICE(0x06D0, 0x0622), // Laplink Gold
.driver_info = (unsigned long) &net1080_info,
},
{ }, // END
};
MODULE_DEVICE_TABLE(usb, products);
static struct usb_driver net1080_driver = {
.name = "net1080",
.id_table = products,
.probe = usbnet_probe,
.disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
};
/* Module load: register the USB driver. */
static int __init net1080_init(void)
{
	return usb_register(&net1080_driver);
}
module_init(net1080_init);
/* Module unload: unregister the USB driver. */
static void __exit net1080_exit(void)
{
	usb_deregister(&net1080_driver);
}
module_exit(net1080_exit);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("NetChip 1080 based USB Host-to-Host Links");
MODULE_LICENSE("GPL");
| gpl-2.0 |
IllusionRom/android_kernel_lge_hammerhead | drivers/media/platform/msm/camera_v1/vx6953_reg.c | 1757 | 3642 | /* Copyright (c) 2010, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include "vx6953.h"
const struct reg_struct_init vx6953_reg_init[1] = {
{
10, /*REG = 0x0112 , 10 bit */
10, /*REG = 0x0113*/
9, /*REG = 0x0301 vt_pix_clk_div*/
4, /*REG = 0x0305 pre_pll_clk_div*/
133, /*REG = 0x0307 pll_multiplier*/
10, /*REG = 0x0309 op_pix_clk_div*/
0x08, /*REG = 0x3030*/
0x02, /*REG = 0x0111*/
0x01, /*REG = 0x0b00 ,lens shading off */
0x30, /*REG = 0x3001*/
0x33, /*REG = 0x3004*/
0x09, /*REG = 0x3007*/
0x1F, /*REG = 0x3016*/
0x03, /*REG = 0x301d*/
0x11, /*REG = 0x317E*/
0x09, /*REG = 0x317F*/
0x38, /*REG = 0x3400*/
0x00, /*REG_0x0b06*/
0x80, /*REG_0x0b07*/
0x01, /*REG_0x0b08*/
0x4F, /*REG_0x0b09*/
0x18, /*REG_0x0136*/
0x00, /*/REG_0x0137*/
0x20, /*REG = 0x0b83*/
0x90, /*REG = 0x0b84*/
0x20, /*REG = 0x0b85*/
0x80, /*REG = 0x0b88*/
0x00, /*REG = 0x0b89*/
0x00, /*REG = 0x0b8a*/
}
};
const struct reg_struct vx6953_reg_pat[2] = {
{/* Preview */
0x03, /*REG = 0x0202 coarse integration_time_hi*/
0xd0, /*REG = 0x0203 coarse_integration_time_lo*/
0xc0, /*REG = 0x0205 analogue_gain_code_global*/
0x03, /*REG = 0x0340 frame_length_lines_hi*/
0xf0, /*REG = 0x0341 frame_length_lines_lo*/
0x0b, /*REG = 0x0342 line_length_pck_hi*/
0x74, /*REG = 0x0343 line_length_pck_lo*/
0x03, /*REG = 0x3005*/
0x00, /*REG = 0x3010*/
0x01, /*REG = 0x3011*/
0x6a, /*REG = 0x301a*/
0x03, /*REG = 0x3035*/
0x2c, /*REG = 0x3036*/
0x00, /*REG = 0x3041*/
0x24, /*REG = 0x3042*/
0x81, /*REG = 0x3045*/
0x02, /*REG = 0x0b80 edof estimate*/
0x01, /*REG = 0x0900*/
0x22, /*REG = 0x0901*/
0x04, /*REG = 0x0902*/
0x03, /*REG = 0x0383*/
0x03, /*REG = 0x0387*/
0x05, /*REG = 0x034c*/
0x18, /*REG = 0x034d*/
0x03, /*REG = 0x034e*/
0xd4, /*REG = 0x034f*/
0x02, /*0x1716*/
0x04, /*0x1717*/
0x08, /*0x1718*/
0x2c, /*0x1719*/
0x01, /*0x3210*/
0x02, /*0x111*/
0x01, /*0x3410*/
0x01, /*0x3098*/
0x05, /*0x309D*/
0x02,
0x04,
},
{ /* Snapshot */
0x07,/*REG = 0x0202 coarse_integration_time_hi*/
0x00,/*REG = 0x0203 coarse_integration_time_lo*/
0xc0,/*REG = 0x0205 analogue_gain_code_global*/
0x07,/*REG = 0x0340 frame_length_lines_hi*/
0xd0,/*REG = 0x0341 frame_length_lines_lo*/
0x0b,/*REG = 0x0342 line_length_pck_hi*/
0x8c,/*REG = 0x0343 line_length_pck_lo*/
0x01,/*REG = 0x3005*/
0x00,/*REG = 0x3010*/
0x00,/*REG = 0x3011*/
0x55,/*REG = 0x301a*/
0x01,/*REG = 0x3035*/
0x23,/*REG = 0x3036*/
0x00,/*REG = 0x3041*/
0x24,/*REG = 0x3042*/
0xb7,/*REG = 0x3045*/
0x01,/*REG = 0x0b80 edof application*/
0x00,/*REG = 0x0900*/
0x00,/*REG = 0x0901*/
0x00,/*REG = 0x0902*/
0x01,/*REG = 0x0383*/
0x01,/*REG = 0x0387*/
0x0A,/*REG = 0x034c*/
0x30,/*REG = 0x034d*/
0x07,/*REG = 0x034e*/
0xA8,/*REG = 0x034f*/
0x02,/*0x1716*/
0x0d,/*0x1717*/
0x07,/*0x1718*/
0x7d,/*0x1719*/
0x01,/*0x3210*/
0x02,/*0x111*/
0x01,/*0x3410*/
0x01,/*0x3098*/
0x05, /*0x309D*/
0x02,
0x00,
}
};
/* Dispatch table tying the one-time init settings and the per-mode
 * (preview/snapshot) register patterns together for the driver core. */
struct vx6953_reg vx6953_regs = {
	.reg_pat_init = &vx6953_reg_init[0],
	.reg_pat = &vx6953_reg_pat[0],
};
| gpl-2.0 |
Split-Screen/android_kernel_samsung_trlte | drivers/gpu/drm/nouveau/core/engine/vp/nve0.c | 2269 | 3346 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include <core/falcon.h>
#include <engine/vp.h>
struct nve0_vp_priv {
struct nouveau_falcon base;
};
/*******************************************************************************
* VP object classes
******************************************************************************/
static struct nouveau_oclass
nve0_vp_sclass[] = {
{ 0x95b2, &nouveau_object_ofuncs },
{},
};
/*******************************************************************************
* PVP context
******************************************************************************/
static struct nouveau_oclass
nve0_vp_cclass = {
.handle = NV_ENGCTX(VP, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = _nouveau_falcon_context_ctor,
.dtor = _nouveau_falcon_context_dtor,
.init = _nouveau_falcon_context_init,
.fini = _nouveau_falcon_context_fini,
.rd32 = _nouveau_falcon_context_rd32,
.wr32 = _nouveau_falcon_context_wr32,
},
};
/*******************************************************************************
* PVP engine/subdev functions
******************************************************************************/
/*
 * Engine init: bring up the underlying falcon, then program two PVP
 * registers with 0x0000fff2.  (NOTE(review): register semantics are
 * undocumented here -- presumably interrupt/unit setup; confirm
 * against nouveau register docs before changing.)
 */
static int
nve0_vp_init(struct nouveau_object *object)
{
	struct nve0_vp_priv *priv = (void *)object;
	int ret;

	ret = nouveau_falcon_init(&priv->base);
	if (ret)
		return ret;

	nv_wr32(priv, 0x085010, 0x0000fff2);
	nv_wr32(priv, 0x08501c, 0x0000fff2);
	return 0;
}
/*
 * Constructor: create the PVP falcon engine at register base 0x085000,
 * set its PMC subdev unit mask, and attach the context/object classes.
 */
static int
nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	     struct nouveau_oclass *oclass, void *data, u32 size,
	     struct nouveau_object **pobject)
{
	struct nve0_vp_priv *priv;
	int ret;

	ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
				    "PVP", "vp", &priv);
	/* *pobject is set even on failure so the core can clean up */
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00020000;
	nv_engine(priv)->cclass = &nve0_vp_cclass;
	nv_engine(priv)->sclass = nve0_vp_sclass;
	return 0;
}
struct nouveau_oclass
nve0_vp_oclass = {
.handle = NV_ENGINE(VP, 0xe0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nve0_vp_ctor,
.dtor = _nouveau_falcon_dtor,
.init = nve0_vp_init,
.fini = _nouveau_falcon_fini,
.rd32 = _nouveau_falcon_rd32,
.wr32 = _nouveau_falcon_wr32,
},
};
| gpl-2.0 |
AOKPSaber/kernel_samsung_p4 | drivers/scsi/qla4xxx/ql4_isr.c | 2781 | 30630 | /*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2010 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
/**
* qla4xxx_copy_sense - copy sense data into cmd sense buffer
* @ha: Pointer to host adapter structure.
* @sts_entry: Pointer to status entry structure.
* @srb: Pointer to srb structure.
**/
/*
 * Copy sense data from a status IOCB into the command's sense buffer.
 * A single status entry carries at most IOCB_MAX_SENSEDATA_LEN bytes;
 * if more sense is available, ha->status_srb is set so the following
 * status-continuation IOCBs (qla4xxx_status_cont_entry) can deliver
 * the remainder.
 */
static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
			       struct status_entry *sts_entry,
			       struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	uint16_t sense_len;

	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
	if (sense_len == 0) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
				  " sense len 0\n", ha->host_no,
				  cmd->device->channel, cmd->device->id,
				  cmd->device->lun, __func__));
		ha->status_srb = NULL;
		return;
	}

	/* Save total available sense length,
	 * not to exceed cmd's sense buffer size */
	sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
	srb->req_sense_ptr = cmd->sense_buffer;
	srb->req_sense_len = sense_len;

	/* Copy sense from sts_entry pkt */
	sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
	memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);

	DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, "
		      "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
		      cmd->device->channel, cmd->device->id,
		      cmd->device->lun, __func__,
		      sts_entry->senseData[2] & 0x0f,
		      sts_entry->senseData[7],
		      sts_entry->senseData[12],
		      sts_entry->senseData[13]));

	DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
	srb->flags |= SRB_GOT_SENSE;

	/* Update srb, in case a sts_cont pkt follows */
	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;

	/* remaining sense (if any) arrives via continuation IOCBs */
	if (srb->req_sense_len != 0)
		ha->status_srb = srb;
	else
		ha->status_srb = NULL;
}
/**
* qla4xxx_status_cont_entry - Process a Status Continuations entry.
* @ha: SCSI driver HA context
* @sts_cont: Entry pointer
*
* Extended sense data.
*/
/*
 * Process a Status Continuation IOCB: append the next chunk of
 * extended sense data to the srb stashed in ha->status_srb by
 * qla4xxx_copy_sense().  When all sense bytes have arrived, the
 * command is completed and ha->status_srb cleared.
 */
static void
qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
			  struct status_cont_entry *sts_cont)
{
	struct srb *srb = ha->status_srb;
	struct scsi_cmnd *cmd;
	uint16_t sense_len;

	/* no command is waiting for continuation data */
	if (srb == NULL)
		return;

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
			      "back to OS srb=%p srb->state:%d\n", ha->host_no,
			      __func__, srb, srb->state));
		ha->status_srb = NULL;
		return;
	}

	/* Copy sense data. */
	sense_len = min_t(uint16_t, srb->req_sense_len,
			  IOCB_MAX_EXT_SENSEDATA_LEN);
	memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
	DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));

	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;

	/* Place command on done queue. */
	if (srb->req_sense_len == 0) {
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
		ha->status_srb = NULL;
	}
}
/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 *
 * Maps the firmware completion status of one command to a Linux SCSI
 * result, copies sense data when present, and completes the command
 * unless a status-continuation IOCB is still expected
 * (ha->status_srb set by qla4xxx_copy_sense()).
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;

	/* Look up (and remove) the active srb owning this handle. */
	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		/* Unknown handle: firmware and driver state disagree,
		 * so schedule a full adapter reset. */
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
			      "handle 0x%x, sp=%p. This cmd may have already "
			      "been completed.\n", ha->host_no, __func__,
			      le32_to_cpu(sts_entry->handle), srb));
		ql4_printk(KERN_WARNING, ha, "%s invalid status entry:"
			   " handle=0x%0x\n", __func__, sts_entry->handle);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		ql4_printk(KERN_WARNING, ha, "Command is NULL:"
			   " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		/* Target no longer attached; fail the command. */
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:

		/* Residual overrun means more data came than fits. */
		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
			/* Underrun below the midlayer's minimum transfer
			 * with no SCSI status is treated as an error. */
			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
				cmd->underflow)) {

				cmd->result = DID_ERROR << 16;

				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					"Mid-layer Data underrun0, "
					"xferlen = 0x%x, "
					"residual = 0x%x\n", ha->host_no,
					cmd->device->channel,
					cmd->device->id,
					cmd->device->lun, __func__,
					scsi_bufflen(cmd), residual));
				break;
			}
		}

		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		qla4xxx_copy_sense(ha, sts_entry, srb);
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		/* NOTE(review): aborted commands are reported as DID_RESET
		 * rather than DID_ABORT - presumably intentional so the
		 * midlayer retries; confirm against driver history. */
		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.	We should get a ddb state change
		 * AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		     (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun\n",
				      ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		/*
		 * If there is scsi_status, it takes precedense over
		 * underflow condition.
		 */
		if (scsi_status != 0) {
			cmd->result = DID_OK << 16 | scsi_status;

			if (scsi_status != SCSI_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer. */
			qla4xxx_copy_sense(ha, sts_entry, srb);
		} else {
			/*
			 * If RISC reports underrun and target does not
			 * report it then we must have a lost frame, so
			 * tell upper layer to retry it by reporting a
			 * bus busy.
			 */
			if ((sts_entry->iscsiFlags &
			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
				cmd->result = DID_BUS_BUSY << 16;
			} else if ((scsi_bufflen(cmd) - residual) <
				   cmd->underflow) {
				/*
				 * Handle mid-layer underflow???
				 *
				 * For kernels less than 2.4, the driver must
				 * return an error if an underflow is detected.
				 * For kernels equal-to and above 2.4, the
				 * mid-layer will appearantly handle the
				 * underflow by detecting the residual count --
				 * unfortunately, we do not see where this is
				 * actually being done.	 In the interim, we
				 * will return DID_ERROR.
				 */
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					"Mid-layer Data underrun1, "
					"xferlen = 0x%x, "
					"residual = 0x%x\n", ha->host_no,
					cmd->device->channel,
					cmd->device->id,
					cmd->device->lun, __func__,
					scsi_bufflen(cmd), residual));

				cmd->result = DID_ERROR << 16;
			} else {
				cmd->result = DID_OK << 16;
			}
		}
		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: SCS_DEVICE "
			      "state: 0x%x\n", ha->host_no,
			      cmd->device->channel, cmd->device->id,
			      cmd->device->lun, sts_entry->completionStatus));
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request, if not waiting for status_continuation pkt */
	srb->cc_stat = sts_entry->completionStatus;
	if (ha->status_srb == NULL)
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}
/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine process response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
	while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry, wrapping the ring at
		 * RESPONSE_QUEUE_DEPTH. */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/* Common status */
			qla4xxx_status_entry(ha, sts_entry);
			break;

		case ET_PASSTHRU_STATUS:
			break;

		case ET_STATUS_CONTINUATION:
			/* Extended sense data for a prior ET_STATUS. */
			qla4xxx_status_cont_entry(ha,
				(struct status_cont_entry *) sts_entry);
			break;

		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP.  Queue command for
			 * later */

			srb = qla4xxx_del_from_active_array(ha,
						    le32_to_cpu(sts_entry->
								handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* ETRY normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;

		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue \n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
		/* Mark the entry consumed and make the store visible
		 * before the ring index is published to hardware. */
		((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/*
	 * Tell ISP we're done with response(s). This also clears the interrupt.
	 */
	ha->isp_ops->complete_iocb(ha);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	/* Acknowledge what was consumed, then request an adapter reset. */
	ha->isp_ops->complete_iocb(ha);
	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. runs in interrupt context.
 *
 * Mailbox values fall into two classes: completions of driver-issued
 * mailbox commands (copied to ha->mbox_status and signalled to the
 * waiter) and firmware-initiated asynchronous events (AENs), which are
 * either handled immediately or queued for the DPC routine.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];

	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		/* Completion of a mailbox command issued by the driver. */
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 0; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] = is_qla8022(ha)
				    ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
				    : readl(&ha->reg->mailbox[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);

			if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
				complete(&ha->mbx_intr_comp);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		/* Asynchronous event: snapshot all mailbox registers. */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = is_qla8022(ha)
			    ? readl(&ha->qla4_8xxx_reg->mailbox_out[i])
			    : readl(&ha->reg->mailbox[i]);

		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
				    mbox_sts[i];
			ha->aen_log.count++;
		}
		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
			qla4xxx_dump_registers(ha);

			if (ql4xdontresethba) {
				DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
				    ha->host_no, __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			/* Fatal firmware/configuration errors: recover by
			 * resetting the adapter from the DPC thread. */
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			set_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags))
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

			ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
			break;

		case MBOX_ASTS_LINK_DOWN:
			clear_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags))
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

			ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
			break;

		case MBOX_ASTS_HEARTBEAT:
			/* Firmware is alive; reset the watchdog counter. */
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
							   * mode
							   * only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
		case MBOX_ASTS_DUPLICATE_IP:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
			    "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
			    mbox_sts[2], mbox_sts[3]);

			/* mbox_sts[2] = Old ACB state
			 * mbox_sts[3] = new ACB state */
			if ((mbox_sts[3] == ACB_STATE_VALID) &&
			    ((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
			    (mbox_sts[2] == ACB_STATE_ACQUIRING)))
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
			    (mbox_sts[2] == ACB_STATE_VALID))
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_sts[0],
				      mbox_sts[1], mbox_sts[2]));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_sts[0], mbox_sts[1],
				      mbox_sts[2], mbox_sts[3]));
			break;

		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {

				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
					    mbox_sts[i];

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
					      "mb1:0x%x mb2:0x%x mb3:0x%x "
					      "mb4:0x%x mb5:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_sts[0], mbox_sts[1],
					      mbox_sts[2], mbox_sts[3],
					      mbox_sts[4], mbox_sts[5]));

				/* advance pointer */
				ha->aen_in++;
				if (ha->aen_in == MAX_AEN_ENTRIES)
					ha->aen_in = 0;

				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed!  AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_sts[0]));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i, mbox_sts[0],
						      mbox_sts[1], mbox_sts[2],
						      mbox_sts[3]));
				}
			}
			break;

		case MBOX_ASTS_TXSCVR_INSERTED:
			DEBUG2(printk(KERN_WARNING
			    "scsi%ld: AEN %04x Transceiver"
			    " inserted\n",  ha->host_no, mbox_sts[0]));
			break;

		case MBOX_ASTS_TXSCVR_REMOVED:
			DEBUG2(printk(KERN_WARNING
			    "scsi%ld: AEN %04x Transceiver"
			    " removed\n",  ha->host_no, mbox_sts[0]));
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_sts[0]));
			break;
		}
	} else {
		/* Neither a completion nor a recognized AEN class. */
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));

		ha->mbox_status[0] = mbox_status;
	}
}
/**
 * qla4_8xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: value of the host status register (HSRX_*) bits to service.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4_8xxx_interrupt_service_routine(struct scsi_qla_host *ha,
    uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & HSRX_RISC_IOCB_INT)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & HSRX_RISC_MB_INT)
		qla4xxx_isr_decode_mailbox(ha,
		    readl(&ha->qla4_8xxx_reg->mailbox_out[0]));

	/* clear the interrupt */
	writel(0, &ha->qla4_8xxx_reg->host_int);
	readl(&ha->qla4_8xxx_reg->host_int); /* read back to flush the posted write */
}
/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: value of the control/status register (CSR_*) bits to service.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha,
					   readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status); /* flush the posted write */
	}
}
/**
 * qla4_8xxx_spurious_interrupt - processes spurious interrupt
 * @ha: pointer to host adapter structure.
 * @reqs_count: number of requests already serviced in this invocation;
 *	a non-zero count means the interrupt was not spurious, so only
 *	bookkeeping for truly spurious (count == 0) interrupts is done.
 *
 **/
static void qla4_8xxx_spurious_interrupt(struct scsi_qla_host *ha,
    uint8_t reqs_count)
{
	/* Work was done this pass, so the interrupt was genuine. */
	if (reqs_count)
		return;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
	if (is_qla8022(ha)) {
		/* Ack the (empty) interrupt and re-enable the legacy
		 * INTx mask if that mode is active. */
		writel(0, &ha->qla4_8xxx_reg->host_int);
		if (test_bit(AF_INTx_ENABLED, &ha->flags))
			qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
			    0xfbff);
	}
	ha->spurious_int_count++;
}
/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * Legacy INTx handler for non-82xx adapters: services completions,
 * fatal errors and soft-reset interrupts under ha->hardware_lock,
 * looping up to MAX_REQS_SERVICED_PER_INTR times.
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status
		 */
		/* A moved response-queue-in pointer implies completions
		 * even before the control/status register is read. */
		if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		    (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status (ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			/* Firmware-initiated reset: take the port offline
			 * and let the DPC recover (unless being removed). */
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!test_bit(AF_HA_REMOVAL, &ha->flags))
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			ha->isp_ops->interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
/**
 * qla4_8xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * Legacy INTx handler for ISP82xx adapters. Verifies the interrupt
 * belongs to this adapter (shared line), acknowledges it, then services
 * mailbox/IOCB work under ha->hardware_lock.
 **/
irqreturn_t qla4_8xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	uint32_t intr_status;
	uint32_t status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	ha->isr_count++;
	/* Shared line: bail out if the vector bit isn't ours. */
	status = qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
	if (!(status & ha->nx_legacy_intr.int_vec_bit))
		return IRQ_NONE;

	status = qla4_8xxx_rd_32(ha, ISR_INT_STATE_REG);
	if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "%s legacy Int not triggered\n", __func__));
		return IRQ_NONE;
	}

	/* clear the interrupt */
	qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (1) {
		if (!(readl(&ha->qla4_8xxx_reg->host_int) &
		    ISRX_82XX_RISC_INT)) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}
		intr_status =  readl(&ha->qla4_8xxx_reg->host_status);
		if ((intr_status &
		    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0)  {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}

		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		/* Enable Interrupt */
		qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);

		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
			break;
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}
/**
 * qla4_8xxx_msi_handler - MSI interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * Acknowledges the interrupt in the legacy target-status register and
 * delegates the actual servicing to qla4_8xxx_default_intr_handler().
 **/
irqreturn_t
qla4_8xxx_msi_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
		    "qla4xxx: MSIX: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	ha->isr_count++;
	/* clear the interrupt */
	qla4_8xxx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);
	qla4_8xxx_rd_32(ha, ISR_INT_VECTOR);

	return qla4_8xxx_default_intr_handler(irq, dev_id);
}
/**
 * qla4_8xxx_default_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * This interrupt handler is called directly for MSI-X, and
 * called indirectly for MSI.
 **/
irqreturn_t
qla4_8xxx_default_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;
	uint32_t intr_status;
	uint8_t reqs_count = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	/* Service work until nothing is pending or the per-invocation
	 * limit is reached. */
	while (1) {
		if (!(readl(&ha->qla4_8xxx_reg->host_int) &
		    ISRX_82XX_RISC_INT)) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}

		intr_status =  readl(&ha->qla4_8xxx_reg->host_status);
		if ((intr_status &
		    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
			qla4_8xxx_spurious_interrupt(ha, reqs_count);
			break;
		}

		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
			break;
	}

	ha->isr_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}
/**
 * qla4_8xxx_msix_rsp_q - MSI-X response queue interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * Dedicated vector for response-queue completions: drains the queue
 * under ha->hardware_lock and acks the host interrupt register.
 **/
irqreturn_t
qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_process_response_queue(ha);
	writel(0, &ha->qla4_8xxx_reg->host_int);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ha->isr_count++;
	return IRQ_HANDLED;
}
/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *	PROCESS_ALL_AENS	 0
 *	FLUSH_DDB_CHANGED_AENS	 1
 *	RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		ha->aen_q_count++;
		ha->aen_out++;

		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;

		/* Drop the lock while processing this entry: the handlers
		 * below may sleep or take other locks. The queue indices
		 * are re-read after the lock is re-acquired. */
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			" mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			(ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
			mbox_sts[0], mbox_sts[1], mbox_sts[2],
			mbox_sts[3], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			}
			/* fall through - not flushing, so process the
			 * DDB change like any other AEN */
		case PROCESS_ALL_AENS:
		default:
			if (mbox_sts[1] == 0) {	/* Global DB change. */
				qla4xxx_reinitialize_ddb_list(ha);
			} else if (mbox_sts[1] == 1) {	/* Specific device. */
				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
						mbox_sts[3], mbox_sts[4]);
			}
			break;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/**
 * qla4xxx_request_irqs - attach an interrupt handler for the adapter
 * @ha: Pointer to host adapter structure.
 *
 * For ISP82xx adapters try MSI-X first, then fall back to MSI and
 * finally to legacy INTx. Non-82xx adapters use INTx only.
 *
 * The ql4xenablemsix module parameter steers the choice:
 *	1 - try MSI-X first (default path)
 *	2 - skip MSI-X and try MSI first
 *	anything else - use INTx only
 *
 * Returns 0 on success, or the last request/enable error code.
 */
int qla4xxx_request_irqs(struct scsi_qla_host *ha)
{
	int ret;

	if (!is_qla8022(ha))
		goto try_intx;

	if (ql4xenablemsix == 2)
		goto try_msi;

	/* Anything other than 1 disables MSI-X (the "== 2" case was
	 * handled above). The former "== 0 || != 1" test was redundant:
	 * "!= 1" alone covers both disjuncts. */
	if (ql4xenablemsix != 1)
		goto try_intx;

	/* Trying MSI-X */
	ret = qla4_8xxx_enable_msix(ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "MSI-X: Enabled (0x%X).\n", ha->revision_id));
		goto irq_attached;
	}

	ql4_printk(KERN_WARNING, ha,
	    "MSI-X: Falling back-to MSI mode -- %d.\n", ret);

try_msi:
	/* Trying MSI */
	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
			0, DRIVER_NAME, ha);
		if (!ret) {
			DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
			set_bit(AF_MSI_ENABLED, &ha->flags);
			goto irq_attached;
		} else {
			ql4_printk(KERN_WARNING, ha,
			    "MSI: Failed to reserve interrupt %d "
			    "already in use.\n", ha->pdev->irq);
			pci_disable_msi(ha->pdev);
		}
	}
	ql4_printk(KERN_WARNING, ha,
	    "MSI: Falling back-to INTx mode -- %d.\n", ret);

try_intx:
	/* Trying INTx */
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    IRQF_SHARED, DRIVER_NAME, ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
		set_bit(AF_INTx_ENABLED, &ha->flags);
		goto irq_attached;
	} else {
		ql4_printk(KERN_WARNING, ha,
		    "INTx: Failed to reserve interrupt %d already in"
		    " use.\n", ha->pdev->irq);
		return ret;
	}

irq_attached:
	set_bit(AF_IRQ_ATTACHED, &ha->flags);
	ha->host->irq = ha->pdev->irq;
	ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
	    __func__, ha->pdev->irq);
	return ret;
}
/**
 * qla4xxx_free_irqs - release the interrupt attached by request_irqs()
 * @ha: Pointer to host adapter structure.
 *
 * Tears down whichever interrupt mode (MSI-X, MSI or INTx) is flagged
 * as enabled in ha->flags.
 */
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
	if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
		qla4_8xxx_disable_msix(ha);
		return;
	}

	if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
		free_irq(ha->pdev->irq, ha);
		pci_disable_msi(ha->pdev);
		return;
	}

	if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
		free_irq(ha->pdev->irq, ha);
}
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/bcd.h>
#include <asm/uaccess.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#ifdef CONFIG_X86
#include <linux/mc146818rtc.h>
#endif
#include "sleep.h"
#define _COMPONENT ACPI_SYSTEM_COMPONENT
/*
* this file provides support for:
* /proc/acpi/alarm
* /proc/acpi/wakeup
*/
ACPI_MODULE_NAME("sleep")
#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || !defined(CONFIG_X86)
/* use /sys/class/rtc/rtcX/wakealarm instead; it's not ACPI-specific */
#else
#define HAVE_ACPI_LEGACY_ALARM
#endif
#ifdef HAVE_ACPI_LEGACY_ALARM
static u32 cmos_bcd_read(int offset, int rtc_control);
/*
 * Show the currently programmed RTC alarm time via /proc/acpi/alarm.
 * Reads the CMOS alarm registers (plus FADT-described extended alarm
 * registers when present) under rtc_lock and prints a best-effort
 * "YYYY-MM-DD hh:mm:ss" line, with "**" for fields the hardware does
 * not provide.
 */
static int acpi_system_alarm_seq_show(struct seq_file *seq, void *offset)
{
	u32 sec, min, hr;
	u32 day, mo, yr, cent = 0;
	u32 today = 0;
	unsigned char rtc_control = 0;
	unsigned long flags;

	spin_lock_irqsave(&rtc_lock, flags);

	rtc_control = CMOS_READ(RTC_CONTROL);
	sec = cmos_bcd_read(RTC_SECONDS_ALARM, rtc_control);
	min = cmos_bcd_read(RTC_MINUTES_ALARM, rtc_control);
	hr = cmos_bcd_read(RTC_HOURS_ALARM, rtc_control);

	/* If we ever get an FACP with proper values... */
	if (acpi_gbl_FADT.day_alarm) {
		/* ACPI spec: only low 6 bits should be cared */
		day = CMOS_READ(acpi_gbl_FADT.day_alarm) & 0x3F;
		if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
			day = bcd2bin(day);
	} else
		day = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
	if (acpi_gbl_FADT.month_alarm)
		mo = cmos_bcd_read(acpi_gbl_FADT.month_alarm, rtc_control);
	else {
		mo = cmos_bcd_read(RTC_MONTH, rtc_control);
		/* today is needed below for the month-rollover heuristic */
		today = cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
	}
	if (acpi_gbl_FADT.century)
		cent = cmos_bcd_read(acpi_gbl_FADT.century, rtc_control);

	yr = cmos_bcd_read(RTC_YEAR, rtc_control);

	spin_unlock_irqrestore(&rtc_lock, flags);

	/* we're trusting the FADT (see above) */
	if (!acpi_gbl_FADT.century)
		/* If we're not trusting the FADT, we should at least make it
		 * right for _this_ century... ehm, what is _this_ century?
		 *
		 * TBD:
		 *  ASAP: find piece of code in the kernel, e.g. star tracker driver,
		 *        which we can trust to determine the century correctly. Atom
		 *        watch driver would be nice, too...
		 *
		 *  if that has not happened, change for first release in 2050:
		 *        if (yr<50)
		 *                yr += 2100;
		 *        else
		 *                yr += 2000;   // current line of code
		 *
		 *  if that has not happened either, please do on 2099/12/31:23:59:59
		 *        s/2000/2100
		 *
		 */
		yr += 2000;
	else
		yr += cent * 100;

	/*
	 * Show correct dates for alarms up to a month into the future.
	 * This solves issues for nearly all situations with the common
	 * 30-day alarm clocks in PC hardware.
	 */
	if (day < today) {
		if (mo < 12) {
			mo += 1;
		} else {
			mo = 1;
			yr += 1;
		}
	}

	/* Out-of-range fields mean "not set" on this hardware. */
	seq_printf(seq, "%4.4u-", yr);
	(mo > 12) ? seq_puts(seq, "**-") : seq_printf(seq, "%2.2u-", mo);
	(day > 31) ? seq_puts(seq, "** ") : seq_printf(seq, "%2.2u ", day);
	(hr > 23) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", hr);
	(min > 59) ? seq_puts(seq, "**:") : seq_printf(seq, "%2.2u:", min);
	(sec > 59) ? seq_puts(seq, "**\n") : seq_printf(seq, "%2.2u\n", sec);

	return 0;
}
/* seq_file open callback for /proc/acpi/alarm. */
static int acpi_system_alarm_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_system_alarm_seq_show, PDE(inode)->data);
}
/*
 * Parse one unsigned decimal field from the string at *p, where fields
 * are separated by '-', ' ' or ':'. On return *p points at the next
 * field (or is NULL when the string is exhausted). Returns 0 when at
 * least one digit was parsed, -EINVAL otherwise. The delimiter, if
 * present, is overwritten with '\0'.
 */
static int get_date_field(char **p, u32 * value)
{
	char *field = *p;
	char *delim;
	char *endp = NULL;

	/* A NULL cursor means a previous call consumed the string. */
	if (field == NULL)
		return -EINVAL;

	/*
	 * Terminate this field at the next delimiter, if any. The end
	 * of the string has no delimiter but is still a valid field.
	 */
	delim = strpbrk(field, "- :");
	if (delim)
		*delim++ = '\0';

	*value = simple_strtoul(field, &endp, 10);

	/* Advance the cursor (delim is NULL at end of string). */
	*p = delim;

	/* Success only if at least one digit was consumed. */
	return (endp != field) ? 0 : -EINVAL;
}
/* Read a possibly BCD register, always return binary.
 * rtc_control's RTC_DM_BINARY bit (or RTC_ALWAYS_BCD on some platforms)
 * decides whether the raw CMOS value needs BCD-to-binary conversion. */
static u32 cmos_bcd_read(int offset, int rtc_control)
{
	u32 val = CMOS_READ(offset);
	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
		val = bcd2bin(val);
	return val;
}
/* Write binary value into possibly BCD register.
 * Converts to BCD first when the RTC is not in binary mode. */
static void cmos_bcd_write(u32 val, int offset, int rtc_control)
{
	if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
		val = bin2bcd(val);
	CMOS_WRITE(val, offset);
}
/*
 * Program the RTC alarm from a write to /proc/acpi/alarm.
 *
 * Accepts "YYYY-MM-DD HH:MM:SS"; a leading '+' makes all fields
 * relative to the current RTC time. Field values are normalized
 * (seconds into minutes, etc.), the alarm registers are written with
 * the alarm interrupt temporarily disabled, and the RTC alarm
 * interrupt plus the ACPI RTC event are (re)enabled.
 *
 * Returns the number of bytes consumed, or a negative errno on a
 * malformed string or failed copy from userspace.
 */
static ssize_t
acpi_system_write_alarm(struct file *file,
			const char __user * buffer, size_t count, loff_t * ppos)
{
	int result = 0;
	char alarm_string[30] = { '\0' };
	char *p = alarm_string;
	u32 sec, min, hr, day, mo, yr;
	int adjust = 0;
	unsigned char rtc_control = 0;

	if (count > sizeof(alarm_string) - 1)
		return -EINVAL;

	if (copy_from_user(alarm_string, buffer, count))
		return -EFAULT;

	alarm_string[count] = '\0';

	/* check for time adjustment */
	if (alarm_string[0] == '+') {
		p++;
		adjust = 1;
	}

	/* Parse the six fields; get_date_field() advances p. */
	if ((result = get_date_field(&p, &yr)))
		goto end;
	if ((result = get_date_field(&p, &mo)))
		goto end;
	if ((result = get_date_field(&p, &day)))
		goto end;
	if ((result = get_date_field(&p, &hr)))
		goto end;
	if ((result = get_date_field(&p, &min)))
		goto end;
	if ((result = get_date_field(&p, &sec)))
		goto end;

	spin_lock_irq(&rtc_lock);

	rtc_control = CMOS_READ(RTC_CONTROL);

	if (adjust) {
		/* Relative alarm: add the current RTC time to each field. */
		yr += cmos_bcd_read(RTC_YEAR, rtc_control);
		mo += cmos_bcd_read(RTC_MONTH, rtc_control);
		day += cmos_bcd_read(RTC_DAY_OF_MONTH, rtc_control);
		hr += cmos_bcd_read(RTC_HOURS, rtc_control);
		min += cmos_bcd_read(RTC_MINUTES, rtc_control);
		sec += cmos_bcd_read(RTC_SECONDS, rtc_control);
	}

	spin_unlock_irq(&rtc_lock);

	/* Normalize carry from seconds up through months. */
	if (sec > 59) {
		min += sec/60;
		sec = sec%60;
	}
	if (min > 59) {
		hr += min/60;
		min = min%60;
	}
	if (hr > 23) {
		day += hr/24;
		hr = hr%24;
	}
	if (day > 31) {
		mo += day/32;
		day = day%32;
	}
	if (mo > 12) {
		yr += mo/13;
		mo = mo%13;
	}

	spin_lock_irq(&rtc_lock);
	/*
	 * Disable alarm interrupt before setting alarm timer or else
	 * when ACPI_EVENT_RTC is enabled, a spurious ACPI interrupt occurs
	 */
	rtc_control &= ~RTC_AIE;
	CMOS_WRITE(rtc_control, RTC_CONTROL);
	CMOS_READ(RTC_INTR_FLAGS);

	/* write the fields the rtc knows about */
	cmos_bcd_write(hr, RTC_HOURS_ALARM, rtc_control);
	cmos_bcd_write(min, RTC_MINUTES_ALARM, rtc_control);
	cmos_bcd_write(sec, RTC_SECONDS_ALARM, rtc_control);

	/*
	 * If the system supports an enhanced alarm it will have non-zero
	 * offsets into the CMOS RAM here -- which for some reason are pointing
	 * to the RTC area of memory.
	 */
	if (acpi_gbl_FADT.day_alarm)
		cmos_bcd_write(day, acpi_gbl_FADT.day_alarm, rtc_control);
	if (acpi_gbl_FADT.month_alarm)
		cmos_bcd_write(mo, acpi_gbl_FADT.month_alarm, rtc_control);
	if (acpi_gbl_FADT.century) {
		if (adjust)
			yr += cmos_bcd_read(acpi_gbl_FADT.century, rtc_control) * 100;
		cmos_bcd_write(yr / 100, acpi_gbl_FADT.century, rtc_control);
	}
	/* enable the rtc alarm interrupt */
	rtc_control |= RTC_AIE;
	CMOS_WRITE(rtc_control, RTC_CONTROL);
	CMOS_READ(RTC_INTR_FLAGS);

	spin_unlock_irq(&rtc_lock);

	acpi_clear_event(ACPI_EVENT_RTC);
	acpi_enable_event(ACPI_EVENT_RTC, 0);

	*ppos += count;

	result = 0;
      end:
	return result ? result : count;
}
#endif /* HAVE_ACPI_LEGACY_ALARM */
/*
 * List every ACPI device with valid wakeup capability via
 * /proc/acpi/wakeup: one line per device with its bus id, deepest
 * wakeup-capable sleep state, enabled/disabled status and, when a
 * physical device is bound, its sysfs bus:name.
 */
static int
acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
{
	struct list_head *node, *next;

	seq_printf(seq, "Device\tS-state\t  Status   Sysfs node\n");

	mutex_lock(&acpi_device_lock);
	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
		struct acpi_device *dev =
		    container_of(node, struct acpi_device, wakeup_list);
		struct device *ldev;

		if (!dev->wakeup.flags.valid)
			continue;

		/* ldev carries a reference; dropped via put_device() below. */
		ldev = acpi_get_physical_device(dev->handle);
		seq_printf(seq, "%s\t  S%d\t%c%-8s  ",
			   dev->pnp.bus_id,
			   (u32) dev->wakeup.sleep_state,
			   dev->wakeup.flags.run_wake ? '*' : ' ',
			   (device_may_wakeup(&dev->dev)
			     || (ldev && device_may_wakeup(ldev))) ?
			       "enabled" : "disabled");
		if (ldev)
			seq_printf(seq, "%s:%s",
				   ldev->bus ? ldev->bus->name : "no-bus",
				   dev_name(ldev));
		seq_printf(seq, "\n");
		put_device(ldev);

	}
	mutex_unlock(&acpi_device_lock);
	return 0;
}
/*
 * Toggle the wakeup-enable state of the physical device that backs the
 * given ACPI device, if one exists and supports wakeup at all.
 */
static void physical_device_enable_wakeup(struct acpi_device *adev)
{
	struct device *phys_dev = acpi_get_physical_device(adev->handle);

	if (!phys_dev || !device_can_wakeup(phys_dev))
		return;

	device_set_wakeup_enable(phys_dev, !device_may_wakeup(phys_dev));
}
/*
 * Write handler for /proc/acpi/wakeup: the user writes a device bus id
 * (up to 4 characters) and the matching wakeup-capable device has its
 * wakeup enable state toggled.
 *
 * Returns @count on success (the whole write is always consumed) or
 * -EFAULT if the user buffer cannot be copied.
 */
static ssize_t
acpi_system_write_wakeup_device(struct file *file,
				const char __user * buffer,
				size_t count, loff_t * ppos)
{
	struct list_head *node, *next;
	char strbuf[5];
	char str[5] = "";
	unsigned int len = count;

	/* Bus ids are at most 4 characters; ignore any excess input.
	 * (The old "len < 0" check was dead code: len is unsigned.) */
	if (len > 4)
		len = 4;
	if (copy_from_user(strbuf, buffer, len))
		return -EFAULT;
	strbuf[len] = '\0';
	/* Strip trailing whitespace/newline from the echo'd string. */
	sscanf(strbuf, "%s", str);

	mutex_lock(&acpi_device_lock);
	list_for_each_safe(node, next, &acpi_wakeup_device_list) {
		struct acpi_device *dev =
		    container_of(node, struct acpi_device, wakeup_list);
		if (!dev->wakeup.flags.valid)
			continue;
		if (!strncmp(dev->pnp.bus_id, str, 4)) {
			if (device_can_wakeup(&dev->dev)) {
				/* Toggle the logical device directly. */
				bool enable = !device_may_wakeup(&dev->dev);
				device_set_wakeup_enable(&dev->dev, enable);
			} else {
				/* Fall back to the physical device. */
				physical_device_enable_wakeup(dev);
			}
			break;
		}
	}
	mutex_unlock(&acpi_device_lock);
	return count;
}
/* open() hook: bind the seq_file show callback to this proc entry. */
static int
acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
{
	void *data = PDE(inode)->data;

	return single_open(file, acpi_system_wakeup_device_seq_show, data);
}
/* File operations for /proc/acpi/wakeup (read via seq_file, custom write). */
static const struct file_operations acpi_system_wakeup_device_fops = {
	.owner = THIS_MODULE,
	.open = acpi_system_wakeup_device_open_fs,
	.read = seq_read,
	.write = acpi_system_write_wakeup_device,
	.llseek = seq_lseek,
	.release = single_release,
};
#ifdef HAVE_ACPI_LEGACY_ALARM
/* File operations for the legacy /proc/acpi/alarm RTC interface. */
static const struct file_operations acpi_system_alarm_fops = {
	.owner = THIS_MODULE,
	.open = acpi_system_alarm_open_fs,
	.read = seq_read,
	.write = acpi_system_write_alarm,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * Fixed-event handler for the ACPI RTC event: acknowledge the event and
 * mask it again so a fired alarm does not keep interrupting. It is
 * re-enabled when a new alarm is programmed via acpi_system_write_alarm().
 */
static u32 rtc_handler(void *context)
{
	acpi_clear_event(ACPI_EVENT_RTC);
	acpi_disable_event(ACPI_EVENT_RTC, 0);
	return ACPI_INTERRUPT_HANDLED;
}
#endif /* HAVE_ACPI_LEGACY_ALARM */
/*
 * Create the sleep-related /proc/acpi entries: the legacy "alarm" file
 * (when HAVE_ACPI_LEGACY_ALARM) and the "wakeup" device list. Always
 * returns 0; proc_create() failure is tolerated silently.
 */
int __init acpi_sleep_proc_init(void)
{
#ifdef HAVE_ACPI_LEGACY_ALARM
	/* 'alarm' [R/W] */
	proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
		    acpi_root_dir, &acpi_system_alarm_fops);

	acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
	/*
	 * Disable the RTC event after installing RTC handler.
	 * Only when RTC alarm is set will it be enabled.
	 */
	acpi_clear_event(ACPI_EVENT_RTC);
	acpi_disable_event(ACPI_EVENT_RTC, 0);
#endif				/* HAVE_ACPI_LEGACY_ALARM */

	/* 'wakeup device' [R/W] */
	proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
		    acpi_root_dir, &acpi_system_wakeup_device_fops);
	return 0;
}
| gpl-2.0 |
corcor67/SMPL_M8_SENSE | drivers/rtc/rtc-rv3029c2.c | 3805 | 11868 | /*
* Micro Crystal RV-3029C2 rtc class driver
*
* Author: Gregory Hermant <gregory.hermant@calao-systems.com>
*
* based on previously existing rtc class drivers
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* NOTE: Currently this driver only supports the bare minimum for read
* and write the RTC and alarms. The extra features provided by this chip
* (trickle charger, eeprom, T° compensation) are unavailable.
*/
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
/* Register map */
/* control section */
#define RV3029C2_ONOFF_CTRL 0x00
#define RV3029C2_IRQ_CTRL 0x01
#define RV3029C2_IRQ_CTRL_AIE (1 << 0)
#define RV3029C2_IRQ_FLAGS 0x02
#define RV3029C2_IRQ_FLAGS_AF (1 << 0)
#define RV3029C2_STATUS 0x03
#define RV3029C2_STATUS_VLOW1 (1 << 2)
#define RV3029C2_STATUS_VLOW2 (1 << 3)
#define RV3029C2_STATUS_SR (1 << 4)
#define RV3029C2_STATUS_PON (1 << 5)
#define RV3029C2_STATUS_EEBUSY (1 << 7)
#define RV3029C2_RST_CTRL 0x04
#define RV3029C2_CONTROL_SECTION_LEN 0x05
/* watch section */
#define RV3029C2_W_SEC 0x08
#define RV3029C2_W_MINUTES 0x09
#define RV3029C2_W_HOURS 0x0A
#define RV3029C2_REG_HR_12_24 (1<<6) /* 24h/12h mode */
#define RV3029C2_REG_HR_PM (1<<5) /* PM/AM bit in 12h mode */
#define RV3029C2_W_DATE 0x0B
#define RV3029C2_W_DAYS 0x0C
#define RV3029C2_W_MONTHS 0x0D
#define RV3029C2_W_YEARS 0x0E
#define RV3029C2_WATCH_SECTION_LEN 0x07
/* alarm section */
#define RV3029C2_A_SC 0x10
#define RV3029C2_A_MN 0x11
#define RV3029C2_A_HR 0x12
#define RV3029C2_A_DT 0x13
#define RV3029C2_A_DW 0x14
#define RV3029C2_A_MO 0x15
#define RV3029C2_A_YR 0x16
#define RV3029C2_ALARM_SECTION_LEN 0x07
/* timer section */
#define RV3029C2_TIMER_LOW 0x18
#define RV3029C2_TIMER_HIGH 0x19
/* temperature section */
#define RV3029C2_TEMP_PAGE 0x20
/* eeprom data section */
#define RV3029C2_E2P_EEDATA1 0x28
#define RV3029C2_E2P_EEDATA2 0x29
/* eeprom control section */
#define RV3029C2_CONTROL_E2P_EECTRL 0x30
#define RV3029C2_TRICKLE_1K (1<<0) /* 1K resistance */
#define RV3029C2_TRICKLE_5K (1<<1) /* 5K resistance */
#define RV3029C2_TRICKLE_20K (1<<2) /* 20K resistance */
#define RV3029C2_TRICKLE_80K (1<<3) /* 80K resistance */
#define RV3029C2_CONTROL_E2P_XTALOFFSET 0x31
#define RV3029C2_CONTROL_E2P_QCOEF 0x32
#define RV3029C2_CONTROL_E2P_TURNOVER 0x33
/* user ram section */
#define RV3029C2_USR1_RAM_PAGE 0x38
#define RV3029C2_USR1_SECTION_LEN 0x04
#define RV3029C2_USR2_RAM_PAGE 0x3C
#define RV3029C2_USR2_SECTION_LEN 0x04
/*
 * Read @len consecutive registers starting at @reg into @buf over SMBus.
 * Returns 0 on success, -EINVAL for an out-of-range window, -EIO on a
 * short read, or the negative SMBus error code.
 */
static int
rv3029c2_i2c_read_regs(struct i2c_client *client, u8 reg, u8 *buf,
	unsigned len)
{
	int nread;

	/* Reject any window that starts or runs past the register file. */
	if (reg > RV3029C2_USR1_RAM_PAGE + 7 ||
	    reg + len > RV3029C2_USR1_RAM_PAGE + 8)
		return -EINVAL;

	nread = i2c_smbus_read_i2c_block_data(client, reg, len, buf);
	if (nread < 0)
		return nread;

	return (nread < len) ? -EIO : 0;
}
/*
 * Write @len consecutive registers starting at @reg from @buf over SMBus.
 * Applies the same register-window bounds check as the read path.
 */
static int
rv3029c2_i2c_write_regs(struct i2c_client *client, u8 reg, u8 const buf[],
	unsigned len)
{
	if (reg > RV3029C2_USR1_RAM_PAGE + 7 ||
	    reg + len > RV3029C2_USR1_RAM_PAGE + 8)
		return -EINVAL;

	return i2c_smbus_write_i2c_block_data(client, reg, len, buf);
}
/* Fetch the one-byte status register into buf[0]; 0 on success, -EIO else. */
static int
rv3029c2_i2c_get_sr(struct i2c_client *client, u8 *buf)
{
	if (rv3029c2_i2c_read_regs(client, RV3029C2_STATUS, buf, 1) < 0)
		return -EIO;

	dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
	return 0;
}
/* Write @val to the status register; 0 on success, -EIO on I2C failure. */
static int
rv3029c2_i2c_set_sr(struct i2c_client *client, u8 val)
{
	u8 reg[1] = { val };
	int err;

	err = rv3029c2_i2c_write_regs(client, RV3029C2_STATUS, reg, 1);
	dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", reg[0], reg[0]);

	return (err < 0) ? -EIO : 0;
}
/*
 * Read the current time from the chip's watch section into @tm.
 * Registers are BCD; the hour register may be in 12h or 24h mode
 * (selected by the HR_12_24 bit) and is normalized to 0-23 here.
 * tm_year is returned as an offset from 1900 (chip stores 2000-based),
 * tm_mon as 0-11 and tm_wday as 0-6 per struct rtc_time convention.
 */
static int
rv3029c2_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
{
	u8 buf[1];
	int ret;
	u8 regs[RV3029C2_WATCH_SECTION_LEN] = { 0, };

	/* Touch the status register first to confirm the chip responds. */
	ret = rv3029c2_i2c_get_sr(client, buf);
	if (ret < 0) {
		dev_err(&client->dev, "%s: reading SR failed\n", __func__);
		return -EIO;
	}

	ret = rv3029c2_i2c_read_regs(client, RV3029C2_W_SEC , regs,
					RV3029C2_WATCH_SECTION_LEN);
	if (ret < 0) {
		dev_err(&client->dev, "%s: reading RTC section failed\n",
			__func__);
		return ret;
	}

	tm->tm_sec = bcd2bin(regs[RV3029C2_W_SEC-RV3029C2_W_SEC]);
	tm->tm_min = bcd2bin(regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC]);

	/* HR field has a more complex interpretation */
	{
		const u8 _hr = regs[RV3029C2_W_HOURS-RV3029C2_W_SEC];
		if (_hr & RV3029C2_REG_HR_12_24) {
			/* 12h format */
			tm->tm_hour = bcd2bin(_hr & 0x1f);
			if (_hr & RV3029C2_REG_HR_PM)	/* PM flag set */
				tm->tm_hour += 12;
		} else /* 24h format */
			tm->tm_hour = bcd2bin(_hr & 0x3f);
	}

	tm->tm_mday = bcd2bin(regs[RV3029C2_W_DATE-RV3029C2_W_SEC]);
	/* chip months are 1-12, rtc_time wants 0-11 */
	tm->tm_mon = bcd2bin(regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC]) - 1;
	/* chip year is 2000-based, tm_year is 1900-based */
	tm->tm_year = bcd2bin(regs[RV3029C2_W_YEARS-RV3029C2_W_SEC]) + 100;
	tm->tm_wday = bcd2bin(regs[RV3029C2_W_DAYS-RV3029C2_W_SEC]) - 1;

	return 0;
}
/* rtc_class_ops.read_time adapter: resolve the i2c client and delegate. */
static int rv3029c2_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct i2c_client *client = to_i2c_client(dev);

	return rv3029c2_i2c_read_time(client, tm);
}
/*
 * Read the programmed alarm time from the chip's alarm section.
 * Each alarm register carries an enable bit in its MSB, which is masked
 * off here; values are BCD and converted to struct rtc_time conventions
 * (0-based month/weekday, 1900-based year).
 */
static int
rv3029c2_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
{
	struct rtc_time *const tm = &alarm->time;
	int ret;
	u8 regs[8];

	/* Probe the status register first to confirm the chip responds. */
	ret = rv3029c2_i2c_get_sr(client, regs);
	if (ret < 0) {
		dev_err(&client->dev, "%s: reading SR failed\n", __func__);
		return -EIO;
	}

	ret = rv3029c2_i2c_read_regs(client, RV3029C2_A_SC, regs,
					RV3029C2_ALARM_SECTION_LEN);

	if (ret < 0) {
		dev_err(&client->dev, "%s: reading alarm section failed\n",
			__func__);
		return ret;
	}

	tm->tm_sec = bcd2bin(regs[RV3029C2_A_SC-RV3029C2_A_SC] & 0x7f);
	tm->tm_min = bcd2bin(regs[RV3029C2_A_MN-RV3029C2_A_SC] & 0x7f);
	tm->tm_hour = bcd2bin(regs[RV3029C2_A_HR-RV3029C2_A_SC] & 0x3f);
	tm->tm_mday = bcd2bin(regs[RV3029C2_A_DT-RV3029C2_A_SC] & 0x3f);
	tm->tm_mon = bcd2bin(regs[RV3029C2_A_MO-RV3029C2_A_SC] & 0x1f) - 1;
	tm->tm_year = bcd2bin(regs[RV3029C2_A_YR-RV3029C2_A_SC] & 0x7f) + 100;
	tm->tm_wday = bcd2bin(regs[RV3029C2_A_DW-RV3029C2_A_SC] & 0x07) - 1;

	return 0;
}
/* rtc_class_ops.read_alarm adapter: resolve the i2c client and delegate. */
static int
rv3029c2_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
	struct i2c_client *client = to_i2c_client(dev);

	return rv3029c2_i2c_read_alarm(client, alarm);
}
/*
 * Enable (@enable != 0) or disable (@enable == 0) the alarm interrupt
 * by read-modify-writing the AIE bit in the IRQ control register.
 * Returns 0 on success or the negative I2C error.
 */
static int rv3029c2_rtc_i2c_alarm_set_irq(struct i2c_client *client,
						int enable)
{
	int ret;
	u8 buf[1];

	/* read-modify-write the AIE bit of the IRQ control register */
	ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_CTRL,	buf, 1);
	if (ret < 0) {
		dev_err(&client->dev, "can't read INT reg\n");
		return ret;
	}
	if (enable)
		buf[0] |= RV3029C2_IRQ_CTRL_AIE;
	else
		buf[0] &= ~RV3029C2_IRQ_CTRL_AIE;

	ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_CTRL, buf, 1);
	if (ret < 0) {
		dev_err(&client->dev, "can't set INT reg\n");
		return ret;
	}

	return 0;
}
/*
 * Program the chip's alarm section from @alarm and arm or disarm the
 * alarm interrupt according to alarm->enabled.
 *
 * Bug fix: the disable path previously called
 * rv3029c2_rtc_i2c_alarm_set_irq(client, 1), i.e. it *enabled* the AIE
 * interrupt when the caller asked for the alarm to be disabled. It must
 * pass 0 (this matches the upstream kernel fix for this driver).
 */
static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client,
					struct rtc_wkalrm *alarm)
{
	struct rtc_time *const tm = &alarm->time;
	int ret;
	u8 regs[8];

	/*
	 * The clock has an 8 bit wide bcd-coded register (they never learn)
	 * for the year. tm_year is an offset from 1900 and we are interested
	 * in the 2000-2099 range, so any value less than 100 is invalid.
	 */
	if (tm->tm_year < 100)
		return -EINVAL;

	/* Probe the status register first to confirm the chip responds. */
	ret = rv3029c2_i2c_get_sr(client, regs);
	if (ret < 0) {
		dev_err(&client->dev, "%s: reading SR failed\n", __func__);
		return -EIO;
	}

	/* Encode the alarm time as BCD in the alarm-section layout. */
	regs[RV3029C2_A_SC-RV3029C2_A_SC] = bin2bcd(tm->tm_sec & 0x7f);
	regs[RV3029C2_A_MN-RV3029C2_A_SC] = bin2bcd(tm->tm_min & 0x7f);
	regs[RV3029C2_A_HR-RV3029C2_A_SC] = bin2bcd(tm->tm_hour & 0x3f);
	regs[RV3029C2_A_DT-RV3029C2_A_SC] = bin2bcd(tm->tm_mday & 0x3f);
	regs[RV3029C2_A_MO-RV3029C2_A_SC] = bin2bcd((tm->tm_mon & 0x1f) - 1);
	regs[RV3029C2_A_DW-RV3029C2_A_SC] = bin2bcd((tm->tm_wday & 7) - 1);
	regs[RV3029C2_A_YR-RV3029C2_A_SC] = bin2bcd((tm->tm_year & 0x7f) - 100);

	ret = rv3029c2_i2c_write_regs(client, RV3029C2_A_SC, regs,
					RV3029C2_ALARM_SECTION_LEN);
	if (ret < 0)
		return ret;

	if (alarm->enabled) {
		u8 buf[1];

		/* clear the stale AF flag before re-arming */
		ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_FLAGS,
						buf, 1);
		if (ret < 0) {
			dev_err(&client->dev, "can't read alarm flag\n");
			return ret;
		}
		buf[0] &= ~RV3029C2_IRQ_FLAGS_AF;
		ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_FLAGS,
						buf, 1);
		if (ret < 0) {
			dev_err(&client->dev, "can't set alarm flag\n");
			return ret;
		}
		/* enable AIE irq */
		ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1);
		if (ret)
			return ret;

		dev_dbg(&client->dev, "alarm IRQ armed\n");
	} else {
		/* disable AIE irq (was incorrectly passing 1 here) */
		ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0);
		if (ret)
			return ret;

		dev_dbg(&client->dev, "alarm IRQ disabled\n");
	}

	return 0;
}
/* rtc_class_ops.set_alarm adapter: resolve the i2c client and delegate. */
static int rv3029c2_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
	struct i2c_client *client = to_i2c_client(dev);

	return rv3029c2_rtc_i2c_set_alarm(client, alarm);
}
/*
 * Write @tm into the chip's watch section (BCD-encoded) and clear the
 * power-on (PON) status bit so the time is marked as valid.
 *
 * Bug fix: the error message after a failed status-register *write*
 * said "reading SR failed" (copy-pasted from the read path); it now
 * correctly says "writing SR failed".
 */
static int
rv3029c2_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
{
	u8 regs[8];
	int ret;

	/*
	 * The clock has an 8 bit wide bcd-coded register (they never learn)
	 * for the year. tm_year is an offset from 1900 and we are interested
	 * in the 2000-2099 range, so any value less than 100 is invalid.
	 */
	if (tm->tm_year < 100)
		return -EINVAL;

	regs[RV3029C2_W_SEC-RV3029C2_W_SEC] = bin2bcd(tm->tm_sec);
	regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC] = bin2bcd(tm->tm_min);
	regs[RV3029C2_W_HOURS-RV3029C2_W_SEC] = bin2bcd(tm->tm_hour);
	regs[RV3029C2_W_DATE-RV3029C2_W_SEC] = bin2bcd(tm->tm_mday);
	regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC] = bin2bcd(tm->tm_mon+1);
	regs[RV3029C2_W_DAYS-RV3029C2_W_SEC] = bin2bcd((tm->tm_wday & 7)+1);
	regs[RV3029C2_W_YEARS-RV3029C2_W_SEC] = bin2bcd(tm->tm_year - 100);

	ret = rv3029c2_i2c_write_regs(client, RV3029C2_W_SEC, regs,
					RV3029C2_WATCH_SECTION_LEN);
	if (ret < 0)
		return ret;

	ret = rv3029c2_i2c_get_sr(client, regs);
	if (ret < 0) {
		dev_err(&client->dev, "%s: reading SR failed\n", __func__);
		return ret;
	}
	/* clear PON bit */
	ret = rv3029c2_i2c_set_sr(client, (regs[0] & ~RV3029C2_STATUS_PON));
	if (ret < 0) {
		dev_err(&client->dev, "%s: writing SR failed\n", __func__);
		return ret;
	}

	return 0;
}
/* rtc_class_ops.set_time adapter: resolve the i2c client and delegate. */
static int rv3029c2_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct i2c_client *client = to_i2c_client(dev);

	return rv3029c2_i2c_set_time(client, tm);
}
/* RTC class operations exposed to the rtc core. */
static const struct rtc_class_ops rv3029c2_rtc_ops = {
	.read_time	= rv3029c2_rtc_read_time,
	.set_time	= rv3029c2_rtc_set_time,
	.read_alarm	= rv3029c2_rtc_read_alarm,
	.set_alarm	= rv3029c2_rtc_set_alarm,
};

/* I2C device ids this driver binds to. */
static struct i2c_device_id rv3029c2_id[] = {
	{ "rv3029c2", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, rv3029c2_id);
/*
 * Probe: verify the adapter supports SMBus emulation, check that the
 * chip actually answers on the bus, then register the RTC class device.
 *
 * Improvement: the status register is now read *before* registering the
 * rtc device. The original registered first and then had to unregister
 * again when the chip did not respond; probing first avoids briefly
 * exposing a device that is not there and simplifies the error path.
 */
static int __devinit
rv3029c2_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct rtc_device *rtc;
	int rc;
	u8 buf[1];

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_EMUL))
		return -ENODEV;

	/* Make sure the chip responds before registering anything. */
	rc = rv3029c2_i2c_get_sr(client, buf);
	if (rc < 0) {
		dev_err(&client->dev, "reading status failed\n");
		return rc;
	}

	rtc = rtc_device_register(client->name,
				&client->dev, &rv3029c2_rtc_ops,
				THIS_MODULE);

	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	i2c_set_clientdata(client, rtc);

	return 0;
}
/* Remove: unregister the rtc device stashed in the client's drvdata. */
static int __devexit rv3029c2_remove(struct i2c_client *client)
{
	rtc_device_unregister(i2c_get_clientdata(client));

	return 0;
}
/* I2C driver glue; module_i2c_driver() generates init/exit boilerplate. */
static struct i2c_driver rv3029c2_driver = {
	.driver = {
		.name = "rtc-rv3029c2",
	},
	.probe = rv3029c2_probe,
	.remove = __devexit_p(rv3029c2_remove),
	.id_table = rv3029c2_id,
};

module_i2c_driver(rv3029c2_driver);

MODULE_AUTHOR("Gregory Hermant <gregory.hermant@calao-systems.com>");
MODULE_DESCRIPTION("Micro Crystal RV3029C2 RTC driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
shareefalis/android_kernel_samsung_d2-jb_2.5.1 | arch/arm/mach-omap2/board-ti8168evm.c | 4829 | 1878 | /*
* Code for TI8168/TI8148 EVM.
*
* Copyright (C) 2010 Texas Instruments, Inc. - http://www.ti.com/
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <plat/irqs.h>
#include <plat/board.h>
#include "common.h"
#include <plat/usb.h>
/* MUSB (USB OTG controller) configuration for the TI81xx EVM boards. */
static struct omap_musb_board_data musb_board_data = {
	.set_phy_power	= ti81xx_musb_phy_power,
	.interface_type	= MUSB_INTERFACE_ULPI,
	.mode		= MUSB_OTG,
	.power		= 500,	/* VBUS current budget in 2 mA units */
};

/* No board-specific config entries; kept so omap_board_config is valid. */
static struct omap_board_config_kernel ti81xx_evm_config[] __initdata = {
};
/*
 * Board init for the TI8168/TI8148 EVMs: bring up the UARTs and SDRAM
 * controller, publish the (empty) board config table, then start MUSB.
 */
static void __init ti81xx_evm_init(void)
{
	omap_serial_init();
	omap_sdrc_init(NULL, NULL);
	omap_board_config = ti81xx_evm_config;
	omap_board_config_size = ARRAY_SIZE(ti81xx_evm_config);
	usb_musb_init(&musb_board_data);
}
/* Machine descriptors; both EVM variants share the same init hooks. */
MACHINE_START(TI8168EVM, "ti8168evm")
	/* Maintainer: Texas Instruments */
	.atag_offset	= 0x100,
	.map_io		= ti81xx_map_io,
	.init_early	= ti81xx_init_early,
	.init_irq	= ti81xx_init_irq,
	.timer		= &omap3_timer,
	.init_machine	= ti81xx_evm_init,
	.restart	= omap_prcm_restart,
MACHINE_END

MACHINE_START(TI8148EVM, "ti8148evm")
	/* Maintainer: Texas Instruments */
	.atag_offset	= 0x100,
	.map_io		= ti81xx_map_io,
	.init_early	= ti81xx_init_early,
	.init_irq	= ti81xx_init_irq,
	.timer		= &omap3_timer,
	.init_machine	= ti81xx_evm_init,
	.restart	= omap_prcm_restart,
MACHINE_END
| gpl-2.0 |
MattCrystal/yolo-computing-machine | arch/arm/mach-omap2/board-am3517crane.c | 4829 | 2773 | /*
* Support for AM3517/05 Craneboard
* http://www.mistralsolutions.com/products/craneboard.php
*
* Copyright (C) 2010 Mistral Solutions Pvt Ltd. <www.mistralsolutions.com>
* Author: R.Srinath <srinath@mistralsolutions.com>
*
* Based on mach-omap2/board-am3517evm.c
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation version 2.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
* whether express or implied; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <plat/board.h>
#include "common.h"
#include <plat/usb.h>
#include "mux.h"
#include "control.h"
/* GPIOs wired to the USB PHY on the Craneboard. */
#define GPIO_USB_POWER		35
#define GPIO_USB_NRESET	38

/* Board initialization */
/* No board-specific config entries; table kept for omap_board_config. */
static struct omap_board_config_kernel am3517_crane_config[] __initdata = {
};

#ifdef CONFIG_OMAP_MUX
/* No extra pin-mux overrides beyond the defaults. */
static struct omap_board_mux board_mux[] __initdata = {
	{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#endif

/* EHCI on port 0 (PHY mode); ports 1 and 2 are unused on this board. */
static struct usbhs_omap_board_data usbhs_bdata __initdata = {
	.port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
	.port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
	.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,

	.phy_reset  = true,
	.reset_gpio_port[0]  = GPIO_USB_NRESET,
	.reset_gpio_port[1]  = -EINVAL,
	.reset_gpio_port[2]  = -EINVAL
};
/*
 * Craneboard init: basic OMAP bring-up, then mux and drive the GPIOs
 * that power and reset the USB EHCI PHY before starting the USB host.
 * Any failure in the USB GPIO setup aborts only the USB portion.
 */
static void __init am3517_crane_init(void)
{
	int ret;

	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
	omap_serial_init();
	omap_sdrc_init(NULL, NULL);
	omap_board_config = am3517_crane_config;
	omap_board_config_size = ARRAY_SIZE(am3517_crane_config);

	/* Configure GPIO for EHCI port */
	if (omap_mux_init_gpio(GPIO_USB_NRESET, OMAP_PIN_OUTPUT)) {
		pr_err("Can not configure mux for GPIO_USB_NRESET %d\n",
			GPIO_USB_NRESET);
		return;
	}

	if (omap_mux_init_gpio(GPIO_USB_POWER, OMAP_PIN_OUTPUT)) {
		pr_err("Can not configure mux for GPIO_USB_POWER %d\n",
			GPIO_USB_POWER);
		return;
	}

	/* Drive the PHY power rail high before registering the host. */
	ret = gpio_request_one(GPIO_USB_POWER, GPIOF_OUT_INIT_HIGH,
			       "usb_ehci_enable");
	if (ret < 0) {
		pr_err("Can not request GPIO %d\n", GPIO_USB_POWER);
		return;
	}

	usbhs_init(&usbhs_bdata);
}
/* Machine descriptor for the AM3517/05 Craneboard. */
MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD")
	.atag_offset	= 0x100,
	.reserve	= omap_reserve,
	.map_io		= omap3_map_io,
	.init_early	= am35xx_init_early,
	.init_irq	= omap3_init_irq,
	.handle_irq	= omap3_intc_handle_irq,
	.init_machine	= am3517_crane_init,
	.timer		= &omap3_timer,
	.restart	= omap_prcm_restart,
MACHINE_END
| gpl-2.0 |
garwynn/caf_kernel_msm | arch/x86/kernel/early_printk.c | 5085 | 6533 | #include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/screen_info.h>
#include <linux/usb/ch9.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/fcntl.h>
#include <asm/setup.h>
#include <xen/hvc-console.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
#include <asm/mrst.h>
#include <asm/pgtable.h>
#include <linux/usb/ehci_def.h>
/* Simple VGA output */
#define VGABASE (__ISA_IO_base + 0xb8000)
static int max_ypos = 25, max_xpos = 80;
static int current_ypos = 25, current_xpos;
/*
 * Write @n bytes of @str to VGA text memory, scrolling one line at a
 * time when the cursor passes the bottom row. Each cell is written as
 * (attribute 0x07 << 8 | char). With KGDB_KDB, backspace and carriage
 * return are additionally interpreted for interactive use.
 */
static void early_vga_write(struct console *con, const char *str, unsigned n)
{
	char c;
	int  i, k, j;

	while ((c = *str++) != '\0' && n-- > 0) {
		if (current_ypos >= max_ypos) {
			/* scroll 1 line up */
			for (k = 1, j = 0; k < max_ypos; k++, j++) {
				for (i = 0; i < max_xpos; i++) {
					writew(readw(VGABASE+2*(max_xpos*k+i)),
					       VGABASE + 2*(max_xpos*j + i));
				}
			}
			/* blank the freed bottom row (0x720 = space, attr 7) */
			for (i = 0; i < max_xpos; i++)
				writew(0x720, VGABASE + 2*(max_xpos*j + i));
			current_ypos = max_ypos-1;
		}
#ifdef CONFIG_KGDB_KDB
		if (c == '\b') {
			if (current_xpos > 0)
				current_xpos--;
		} else if (c == '\r') {
			current_xpos = 0;
		} else
#endif
		if (c == '\n') {
			current_xpos = 0;
			current_ypos++;
		} else if (c != '\r')  {
			writew(((0x7 << 8) | (unsigned short) c),
			       VGABASE + 2*(max_xpos*current_ypos +
						current_xpos++));
			if (current_xpos >= max_xpos) {
				current_xpos = 0;
				current_ypos++;
			}
		}
	}
}
/* Boot-time VGA console; index -1 means "not yet registered". */
static struct console early_vga_console = {
	.name =		"earlyvga",
	.write =	early_vga_write,
	.flags =	CON_PRINTBUFFER,
	.index =	-1,
};
/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */

/* I/O base of the early UART; overridable via earlyprintk=serial,... */
static int early_serial_base = 0x3f8;  /* ttyS0 */

#define XMTRDY          0x20

#define DLAB		0x80

/* 8250-compatible UART register offsets from early_serial_base. */
#define TXR             0       /*  Transmit register (WRITE) */
#define RXR             0       /*  Receive register  (READ)  */
#define IER             1       /*  Interrupt Enable          */
#define IIR             2       /*  Interrupt ID              */
#define FCR             2       /*  FIFO control              */
#define LCR             3       /*  Line control              */
#define MCR             4       /*  Modem control             */
#define LSR             5       /*  Line Status               */
#define MSR             6       /*  Modem Status              */
#define DLL             0       /*  Divisor Latch Low         */
#define DLH             1       /*  Divisor latch High        */
/*
 * Emit one byte on the early UART, busy-waiting for the transmitter to
 * become ready. Returns 0 on success, -1 if the wait timed out (the
 * byte is written regardless, matching the historical behavior).
 */
static int early_serial_putc(unsigned char ch)
{
	unsigned wait = 0xffff;

	while (!(inb(early_serial_base + LSR) & XMTRDY) && --wait)
		cpu_relax();
	outb(ch, early_serial_base + TXR);
	return wait ? 0 : -1;
}
/*
 * Console write hook: emit up to @n bytes of @s, stopping early at a
 * NUL, translating '\n' to "\r\n" for terminals.
 */
static void early_serial_write(struct console *con, const char *s, unsigned n)
{
	for (; *s && n-- > 0; s++) {
		if (*s == '\n')
			early_serial_putc('\r');
		early_serial_putc(*s);
	}
}
#define DEFAULT_BAUD 9600

/*
 * Parse the "earlyprintk=serial[,port][,baud]" option string and program
 * the UART: 8n1, no interrupts, no FIFO, DTR+RTS, divisor from the
 * requested baud. @s points just past "serial"; the port may be given as
 * "ttyS<N>", a bare index, or a raw hex I/O base ("0x...").
 */
static __init void early_serial_init(char *s)
{
	unsigned char c;
	unsigned divisor;
	unsigned baud = DEFAULT_BAUD;
	char *e;

	if (*s == ',')
		++s;

	if (*s) {
		unsigned port;
		if (!strncmp(s, "0x", 2)) {
			/* raw I/O port base given in hex */
			early_serial_base = simple_strtoul(s, &e, 16);
		} else {
			static const int __initconst bases[] = { 0x3f8, 0x2f8 };

			if (!strncmp(s, "ttyS", 4))
				s += 4;
			port = simple_strtoul(s, &e, 10);
			/* only ttyS0/ttyS1 are known; default to ttyS0 */
			if (port > 1 || s == e)
				port = 0;
			early_serial_base = bases[port];
		}
		s += strcspn(s, ",");
		if (*s == ',')
			s++;
	}

	outb(0x3, early_serial_base + LCR);	/* 8n1 */
	outb(0, early_serial_base + IER);	/* no interrupt */
	outb(0, early_serial_base + FCR);	/* no fifo */
	outb(0x3, early_serial_base + MCR);	/* DTR + RTS */

	if (*s) {
		baud = simple_strtoul(s, &e, 0);
		if (baud == 0 || s == e)
			baud = DEFAULT_BAUD;
	}

	/* program the divisor latch (DLAB set while writing DLL/DLH) */
	divisor = 115200 / baud;
	c = inb(early_serial_base + LCR);
	outb(c | DLAB, early_serial_base + LCR);
	outb(divisor & 0xff, early_serial_base + DLL);
	outb((divisor >> 8) & 0xff, early_serial_base + DLH);
	outb(c & ~DLAB, early_serial_base + LCR);
}
/* Boot-time serial console; index -1 means "not yet registered". */
static struct console early_serial_console = {
	.name =		"earlyser",
	.write =	early_serial_write,
	.flags =	CON_PRINTBUFFER,
	.index =	-1,
};

/* Direct interface for emergencies */
static struct console *early_console = &early_vga_console;
static int __initdata early_console_initialized;
/*
 * Format into a fixed 512-byte stack buffer and push it straight to the
 * current early console, bypassing the normal printk path. Intended for
 * use before console_init() or from emergency contexts.
 */
asmlinkage void early_printk(const char *fmt, ...)
{
	char buf[512];
	int n;
	va_list ap;

	va_start(ap, fmt);
	n = vscnprintf(buf, sizeof(buf), fmt, ap);
	early_console->write(early_console, buf, n);
	va_end(ap);
}
static inline void early_console_register(struct console *con, int keep_early)
{
if (early_console->index != -1) {
printk(KERN_CRIT "ERROR: earlyprintk= %s already used\n",
con->name);
return;
}
early_console = con;
if (keep_early)
early_console->flags &= ~CON_BOOT;
else
early_console->flags |= CON_BOOT;
register_console(early_console);
}
/*
 * Parse the "earlyprintk=" kernel parameter. The option string may name
 * several backends (serial/ttyS, vga, dbgp, xen, mrst, hsu) plus the
 * "keep" keyword; the scan advances one character at a time so the
 * keywords may appear anywhere in the string. Only the first matching
 * console actually registers (early_console_register rejects repeats).
 */
static int __init setup_early_printk(char *buf)
{
	int keep;

	if (!buf)
		return 0;

	if (early_console_initialized)
		return 0;
	early_console_initialized = 1;

	keep = (strstr(buf, "keep") != NULL);

	while (*buf != '\0') {
		if (!strncmp(buf, "serial", 6)) {
			buf += 6;
			early_serial_init(buf);
			early_console_register(&early_serial_console, keep);
			if (!strncmp(buf, ",ttyS", 5))
				buf += 5;
		}
		if (!strncmp(buf, "ttyS", 4)) {
			early_serial_init(buf + 4);
			early_console_register(&early_serial_console, keep);
		}
		if (!strncmp(buf, "vga", 3) &&
		    boot_params.screen_info.orig_video_isVGA == 1) {
			/* adopt the geometry/cursor the bootloader left */
			max_xpos = boot_params.screen_info.orig_video_cols;
			max_ypos = boot_params.screen_info.orig_video_lines;
			current_ypos = boot_params.screen_info.orig_y;
			early_console_register(&early_vga_console, keep);
		}
#ifdef CONFIG_EARLY_PRINTK_DBGP
		if (!strncmp(buf, "dbgp", 4) && !early_dbgp_init(buf + 4))
			early_console_register(&early_dbgp_console, keep);
#endif
#ifdef CONFIG_HVC_XEN
		if (!strncmp(buf, "xen", 3))
			early_console_register(&xenboot_console, keep);
#endif
#ifdef CONFIG_EARLY_PRINTK_INTEL_MID
		if (!strncmp(buf, "mrst", 4)) {
			mrst_early_console_init();
			early_console_register(&early_mrst_console, keep);
		}

		if (!strncmp(buf, "hsu", 3)) {
			hsu_early_console_init(buf + 3);
			early_console_register(&early_hsu_console, keep);
		}
#endif
		buf++;
	}

	return 0;
}
early_param("earlyprintk", setup_early_printk);
| gpl-2.0 |
blueskycoco/sam | fs/ocfs2/quota_global.c | 7133 | 27300 | /*
* Implementation of operations over global quota file
*/
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <cluster/masklog.h>
#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "quota.h"
#include "ocfs2_trace.h"
/*
* Locking of quotas with OCFS2 is rather complex. Here are rules that
* should be obeyed by all the functions:
* - any write of quota structure (either to local or global file) is protected
* by dqio_mutex or dquot->dq_lock.
* - any modification of global quota file holds inode cluster lock, i_mutex,
* and ip_alloc_sem of the global quota file (achieved by
* ocfs2_lock_global_qf). It also has to hold qinfo_lock.
* - an allocation of new blocks for local quota file is protected by
* its ip_alloc_sem
*
* A rough sketch of locking dependencies (lf = local file, gf = global file):
* Normal filesystem operation:
* start_trans -> dqio_mutex -> write to lf
* Syncing of local and global file:
* ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
* write to gf
* -> write to lf
* Acquire dquot for the first time:
* dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
* -> alloc space for gf
* -> start_trans -> qinfo_lock -> write to gf
* -> ip_alloc_sem of lf -> alloc space for lf
* -> write to lf
* Release last reference to dquot:
* dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
* -> write to lf
* Note that all the above operations also hold the inode cluster lock of lf.
* Recovery:
* inode cluster lock of recovered lf
* -> read bitmaps -> ip_alloc_sem of lf
* -> ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
* write to gf
*/
static void qsync_work_fn(struct work_struct *work);
/*
 * Copy a dquot from its on-disk (little-endian) global-file format into
 * the in-memory mem_dqblk. Fields the administrator has set locally
 * (tracked by the DQ_LASTSET_B + QIF_*_B flag bits) are deliberately
 * left untouched so local changes are not lost on refresh.
 */
static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	/* Update from disk only entries not set by the admin */
	if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
		m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
		m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
	if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
		m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
		m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
	}
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
	if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
		m->dqb_btime = le64_to_cpu(d->dqb_btime);
	if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
		m->dqb_itime = le64_to_cpu(d->dqb_itime);
	/* cluster-wide use count is OCFS2-specific, always refreshed */
	OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}
/*
 * Serialize an in-memory dquot into the on-disk (little-endian) global
 * quota file entry, including the OCFS2-specific cluster use count.
 * Padding fields are zeroed for deterministic on-disk content.
 */
static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct mem_dqblk *m = &dquot->dq_dqb;

	d->dqb_id = cpu_to_le32(dquot->dq_id);
	d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
	d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
	d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
	d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
	d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
	d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
	d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
	d->dqb_btime = cpu_to_le64(m->dqb_btime);
	d->dqb_itime = cpu_to_le64(m->dqb_itime);
	d->dqb_pad1 = d->dqb_pad2 = 0;
}
/*
 * qtree callback: does the on-disk entry @dp belong to @dquot's id?
 * Unused (all-zero) tree entries never match.
 */
static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
	struct ocfs2_global_disk_dqblk *d = dp;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

	if (qtree_entry_unused(&oinfo->dqi_gi, dp))
		return 0;
	return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}
/* Format callbacks handed to the generic quota-tree code. */
struct qtree_fmt_operations ocfs2_global_ops = {
	.mem2disk_dqblk = ocfs2_global_mem2diskdqb,
	.disk2mem_dqblk = ocfs2_global_disk2memdqb,
	.is_id = ocfs2_global_is_id,
};
/*
 * Buffer validation callback for quota blocks: verify the metadata ECC
 * stored in the block trailer. An ECC failure is reported to the caller
 * but does not take the filesystem down, since the damage is confined
 * to this one block.
 */
int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
{
	struct ocfs2_disk_dqtrailer *dqt =
		ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

	trace_ocfs2_validate_quota_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the filesystem running.  We know any error is
	 * local to this block.
	 */
	return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}
/*
 * Read one quota block at physical block @p_block into *@bhp, running
 * the ECC validation callback. *@bhp is NULL on failure.
 */
int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
				struct buffer_head **bhp)
{
	int status;

	*bhp = NULL;
	status = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
				   ocfs2_validate_quota_block);
	if (status)
		mlog_errno(status);
	return status;
}
/* Read data from global quotafile - avoid pagecache and such because we cannot
 * afford acquiring the locks... We use quota cluster lock to serialize
 * operations. Caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
			 size_t len, loff_t off)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	loff_t i_size = i_size_read(gqinode);
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	struct buffer_head *bh;
	size_t toread, tocopy;
	/* pblock/pcount track a run of contiguous physical blocks so the
	 * extent map is consulted only once per extent, not per block */
	u64 pblock = 0, pcount = 0;

	/* clamp the request to the file size; reads past EOF return 0 */
	if (off > i_size)
		return 0;
	if (off + len > i_size)
		len = i_size - off;
	toread = len;
	while (toread > 0) {
		tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
		if (!pcount) {
			err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock,
							  &pcount, NULL);
			if (err) {
				mlog_errno(err);
				return err;
			}
		} else {
			/* still inside the previously mapped extent */
			pcount--;
			pblock++;
		}
		bh = NULL;
		err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
		if (err) {
			mlog_errno(err);
			return err;
		}
		memcpy(data, bh->b_data + offset, tocopy);
		brelse(bh);
		/* only the first block may start at a non-zero offset */
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}
/* Write to quotafile (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
			  const char *data, size_t len, loff_t off)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct inode *gqinode = oinfo->dqi_gqinode;
	int offset = off & (sb->s_blocksize - 1);
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0, new = 0, ja_type;
	struct buffer_head *bh = NULL;
	handle_t *handle = journal_current_handle();
	u64 pblock, pcount;

	/* a running journal transaction is a hard precondition here */
	if (!handle) {
		mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
		     "because transaction was not started.\n",
		     (unsigned long long)off, (unsigned long long)len);
		return -EIO;
	}
	/* never write into the reserved trailer area of a quota block */
	if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
		WARN_ON(1);
		len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
	}

	if (gqinode->i_size < off + len) {
		loff_t rounded_end =
				ocfs2_align_bytes_to_blocks(sb, off + len);

		/* Space is already allocated in ocfs2_acquire_dquot() */
		err = ocfs2_simple_size_update(gqinode,
					       oinfo->dqi_gqi_bh,
					       rounded_end);
		if (err < 0)
			goto out;
		new = 1;
	}
	err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock, &pcount, NULL);
	if (err) {
		mlog_errno(err);
		goto out;
	}
	/* Not rewriting whole block? Then read-modify-write; a brand-new
	 * block can be fabricated with sb_getblk() without a disk read. */
	if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
	    !new) {
		err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
		ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
	} else {
		bh = sb_getblk(sb, pblock);
		if (!bh)
			err = -ENOMEM;
		ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
	}
	if (err) {
		mlog_errno(err);
		goto out;
	}
	lock_buffer(bh);
	if (new)
		memset(bh->b_data, 0, sb->s_blocksize);
	memcpy(bh->b_data + offset, data, len);
	flush_dcache_page(bh->b_page);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
	err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
				      ja_type);
	if (err < 0) {
		brelse(bh);
		goto out;
	}
	ocfs2_journal_dirty(handle, bh);
	brelse(bh);
out:
	if (err) {
		mlog_errno(err);
		return err;
	}
	/* bump i_version and log the inode so readers see the update */
	gqinode->i_version++;
	ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
	return len;
}
/*
 * Take the cluster lock on the global quota file inode, plus i_mutex and
 * ip_alloc_sem when an exclusive (ex) lock is requested.  The inode buffer
 * head is cached in dqi_gqi_bh and refcounted via dqi_gqi_count so that
 * nested lockers share the same buffer.
 */
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	int status;
	struct buffer_head *bh = NULL;

	status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
	if (status < 0)
		return status;
	spin_lock(&dq_data_lock);
	if (!oinfo->dqi_gqi_count++)
		oinfo->dqi_gqi_bh = bh;
	else
		/* A nested lock must observe the very same buffer head. */
		WARN_ON(bh != oinfo->dqi_gqi_bh);
	spin_unlock(&dq_data_lock);
	if (ex) {
		mutex_lock(&oinfo->dqi_gqinode->i_mutex);
		down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
	} else {
		down_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
	}
	return 0;
}
/*
 * Undo ocfs2_lock_global_qf(): drop the semaphores/mutex in reverse order,
 * release the cluster lock, and drop our reference on the cached inode
 * buffer head (clearing the cache when the last locker leaves).
 */
void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
	if (ex) {
		up_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
		mutex_unlock(&oinfo->dqi_gqinode->i_mutex);
	} else {
		up_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
	}
	ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
	brelse(oinfo->dqi_gqi_bh);
	spin_lock(&dq_data_lock);
	if (!--oinfo->dqi_gqi_count)
		oinfo->dqi_gqi_bh = NULL;
	spin_unlock(&dq_data_lock);
}
/* Read information header from global quota file */
/*
 * Read the on-disk info header of the global quota file for the given quota
 * type, populate the in-memory ocfs2_mem_dqinfo / qtree info, and kick off
 * the periodic sync worker.  Returns 0 or a negative errno.
 */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
	struct inode *gqinode = NULL;
	/* Map quota type -> system inode of the matching global quota file. */
	unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
					GROUP_QUOTA_SYSTEM_INODE };
	struct ocfs2_global_disk_dqinfo dinfo;
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	u64 pcount;
	int status;

	/* Read global header */
	gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
					      OCFS2_INVALID_SLOT);
	if (!gqinode) {
		mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
		     type);
		status = -EINVAL;
		goto out_err;
	}
	oinfo->dqi_gi.dqi_sb = sb;
	oinfo->dqi_gi.dqi_type = type;
	ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
	oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
	oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
	oinfo->dqi_gqi_bh = NULL;
	oinfo->dqi_gqi_count = 0;
	oinfo->dqi_gqinode = gqinode;
	status = ocfs2_lock_global_qf(oinfo, 0);
	if (status < 0) {
		mlog_errno(status);
		goto out_err;
	}
	/* Cache the physical location of block 0 (the info block). */
	status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
					     &pcount, NULL);
	if (status < 0)
		goto out_unlock;
	status = ocfs2_qinfo_lock(oinfo, 0);
	if (status < 0)
		goto out_unlock;
	status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
				      sizeof(struct ocfs2_global_disk_dqinfo),
				      OCFS2_GLOBAL_INFO_OFF);
	ocfs2_qinfo_unlock(oinfo, 0);
	ocfs2_unlock_global_qf(oinfo, 0);
	/* quota_read returns bytes read; a short read is an I/O error. */
	if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
		     status);
		if (status >= 0)
			status = -EIO;
		mlog_errno(status);
		goto out_err;
	}
	/* Copy the little-endian disk header into the in-memory structures. */
	info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
	info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
	oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
	oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
	oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
	oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
	oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
	oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
						OCFS2_QBLK_RESERVED_SPACE;
	oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
	/* Arm the periodic local->global sync worker. */
	INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
	schedule_delayed_work(&oinfo->dqi_sync_work,
			      msecs_to_jiffies(oinfo->dqi_syncms));
out_err:
	if (status)
		mlog_errno(status);
	return status;
out_unlock:
	ocfs2_unlock_global_qf(oinfo, 0);
	mlog_errno(status);
	goto out_err;
}
/* Write information to global quota file. Expects exlusive lock on quota
* file inode and quota info */
/* Write information to global quota file. Expects exlusive lock on quota
 * file inode and quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
	struct mem_dqinfo *info = sb_dqinfo(sb, type);
	struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
	struct ocfs2_global_disk_dqinfo dinfo;
	ssize_t size;

	/* Snapshot the dirty-protected fields under dq_data_lock and clear
	 * the dirty flag atomically with the snapshot. */
	spin_lock(&dq_data_lock);
	info->dqi_flags &= ~DQF_INFO_DIRTY;
	dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
	dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
	spin_unlock(&dq_data_lock);
	/* Remaining fields are protected by the exclusive qinfo lock. */
	dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
	dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
	dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
	dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
	size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
				     sizeof(struct ocfs2_global_disk_dqinfo),
				     OCFS2_GLOBAL_INFO_OFF);
	/* quota_write returns bytes written; a short write is an I/O error. */
	if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
		mlog(ML_ERROR, "Cannot write global quota info structure\n");
		if (size >= 0)
			size = -EIO;
		return size;
	}
	return 0;
}
/*
 * Locked wrapper around __ocfs2_global_write_info(): takes the exclusive
 * quota info cluster lock for the duration of the write.
 */
int ocfs2_global_write_info(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	int status;

	status = ocfs2_qinfo_lock(oinfo, 1);
	if (status < 0)
		return status;
	status = __ocfs2_global_write_info(sb, type);
	ocfs2_qinfo_unlock(oinfo, 1);
	return status;
}
/*
 * Worst-case number of new blocks needed to insert a dquot into the
 * global quota tree: one per tree level (internal nodes plus leaf); the
 * root block always exists already.
 */
static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	return oinfo->dqi_gi.dqi_qtree_depth;
}
/*
 * Journal credits for instantiating a dquot in the global file: every
 * possibly-allocated block plus the tree root and the info block each take
 * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS, and +1 covers the inode update.
 */
static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
	int new_blocks = ocfs2_global_qinit_alloc(sb, type);

	return (new_blocks + 2) * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1;
}
/* Sync local information about quota modifications with global quota file.
* Caller must have started the transaction and obtained exclusive lock for
* global quota file inode */
/* Sync local information about quota modifications with global quota file.
 * Caller must have started the transaction and obtained exclusive lock for
 * global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
	int err, err2;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_global_disk_dqblk dqblk;
	s64 spacechange, inodechange;
	time_t olditime, oldbtime;

	/* Read the current global copy of this dquot. */
	err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
				   sizeof(struct ocfs2_global_disk_dqblk),
				   dquot->dq_off);
	if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
		if (err >= 0) {
			mlog(ML_ERROR, "Short read from global quota file "
			     "(%u read)\n", err);
			err = -EIO;
		}
		goto out;
	}
	/* Update space and inode usage. Get also other information from
	 * global quota file so that we don't overwrite any changes there.
	 * We are */
	spin_lock(&dq_data_lock);
	/* Deltas accumulated locally since the last sync. */
	spacechange = dquot->dq_dqb.dqb_curspace -
					OCFS2_DQUOT(dquot)->dq_origspace;
	inodechange = dquot->dq_dqb.dqb_curinodes -
					OCFS2_DQUOT(dquot)->dq_originodes;
	olditime = dquot->dq_dqb.dqb_itime;
	oldbtime = dquot->dq_dqb.dqb_btime;
	/* Overwrite in-memory dquot with the global copy, then re-apply
	 * our local deltas on top of it. */
	ocfs2_global_disk2memdqb(dquot, &dqblk);
	trace_ocfs2_sync_dquot(dquot->dq_id, dquot->dq_dqb.dqb_curspace,
			       (long long)spacechange,
			       dquot->dq_dqb.dqb_curinodes,
			       (long long)inodechange);
	/* If the value was explicitly set via Q_SETQUOTA, the set value
	 * wins over the merged one (DQ_LASTSET_B + ... bits). */
	if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curspace += spacechange;
	if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
		dquot->dq_dqb.dqb_curinodes += inodechange;
	/* Set properly space grace time... */
	if (dquot->dq_dqb.dqb_bsoftlimit &&
	    dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
		/* Keep the earlier of the local and global grace deadlines. */
		if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
		    oldbtime > 0) {
			if (dquot->dq_dqb.dqb_btime > 0)
				dquot->dq_dqb.dqb_btime =
					min(dquot->dq_dqb.dqb_btime, oldbtime);
			else
				dquot->dq_dqb.dqb_btime = oldbtime;
		}
	} else {
		dquot->dq_dqb.dqb_btime = 0;
		clear_bit(DQ_BLKS_B, &dquot->dq_flags);
	}
	/* Set properly inode grace time... */
	if (dquot->dq_dqb.dqb_isoftlimit &&
	    dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
		if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
		    olditime > 0) {
			if (dquot->dq_dqb.dqb_itime > 0)
				dquot->dq_dqb.dqb_itime =
					min(dquot->dq_dqb.dqb_itime, olditime);
			else
				dquot->dq_dqb.dqb_itime = olditime;
		}
	} else {
		dquot->dq_dqb.dqb_itime = 0;
		clear_bit(DQ_INODES_B, &dquot->dq_flags);
	}
	/* All information is properly updated, clear the flags */
	__clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	__clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	/* Remember the merged usage as the new sync baseline. */
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	spin_unlock(&dq_data_lock);
	err = ocfs2_qinfo_lock(info, freeing);
	if (err < 0) {
		mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
			       " (type=%d, id=%u)\n", dquot->dq_type,
			       (unsigned)dquot->dq_id);
		goto out;
	}
	if (freeing)
		OCFS2_DQUOT(dquot)->dq_use_count--;
	/* Write the merged dquot back to the global file. */
	err = qtree_write_dquot(&info->dqi_gi, dquot);
	if (err < 0)
		goto out_qlock;
	/* Last user gone: release the global entry (may dirty the info). */
	if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
		err = qtree_release_dquot(&info->dqi_gi, dquot);
		if (info_dirty(sb_dqinfo(sb, type))) {
			err2 = __ocfs2_global_write_info(sb, type);
			if (!err)
				err = err2;
		}
	}
out_qlock:
	ocfs2_qinfo_unlock(info, freeing);
out:
	if (err < 0)
		mlog_errno(err);
	return err;
}
/*
* Functions for periodic syncing of dquots with global file
*/
/*
 * dquot_scan_active() callback: sync one active dquot of the given type to
 * the global quota file inside its own transaction, then rewrite the local
 * structure so both copies agree.
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
	handle_t *handle;
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int status = 0;

	trace_ocfs2_sync_dquot_helper(dquot->dq_id, dquot->dq_type,
				      type, sb->s_id);
	/* Only handle dquots of the type this sync pass is for. */
	if (type != dquot->dq_type)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
	status = ocfs2_sync_dquot(dquot);
	if (status < 0)
		mlog_errno(status);
	/* We have to write local structure as well... */
	/* NOTE(review): an error from ocfs2_sync_dquot() is logged but then
	 * overwritten by the local write's status — looks intentional
	 * (best effort), but confirm. */
	status = ocfs2_local_write_dquot(dquot);
	if (status < 0)
		mlog_errno(status);
	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	return status;
}
/*
 * Periodic worker: push all locally cached dquot changes of this quota
 * type out to the global quota file, then re-arm itself after another
 * dqi_syncms milliseconds.
 */
static void qsync_work_fn(struct work_struct *work)
{
	struct ocfs2_mem_dqinfo *oinfo;
	struct super_block *sb;

	oinfo = container_of(work, struct ocfs2_mem_dqinfo,
			     dqi_sync_work.work);
	sb = oinfo->dqi_gqinode->i_sb;
	dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
	schedule_delayed_work(&oinfo->dqi_sync_work,
			      msecs_to_jiffies(oinfo->dqi_syncms));
}
/*
* Wrappers for generic quota functions
*/
/*
 * Write the node-local copy of a dquot inside a small transaction.  The
 * global quota file is not touched here; that happens during sync.
 */
static int ocfs2_write_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	trace_ocfs2_write_dquot(dquot->dq_id, dquot->dq_type);
	handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}
	/* dqio_mutex serializes quota file I/O with the generic quota code. */
	mutex_lock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
	status = ocfs2_local_write_dquot(dquot);
	mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
	ocfs2_commit_trans(osb, handle);
out:
	return status;
}
static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	int touched_blocks = oinfo->dqi_gi.dqi_qtree_depth + 2;

	/*
	 * Journal credits for deleting a dquot: every quota tree level plus
	 * the leaf and local chunk header may be modified, and the global
	 * info and both inodes need updating (OCFS2_QINFO_WRITE_CREDITS
	 * already covers the global inode).
	 */
	return touched_blocks * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
	       OCFS2_QINFO_WRITE_CREDITS +
	       OCFS2_INODE_UPDATE_CREDITS;
}
/*
 * Drop this node's use of a dquot: release the global structure (if we
 * were the last user cluster-wide) and the entry in the local quota file.
 */
static int ocfs2_release_dquot(struct dquot *dquot)
{
	handle_t *handle;
	struct ocfs2_mem_dqinfo *oinfo =
			sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
	struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
	int status = 0;

	trace_ocfs2_release_dquot(dquot->dq_id, dquot->dq_type);
	mutex_lock(&dquot->dq_lock);
	/* Check whether we are not racing with some other dqget() */
	if (atomic_read(&dquot->dq_count) > 1)
		goto out;
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb,
		ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = ocfs2_global_release_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_trans;
	}
	status = ocfs2_local_release_dquot(handle, dquot);
	/*
	 * If we fail here, we cannot do much as global structure is
	 * already released. So just complain...
	 */
	if (status < 0)
		mlog_errno(status);
	clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	mutex_unlock(&dquot->dq_lock);
	if (status)
		mlog_errno(status);
	return status;
}
/*
* Read global dquot structure from disk or create it if it does
* not exist. Also update use count of the global structure and
* create structure in node-local quota file.
*/
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
	int status = 0, err;
	int ex = 0;	/* set when we instantiate a brand-new global entry */
	struct super_block *sb = dquot->dq_sb;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
	struct inode *gqinode = info->dqi_gqinode;
	int need_alloc = ocfs2_global_qinit_alloc(sb, type);
	handle_t *handle;

	trace_ocfs2_acquire_dquot(dquot->dq_id, type);
	mutex_lock(&dquot->dq_lock);
	/*
	 * We need an exclusive lock, because we're going to update use count
	 * and instantiate possibly new dquot structure
	 */
	status = ocfs2_lock_global_qf(info, 1);
	if (status < 0)
		goto out;
	/* First time this node sees the dquot: read it from the global
	 * quota tree. */
	if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
		status = ocfs2_qinfo_lock(info, 0);
		if (status < 0)
			goto out_dq;
		status = qtree_read_dquot(&info->dqi_gi, dquot);
		ocfs2_qinfo_unlock(info, 0);
		if (status < 0)
			goto out_dq;
	}
	set_bit(DQ_READ_B, &dquot->dq_flags);
	OCFS2_DQUOT(dquot)->dq_use_count++;
	/* Record the current usage as the baseline for delta syncing. */
	OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
	OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
	if (!dquot->dq_off) {	/* No real quota entry? */
		ex = 1;
		/*
		 * Add blocks to quota file before we start a transaction since
		 * locking allocators ranks above a transaction start
		 */
		WARN_ON(journal_current_handle());
		status = ocfs2_extend_no_holes(gqinode, NULL,
			gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
			gqinode->i_size);
		if (status < 0)
			goto out_dq;
	}
	handle = ocfs2_start_trans(osb,
				   ocfs2_calc_global_qinit_credits(sb, type));
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		goto out_dq;
	}
	/* Take the qinfo lock exclusively when a new entry may dirty it. */
	status = ocfs2_qinfo_lock(info, ex);
	if (status < 0)
		goto out_trans;
	status = qtree_write_dquot(&info->dqi_gi, dquot);
	if (ex && info_dirty(sb_dqinfo(sb, type))) {
		err = __ocfs2_global_write_info(sb, type);
		if (!status)
			status = err;
	}
	ocfs2_qinfo_unlock(info, ex);
out_trans:
	ocfs2_commit_trans(osb, handle);
out_dq:
	ocfs2_unlock_global_qf(info, 1);
	if (status < 0)
		goto out;
	/* Mirror the entry into the node-local quota file. */
	status = ocfs2_create_local_dquot(dquot);
	if (status < 0)
		goto out;
	set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out:
	mutex_unlock(&dquot->dq_lock);
	if (status)
		mlog_errno(status);
	return status;
}
/*
 * .mark_dirty callback.  Usually only the local copy is written; but when
 * the administrator just changed limits/usage (any DQ_LASTSET_B bit set)
 * we sync straight to the global file so other nodes see it quickly.
 */
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
	/* Bits that indicate a value was explicitly set via Q_SETQUOTA. */
	unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
			     (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
			     (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
			     (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
			     (1 << (DQ_LASTSET_B + QIF_ITIME_B));
	int sync = 0;
	int status;
	struct super_block *sb = dquot->dq_sb;
	int type = dquot->dq_type;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(sb);

	trace_ocfs2_mark_dquot_dirty(dquot->dq_id, type);
	/* In case user set some limits, sync dquot immediately to global
	 * quota file so that information propagates quicker */
	spin_lock(&dq_data_lock);
	if (dquot->dq_flags & mask)
		sync = 1;
	spin_unlock(&dq_data_lock);
	/* This is a slight hack but we can't afford getting global quota
	 * lock if we already have a transaction started. */
	if (!sync || journal_current_handle()) {
		status = ocfs2_write_dquot(dquot);
		goto out;
	}
	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	mutex_lock(&sb_dqopt(sb)->dqio_mutex);
	status = ocfs2_sync_dquot(dquot);
	if (status < 0) {
		mlog_errno(status);
		goto out_dlock;
	}
	/* Now write updated local dquot structure */
	status = ocfs2_local_write_dquot(dquot);
out_dlock:
	mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
	ocfs2_commit_trans(osb, handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	if (status)
		mlog_errno(status);
	return status;
}
/* This should happen only after set_dqinfo(). */
/* This should happen only after set_dqinfo(). */
/*
 * .write_info callback: commit the in-memory quota info to the global
 * quota file under the exclusive global lock and its own transaction.
 */
static int ocfs2_write_info(struct super_block *sb, int type)
{
	handle_t *handle;
	int status = 0;
	struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

	status = ocfs2_lock_global_qf(oinfo, 1);
	if (status < 0)
		goto out;
	handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out_ilock;
	}
	status = dquot_commit_info(sb, type);
	ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
	ocfs2_unlock_global_qf(oinfo, 1);
out:
	if (status)
		mlog_errno(status);
	return status;
}
/*
 * .alloc_dquot callback: allocate a zeroed ocfs2_dquot from the slab
 * cache and hand back the embedded generic struct dquot (sb and type are
 * unused here).  Returns NULL on allocation failure.
 */
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
	struct ocfs2_dquot *dquot;

	dquot = kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);
	return dquot ? &dquot->dq_dquot : NULL;
}
/* .destroy_dquot callback: return the dquot to the slab cache.
 * NOTE(review): frees the pointer as-is, which assumes dq_dquot sits at
 * offset 0 of struct ocfs2_dquot — confirm against the struct layout. */
static void ocfs2_destroy_dquot(struct dquot *dquot)
{
	kmem_cache_free(ocfs2_dquot_cachep, dquot);
}
/* Callbacks wired into the generic VFS quota core for ocfs2. */
const struct dquot_operations ocfs2_quota_operations = {
	/* We never make dquot dirty so .write_dquot is never called */
	.acquire_dquot	= ocfs2_acquire_dquot,
	.release_dquot	= ocfs2_release_dquot,
	.mark_dirty	= ocfs2_mark_dquot_dirty,
	.write_info	= ocfs2_write_info,
	.alloc_dquot	= ocfs2_alloc_dquot,
	.destroy_dquot	= ocfs2_destroy_dquot,
};
| gpl-2.0 |
davidmueller13/david_kernel_aosp_flo_6.0 | Documentation/vDSO/vdso_test.c | 8413 | 2488 | /*
* vdso_test.c: Sample code to test parse_vdso.c on x86_64
* Copyright (c) 2011 Andy Lutomirski
* Subject to the GNU General Public License, version 2
*
* You can amuse yourself by compiling with:
* gcc -std=gnu99 -nostdlib
* -Os -fno-asynchronous-unwind-tables -flto
* vdso_test.c parse_vdso.c -o vdso_test
* to generate a small binary with no dependencies at all.
*/
#include <sys/syscall.h>
#include <sys/time.h>
#include <unistd.h>
#include <stdint.h>
extern void *vdso_sym(const char *version, const char *name);
extern void vdso_init_from_sysinfo_ehdr(uintptr_t base);
extern void vdso_init_from_auxv(void *auxv);
/* We need a libc functions... */
/*
 * Minimal freestanding strcmp replacement (the test is built with
 * -nostdlib).  It deliberately returns only 0 (equal) or 1 (different),
 * never -1 like the libc version — fine for the exact-match lookups
 * done by the vDSO symbol parser.
 */
int strcmp(const char *a, const char *b)
{
	for (;; a++, b++) {
		if (*a != *b)
			return 1;
		if (*a == 0)
			return 0;
	}
}
/* ...and two syscalls. This is x86_64-specific. */
/* Raw write(2) via the syscall instruction.  Clobbers follow the x86_64
 * syscall convention listed below (rcx/r11 destroyed by syscall, plus the
 * caller-saved r8-r10). */
static inline long linux_write(int fd, const void *data, size_t len)
{
	long ret;

	asm volatile ("syscall" : "=a" (ret) : "a" (__NR_write),
		      "D" (fd), "S" (data), "d" (len) :
		      "cc", "memory", "rcx",
		      "r8", "r9", "r10", "r11" );
	return ret;
}
/* Raw _exit(2); does not return. */
static inline void linux_exit(int code)
{
	asm volatile ("syscall" : : "a" (__NR_exit), "D" (code));
}
/*
 * Render n in decimal, writing digits right-to-left so the last digit
 * lands exactly at *lastdig.  For n == 0 nothing is written at all;
 * untouched positions keep whatever the caller placed there (the caller
 * pre-fills a template string).
 */
void to_base10(char *lastdig, uint64_t n)
{
	for (; n != 0; n /= 10) {
		*lastdig = '0' + (n % 10);
		lastdig--;
	}
}
/*
 * C entry point, jumped to from the _start stub with the initial stack
 * pointer in the first argument.  Stack layout at process entry:
 *   argc, argv[0..argc-1], NULL, envp..., NULL, auxv pairs...
 */
__attribute__((externally_visible)) void c_main(void **stack)
{
	/* Parse the stack */
	long argc = (long)*stack;
	stack += argc + 2;	/* skip argc, argv[] and the terminating NULL */

	/* Now we're pointing at the environment.  Skip it. */
	while (*stack)
		stack++;
	stack++;

	/* Now we're pointing at auxv.  Initialize the vDSO parser. */
	vdso_init_from_auxv((void *)stack);

	/* Find gettimeofday. */
	typedef long (*gtod_t)(struct timeval *tv, struct timezone *tz);
	gtod_t gtod = (gtod_t)vdso_sym("LINUX_2.6", "__vdso_gettimeofday");

	if (!gtod)
		linux_exit(1);

	struct timeval tv;
	long ret = gtod(&tv, 0);

	if (ret == 0) {
		/*
		 * to_base10() writes digits backwards ending at the given
		 * offset, so the template must reserve space through index
		 * 31 for the seconds and 33..38 for the microseconds.  The
		 * previous template ("The time is .000000\n", 21 bytes) was
		 * too short, making buf + 31 / buf + 38 out-of-bounds
		 * writes; restore the padded template.
		 */
		char buf[] = "The time is                     .000000\n";
		to_base10(buf + 31, tv.tv_sec);
		to_base10(buf + 38, tv.tv_usec);
		linux_write(1, buf, sizeof(buf) - 1);
	} else {
		linux_exit(ret);
	}
	linux_exit(0);
}
/*
 * This is the real entry point. It passes the initial stack into
 * the C entry point.
 */
asm (
	".text\n"
	".global _start\n"
	".type _start,@function\n"
	"_start:\n\t"
	/* At process entry %rsp points at argc; hand it to c_main as the
	 * first (and only) argument per the SysV x86_64 ABI. */
	"mov %rsp,%rdi\n\t"
	"jmp c_main"
);
| gpl-2.0 |
faux123/HTC-Endeavoru | fs/affs/symlink.c | 12509 | 1737 | /*
* linux/fs/affs/symlink.c
*
* 1995 Hans-Joachim Widmaier - Modified for affs.
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* affs symlink handling code
*/
#include "affs.h"
/*
 * Read and decode an AFFS symlink block into the page cache.  Amiga paths
 * like "Volume:dir/file" are translated: the "Volume:" prefix becomes the
 * mount's configured prefix (s_prefix, default "/"), and a doubled slash
 * (Amiga parent-directory notation) is expanded by inserting "..".
 * All writes into the target buffer are bounded to 1023 bytes + NUL.
 */
static int affs_symlink_readpage(struct file *file, struct page *page)
{
	struct buffer_head *bh;
	struct inode *inode = page->mapping->host;
	char *link = kmap(page);
	struct slink_front *lf;
	int err;
	int i, j;	/* i: output index into link, j: input index */
	char c;
	char lc;	/* previous character emitted, for '//' detection */

	pr_debug("AFFS: follow_link(ino=%lu)\n",inode->i_ino);
	err = -EIO;
	bh = affs_bread(inode->i_sb, inode->i_ino);
	if (!bh)
		goto fail;
	i = 0;
	j = 0;
	lf = (struct slink_front *)bh->b_data;
	lc = 0;
	if (strchr(lf->symname,':')) {	/* Handle assign or volume name */
		struct affs_sb_info *sbi = AFFS_SB(inode->i_sb);
		char *pf;
		/* s_prefix may be changed by remount; copy it under lock. */
		spin_lock(&sbi->symlink_lock);
		pf = sbi->s_prefix ? sbi->s_prefix : "/";
		while (i < 1023 && (c = pf[i]))
			link[i++] = c;
		spin_unlock(&sbi->symlink_lock);
		/* Skip everything up to and including the ':'. */
		while (i < 1023 && lf->symname[j] != ':')
			link[i++] = lf->symname[j++];
		if (i < 1023)
			link[i++] = '/';
		j++;
		lc = '/';
	}
	while (i < 1023 && (c = lf->symname[j])) {
		if (c == '/' && lc == '/' && i < 1020) {	/* parent dir */
			link[i++] = '.';
			link[i++] = '.';
		}
		link[i++] = c;
		lc = c;
		j++;
	}
	link[i] = '\0';
	affs_brelse(bh);
	SetPageUptodate(page);
	kunmap(page);
	unlock_page(page);
	return 0;
fail:
	SetPageError(page);
	kunmap(page);
	unlock_page(page);
	return err;
}
/* Symlink page reads go through the AFFS decoder above. */
const struct address_space_operations affs_symlink_aops = {
	.readpage	= affs_symlink_readpage,
};

/* Generic page-based symlink following; only setattr is AFFS-specific. */
const struct inode_operations affs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.setattr	= affs_notify_change,
};
| gpl-2.0 |
getitnowmarketing/Incrediblec-mr3 | arch/sparc/kernel/prom_irqtrans.c | 1502 | 21933 | #include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include "prom.h"
#ifdef CONFIG_PCI
/* PSYCHO interrupt mapping support. */
#define PSYCHO_IMAP_A_SLOT0	0x0c00UL
#define PSYCHO_IMAP_B_SLOT0	0x0c20UL

/*
 * IMAP register offset for a PCI slot interrupt.  Bit 4 of the INO picks
 * PCI bus A or B, bits 3:2 the slot on that bus; each slot owns one
 * 8-byte IMAP register.
 */
static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
{
	unsigned long slot = (ino & 0x0c) >> 2;

	if (ino & 0x10)
		return PSYCHO_IMAP_B_SLOT0 + (slot * 8);
	return PSYCHO_IMAP_A_SLOT0 + (slot * 8);
}
/* Onboard (non-slot) device IMAP registers form a linear array. */
#define PSYCHO_OBIO_IMAP_BASE	0x1000UL
#define PSYCHO_ONBOARD_IRQ_BASE	0x20
#define psycho_onboard_imap_offset(__ino) \
	(PSYCHO_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))

#define PSYCHO_ICLR_A_SLOT0	0x1400UL
#define PSYCHO_ICLR_SCSI	0x1800UL
/* ICLR offset: bit 5 of the INO separates onboard (SCSI array) from
 * PCI-slot interrupts. */
#define psycho_iclr_offset(ino) \
	((ino & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
	 (PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))

/*
 * irq_build callback for PSYCHO controllers: map an INO to its IMAP/ICLR
 * register addresses and hand them to the generic build_irq().
 */
static unsigned int psycho_irq_build(struct device_node *dp,
				     unsigned int ino,
				     void *_data)
{
	unsigned long controller_regs = (unsigned long) _data;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int inofixup = 0;

	ino &= 0x3f;
	if (ino < PSYCHO_ONBOARD_IRQ_BASE) {
		/* PCI slot */
		imap_off = psycho_pcislot_imap_offset(ino);
	} else {
		/* Onboard device */
		imap_off = psycho_onboard_imap_offset(ino);
	}

	/* Now build the IRQ bucket. */
	imap = controller_regs + imap_off;
	iclr_off = psycho_iclr_offset(ino);
	iclr = controller_regs + iclr_off;
	/* PCI slots share one IMAP among four INOs; the low two bits
	 * select the interrupt line within the slot. */
	if ((ino & 0x20) == 0)
		inofixup = ino & 0x03;
	return build_irq(inofixup, iclr, imap);
}
/*
 * Install the PSYCHO IRQ translator on this device node.  The controller
 * register base is taken from the third "reg" entry.
 * NOTE(review): regs is dereferenced without a NULL check — presumably the
 * firmware always supplies "reg" for these nodes; confirm.
 */
static void __init psycho_irq_trans_init(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;

	dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
	dp->irq_trans->irq_build = psycho_irq_build;

	regs = of_get_property(dp, "reg", NULL);
	dp->irq_trans->data = (void *) regs[2].phys_addr;
}
/* Read a 64-bit SABRE register using a physical-address bypass load. */
#define sabre_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})

/* Per-controller state passed to the SABRE irq_build callback. */
struct sabre_irq_data {
	unsigned long controller_regs;	/* physical base of controller regs */
	unsigned int pci_first_busno;	/* first PCI bus number behind it */
};

#define SABRE_CONFIGSPACE	0x001000000UL
#define SABRE_WRSYNC		0x1c20UL

/* Base of type-0 config accesses within the config space aperture. */
#define SABRE_CONFIG_BASE(CONFIG_SPACE) \
	(CONFIG_SPACE | (1UL << 24))
/* Pack bus/devfn/register into a config-space offset. */
#define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG) \
	(((unsigned long)(BUS) << 16) | \
	 ((unsigned long)(DEVFN) << 8) | \
	 ((unsigned long)(REG)))
/* When a device lives behind a bridge deeper in the PCI bus topology
* than APB, a special sequence must run to make sure all pending DMA
* transfers at the time of IRQ delivery are visible in the coherency
* domain by the cpu. This sequence is to perform a read on the far
* side of the non-APB bridge, then perform a read of Sabre's DMA
* write-sync register.
*/
/*
 * IRQ pre-handler (see comment above): read a config-space register of the
 * interrupting device on the far side of the non-APB bridge, then read
 * SABRE's DMA write-sync register, so all DMA pending at IRQ delivery is
 * visible to the CPU before the real handler runs.
 */
static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
{
	unsigned int phys_hi = (unsigned int) (unsigned long) _arg1;
	struct sabre_irq_data *irq_data = _arg2;
	unsigned long controller_regs = irq_data->controller_regs;
	unsigned long sync_reg = controller_regs + SABRE_WRSYNC;
	unsigned long config_space = controller_regs + SABRE_CONFIGSPACE;
	unsigned int bus, devfn;
	u16 _unused;

	/* phys_hi carries the device's PCI bus/devfn (OF "reg" encoding). */
	config_space = SABRE_CONFIG_BASE(config_space);
	bus = (phys_hi >> 16) & 0xff;
	devfn = (phys_hi >> 8) & 0xff;
	config_space |= SABRE_CONFIG_ENCODE(bus, devfn, 0x00);

	/* Dummy little-endian config read, fenced on both sides. */
	__asm__ __volatile__("membar #Sync\n\t"
			     "lduha [%1] %2, %0\n\t"
			     "membar #Sync"
			     : "=r" (_unused)
			     : "r" ((u16 *) config_space),
			       "i" (ASI_PHYS_BYPASS_EC_E_L)
			     : "memory");

	sabre_read(sync_reg);
}
#define SABRE_IMAP_A_SLOT0	0x0c00UL
#define SABRE_IMAP_B_SLOT0	0x0c20UL
#define SABRE_ICLR_A_SLOT0	0x1400UL
#define SABRE_ICLR_B_SLOT0	0x1480UL
#define SABRE_ICLR_SCSI		0x1800UL
#define SABRE_ICLR_ETH		0x1808UL
#define SABRE_ICLR_BPP		0x1810UL
#define SABRE_ICLR_AU_REC	0x1818UL
#define SABRE_ICLR_AU_PLAY	0x1820UL
#define SABRE_ICLR_PFAIL	0x1828UL
#define SABRE_ICLR_KMS		0x1830UL
#define SABRE_ICLR_FLPY		0x1838UL
#define SABRE_ICLR_SHW		0x1840UL
#define SABRE_ICLR_KBD		0x1848UL
#define SABRE_ICLR_MS		0x1850UL
#define SABRE_ICLR_SER		0x1858UL
#define SABRE_ICLR_UE		0x1870UL
#define SABRE_ICLR_CE		0x1878UL
#define SABRE_ICLR_PCIERR	0x1880UL

/*
 * IMAP register offset for a PCI slot interrupt: bit 4 of the INO picks
 * PCI bus A or B, bits 3:2 the slot; one 8-byte IMAP register per slot.
 */
static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
{
	unsigned long slot = (ino & 0x0c) >> 2;

	if (ino & 0x10)
		return SABRE_IMAP_B_SLOT0 + (slot * 8);
	return SABRE_IMAP_A_SLOT0 + (slot * 8);
}

#define SABRE_OBIO_IMAP_BASE	0x1000UL
#define SABRE_ONBOARD_IRQ_BASE	0x20
#define sabre_onboard_imap_offset(__ino) \
	(SABRE_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))

#define sabre_iclr_offset(ino) \
	((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
	 (SABRE_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
/*
 * Decide whether a device needs the DMA write-sync pre-handler: only
 * devices behind a PCI<->PCI bridge other than APB (and not the PCI
 * controller itself) do.  Returns 1 if the pre-handler is needed.
 */
static int sabre_device_needs_wsync(struct device_node *dp)
{
	struct device_node *parent = dp->parent;
	const char *parent_model, *parent_compat;

	/* This traversal up towards the root is meant to
	 * handle two cases:
	 *
	 * 1) non-PCI bus sitting under PCI, such as 'ebus'
	 * 2) the PCI controller interrupts themselves, which
	 *    will use the sabre_irq_build but do not need
	 *    the DMA synchronization handling
	 */
	while (parent) {
		if (!strcmp(parent->type, "pci"))
			break;
		parent = parent->parent;
	}

	if (!parent)
		return 0;

	/* APB bridges (SUNW,sabre / SUNW,simba) do not need the sync. */
	parent_model = of_get_property(parent,
				       "model", NULL);
	if (parent_model &&
	    (!strcmp(parent_model, "SUNW,sabre") ||
	     !strcmp(parent_model, "SUNW,simba")))
		return 0;

	/* Same check by PCI compatible strings for nodes without "model". */
	parent_compat = of_get_property(parent,
					"compatible", NULL);
	if (parent_compat &&
	    (!strcmp(parent_compat, "pci108e,a000") ||
	     !strcmp(parent_compat, "pci108e,a001")))
		return 0;

	return 1;
}
/*
 * irq_build callback for SABRE controllers: map an INO to its IMAP/ICLR
 * registers, build the IRQ, and install the DMA write-sync pre-handler
 * when the device sits behind a non-APB PCI bridge.
 */
static unsigned int sabre_irq_build(struct device_node *dp,
				    unsigned int ino,
				    void *_data)
{
	struct sabre_irq_data *irq_data = _data;
	unsigned long controller_regs = irq_data->controller_regs;
	const struct linux_prom_pci_registers *regs;
	unsigned long imap, iclr;
	unsigned long imap_off, iclr_off;
	int inofixup = 0;
	int virt_irq;

	ino &= 0x3f;
	if (ino < SABRE_ONBOARD_IRQ_BASE) {
		/* PCI slot */
		imap_off = sabre_pcislot_imap_offset(ino);
	} else {
		/* onboard device */
		imap_off = sabre_onboard_imap_offset(ino);
	}

	/* Now build the IRQ bucket. */
	imap = controller_regs + imap_off;

	iclr_off = sabre_iclr_offset(ino);
	iclr = controller_regs + iclr_off;

	/* PCI slots share one IMAP across four INOs; the low two bits
	 * select the interrupt line within the slot. */
	if ((ino & 0x20) == 0)
		inofixup = ino & 0x03;

	virt_irq = build_irq(inofixup, iclr, imap);

	/* If the parent device is a PCI<->PCI bridge other than
	 * APB, we have to install a pre-handler to ensure that
	 * all pending DMA is drained before the interrupt handler
	 * is run.
	 */
	regs = of_get_property(dp, "reg", NULL);
	if (regs && sabre_device_needs_wsync(dp)) {
		irq_install_pre_handler(virt_irq,
					sabre_wsync_handler,
					(void *) (long) regs->phys_hi,
					(void *) irq_data);
	}

	return virt_irq;
}
/*
 * Install the SABRE IRQ translator on this device node, recording the
 * controller register base and first PCI bus number.
 * NOTE(review): regs and busrange are dereferenced without NULL checks —
 * presumably firmware always provides "reg"/"bus-range" here; confirm.
 */
static void __init sabre_irq_trans_init(struct device_node *dp)
{
	const struct linux_prom64_registers *regs;
	struct sabre_irq_data *irq_data;
	const u32 *busrange;

	dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
	dp->irq_trans->irq_build = sabre_irq_build;

	irq_data = prom_early_alloc(sizeof(struct sabre_irq_data));

	regs = of_get_property(dp, "reg", NULL);
	irq_data->controller_regs = regs[0].phys_addr;

	busrange = of_get_property(dp, "bus-range", NULL);
	irq_data->pci_first_busno = busrange[0];

	dp->irq_trans->data = irq_data;
}
/* SCHIZO interrupt mapping support. Unlike Psycho, for this controller the
* imap/iclr registers are per-PBM.
*/
#define SCHIZO_IMAP_BASE	0x1000UL
#define SCHIZO_ICLR_BASE	0x1400UL

/*
 * On SCHIZO each INO owns its own 8-byte IMAP and ICLR register; both
 * register files are simple linear arrays starting at the bases above,
 * relative to the PBM register block.
 */
static unsigned long schizo_imap_offset(unsigned long ino)
{
	return SCHIZO_IMAP_BASE + ino * 8UL;
}

static unsigned long schizo_iclr_offset(unsigned long ino)
{
	return SCHIZO_ICLR_BASE + ino * 8UL;
}

static unsigned long schizo_ino_to_iclr(unsigned long pbm_regs,
					unsigned int ino)
{
	return pbm_regs + schizo_iclr_offset(ino);
}

static unsigned long schizo_ino_to_imap(unsigned long pbm_regs,
					unsigned int ino)
{
	return pbm_regs + schizo_imap_offset(ino);
}
/* Read a 64-bit SCHIZO register via a physical-address bypass load. */
#define schizo_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
/* Write a 64-bit SCHIZO register via a physical-address bypass store. */
#define schizo_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory")
/*
 * IRQ pre-handler for Tomatillo: poke the INO's bit into the DMA
 * write-sync register and poll until the hardware clears it, bounding the
 * wait.  On older chip revisions (_arg1 != NULL) additionally force a
 * block-commit store to flush the CPU's store pipe.
 */
static void tomatillo_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
{
	unsigned long sync_reg = (unsigned long) _arg2;
	u64 mask = 1UL << (ino & IMAP_INO);
	u64 val;
	int limit;

	schizo_write(sync_reg, mask);

	/* Bounded poll: give up (with a complaint) after 100000 reads. */
	limit = 100000;
	val = 0;
	while (--limit) {
		val = schizo_read(sync_reg);
		if (!(val & mask))
			break;
	}
	if (limit <= 0) {
		printk("tomatillo_wsync_handler: DMA won't sync [%llx:%llx]\n",
		       val, mask);
	}

	if (_arg1) {
		/* Chip revision <= 4 workaround: a 64-byte block-commit
		 * store of the FP registers forces outstanding stores out. */
		static unsigned char cacheline[64]
			__attribute__ ((aligned (64)));

		__asm__ __volatile__("rd %%fprs, %0\n\t"
				     "or %0, %4, %1\n\t"
				     "wr %1, 0x0, %%fprs\n\t"
				     "stda %%f0, [%5] %6\n\t"
				     "wr %0, 0x0, %%fprs\n\t"
				     "membar #Sync"
				     : "=&r" (mask), "=&r" (val)
				     : "0" (mask), "1" (val),
				       "i" (FPRS_FEF), "r" (&cacheline[0]),
				       "i" (ASI_BLK_COMMIT_P));
	}
}
/* Per-controller state captured at boot for Schizo/Tomatillo IRQ building. */
struct schizo_irq_data {
	unsigned long pbm_regs;		/* physical base of PBM registers */
	unsigned long sync_reg;		/* write-sync reg; 0 => not Tomatillo */
	u32 portid;			/* JBUS/Safari port id */
	int chip_version;		/* "version#" property */
};
/* Translate a Schizo/Tomatillo INO into a virtual IRQ, installing the
 * DMA-drain pre-handler on Tomatillo variants.
 */
static unsigned int schizo_irq_build(struct device_node *dp,
				     unsigned int ino,
				     void *_data)
{
	struct schizo_irq_data *d = _data;
	int is_tomatillo = (d->sync_reg != 0UL);
	unsigned long imap, iclr;
	int fixup = 0;
	int virt_irq;

	ino &= 0x3f;

	/* Now build the IRQ bucket. */
	imap = schizo_ino_to_imap(d->pbm_regs, ino);
	iclr = schizo_ino_to_iclr(d->pbm_regs, ino);

	/* No inofixup is needed on Schizo: each INO has its own IMAP
	 * register, unlike Psycho/Sabre where one IMAP serves four INOs
	 * per PCI slot.  JBUS variants (essentially, Tomatillo) must
	 * still fix up the lowest bit of the interrupt group number
	 * based on the port id.
	 */
	if (is_tomatillo && (d->portid & 1))
		fixup = 1 << 6;

	virt_irq = build_irq(fixup, iclr, imap);

	if (is_tomatillo)
		irq_install_pre_handler(virt_irq,
					tomatillo_wsync_handler,
					(d->chip_version <= 4) ?
						(void *) 1 : (void *) 0,
					(void *) d->sync_reg);

	return virt_irq;
}
/* Common Schizo/Tomatillo translation setup; @is_tomatillo selects
 * whether a write-sync register address is recorded.
 */
static void __init __schizo_irq_trans_init(struct device_node *dp,
					   int is_tomatillo)
{
	const struct linux_prom64_registers *pregs;
	struct schizo_irq_data *sd;

	dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
	dp->irq_trans->irq_build = schizo_irq_build;
	sd = prom_early_alloc(sizeof(struct schizo_irq_data));
	dp->irq_trans->data = sd;

	pregs = of_get_property(dp, "reg", NULL);
	sd->pbm_regs = pregs[0].phys_addr;
	sd->sync_reg = is_tomatillo ? pregs[3].phys_addr + 0x1a18UL : 0UL;
	sd->portid = of_getintprop_default(dp, "portid", 0);
	sd->chip_version = of_getintprop_default(dp, "version#", 0);
}
/* Schizo: shared setup, no write-sync register. */
static void __init schizo_irq_trans_init(struct device_node *dp)
{
	__schizo_irq_trans_init(dp, 0);
}
/* Tomatillo: shared setup, with the write-sync register enabled. */
static void __init tomatillo_irq_trans_init(struct device_node *dp)
{
	__schizo_irq_trans_init(dp, 1);
}
/* sun4v PCI: the devino plus the devhandle stashed in @_data map
 * directly onto a sun4v interrupt.
 */
static unsigned int pci_sun4v_irq_build(struct device_node *dp,
					unsigned int devino,
					void *_data)
{
	u32 devhandle = (u32) (unsigned long) _data;

	return sun4v_build_irq(devhandle, devino);
}
/* sun4v PCI translation setup: extract the devhandle from the high
 * word of the first "reg" entry and stash it as the build data.
 */
static void __init pci_sun4v_irq_trans_init(struct device_node *dp)
{
	const struct linux_prom64_registers *pregs;
	unsigned long devhandle;

	dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
	dp->irq_trans->irq_build = pci_sun4v_irq_build;

	pregs = of_get_property(dp, "reg", NULL);
	devhandle = (pregs->phys_addr >> 32UL) & 0x0fffffff;
	dp->irq_trans->data = (void *) devhandle;
}
/* Per-controller state for Fire (PCI-E) interrupt building. */
struct fire_irq_data {
	unsigned long pbm_regs;		/* physical base of PBM registers */
	u32 portid;			/* "portid" property of the bridge */
};
/* Fire interrupt map/clear register layout: one 8-byte register per INO. */
#define FIRE_IMAP_BASE	0x001000
#define FIRE_ICLR_BASE	0x001400

/* Byte offset of the interrupt map register for @ino. */
static unsigned long fire_imap_offset(unsigned long ino)
{
	return FIRE_IMAP_BASE + (ino << 3);
}
/* Byte offset of the interrupt clear register for @ino. */
static unsigned long fire_iclr_offset(unsigned long ino)
{
	return FIRE_ICLR_BASE + (ino << 3);
}
/* Physical address of the ICLR register for @ino on this PBM. */
static unsigned long fire_ino_to_iclr(unsigned long pbm_regs,
				      unsigned int ino)
{
	return fire_iclr_offset(ino) + pbm_regs;
}
/* Physical address of the IMAP register for @ino on this PBM. */
static unsigned long fire_ino_to_imap(unsigned long pbm_regs,
				      unsigned int ino)
{
	return fire_imap_offset(ino) + pbm_regs;
}
/* Translate a Fire INO into a virtual IRQ.  Also programs the
 * interrupt controller number into the map register.
 */
static unsigned int fire_irq_build(struct device_node *dp,
				   unsigned int ino,
				   void *_data)
{
	struct fire_irq_data *irq_data = _data;
	unsigned long pbm_regs = irq_data->pbm_regs;
	unsigned long imap, iclr;
	unsigned long int_ctrlr;
	ino &= 0x3f;
	/* Now build the IRQ bucket. */
	imap = fire_ino_to_imap(pbm_regs, ino);
	iclr = fire_ino_to_iclr(pbm_regs, ino);
	/* Set the interrupt controller number. */
	int_ctrlr = 1 << 6;
	upa_writeq(int_ctrlr, imap);
	/* The interrupt map registers do not have an INO field
	 * like other chips do.  They return zero in the INO
	 * field, and the interrupt controller number is controlled
	 * in bits 6 to 9.  So in order for build_irq() to get
	 * the INO right we pass it in as part of the fixup
	 * which will get added to the map register zero value
	 * read by build_irq().
	 */
	ino |= (irq_data->portid << 6);
	ino -= int_ctrlr;	/* cancel out the controller number set above */
	return build_irq(ino, iclr, imap);
}
/* Fire translation setup: record the PBM register base and port id. */
static void __init fire_irq_trans_init(struct device_node *dp)
{
	const struct linux_prom64_registers *pregs;
	struct fire_irq_data *fd;

	dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
	dp->irq_trans->irq_build = fire_irq_build;
	fd = prom_early_alloc(sizeof(struct fire_irq_data));
	dp->irq_trans->data = fd;

	pregs = of_get_property(dp, "reg", NULL);
	fd->pbm_regs = pregs[0].phys_addr;
	fd->portid = of_getintprop_default(dp, "portid", 0);
}
#endif /* CONFIG_PCI */
#ifdef CONFIG_SBUS
/* INO number to IMAP register offset for SYSIO external IRQ's.
 * This should conform to both Sunfire/Wildfire server and Fusion
 * desktop designs.
 */
#define SYSIO_IMAP_SLOT0	0x2c00UL
#define SYSIO_IMAP_SLOT1	0x2c08UL
#define SYSIO_IMAP_SLOT2	0x2c10UL
#define SYSIO_IMAP_SLOT3	0x2c18UL
#define SYSIO_IMAP_SCSI		0x3000UL
#define SYSIO_IMAP_ETH		0x3008UL
#define SYSIO_IMAP_BPP		0x3010UL
#define SYSIO_IMAP_AUDIO	0x3018UL
#define SYSIO_IMAP_PFAIL	0x3020UL
#define SYSIO_IMAP_KMS		0x3028UL
#define SYSIO_IMAP_FLPY		0x3030UL
#define SYSIO_IMAP_SHW		0x3038UL
#define SYSIO_IMAP_KBD		0x3040UL
#define SYSIO_IMAP_MS		0x3048UL
#define SYSIO_IMAP_SER		0x3050UL
#define SYSIO_IMAP_TIM0		0x3060UL
#define SYSIO_IMAP_TIM1		0x3068UL
#define SYSIO_IMAP_UE		0x3070UL
#define SYSIO_IMAP_CE		0x3078UL
#define SYSIO_IMAP_SBERR	0x3080UL
#define SYSIO_IMAP_PMGMT	0x3088UL
#define SYSIO_IMAP_GFX		0x3090UL
#define SYSIO_IMAP_EUPA		0x3098UL
/* Marker for INO values that have no IMAP register. */
#define bogon     ((unsigned long) -1)
/* Indexed by (adjusted) INO; see sbus_of_build_irq() for the indexing. */
static unsigned long sysio_irq_offsets[] = {
	/* SBUS Slot 0 --> 3, level 1 --> 7 */
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
	SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
	/* Onboard devices (not relevant/used on SunFire). */
	SYSIO_IMAP_SCSI,
	SYSIO_IMAP_ETH,
	SYSIO_IMAP_BPP,
	bogon,
	SYSIO_IMAP_AUDIO,
	SYSIO_IMAP_PFAIL,
	bogon,
	bogon,
	SYSIO_IMAP_KMS,
	SYSIO_IMAP_FLPY,
	SYSIO_IMAP_SHW,
	SYSIO_IMAP_KBD,
	SYSIO_IMAP_MS,
	SYSIO_IMAP_SER,
	bogon,
	bogon,
	SYSIO_IMAP_TIM0,
	SYSIO_IMAP_TIM1,
	bogon,
	bogon,
	SYSIO_IMAP_UE,
	SYSIO_IMAP_CE,
	SYSIO_IMAP_SBERR,
	SYSIO_IMAP_PMGMT,
	SYSIO_IMAP_GFX,
	SYSIO_IMAP_EUPA,
};
#undef bogon
#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
/* Convert an Interrupt Mapping register address into the matching
 * Interrupt Clear register address, SYSIO specific version.
 */
#define SYSIO_ICLR_UNUSED0	0x3400UL
#define SYSIO_ICLR_SLOT0	0x3408UL
#define SYSIO_ICLR_SLOT1	0x3448UL
#define SYSIO_ICLR_SLOT2	0x3488UL
#define SYSIO_ICLR_SLOT3	0x34c8UL
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
	/* The ICLR block sits at a fixed displacement above the IMAP block. */
	return imap + (SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0);
}
/* Build an interrupt for a device under a SYSIO SBUS controller.
 * @_data is the SYSIO register base (physical address).  Halts via
 * prom_halt() on an INO with no IMAP register.
 */
static unsigned int sbus_of_build_irq(struct device_node *dp,
				      unsigned int ino,
				      void *_data)
{
	unsigned long reg_base = (unsigned long) _data;
	const struct linux_prom_registers *regs;
	unsigned long imap, iclr;
	int sbus_slot = 0;
	int sbus_level = 0;
	ino &= 0x3f;
	regs = of_get_property(dp, "reg", NULL);
	if (regs)
		sbus_slot = regs->which_io;
	/* External SBUS interrupts (ino < 0x20): 8 levels per slot. */
	if (ino < 0x20)
		ino += (sbus_slot * 8);
	imap = sysio_irq_offsets[ino];
	if (imap == ((unsigned long)-1)) {
		prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
			    ino);
		prom_halt();
	}
	imap += reg_base;
	/* SYSIO inconsistency.  For external SLOTS, we have to select
	 * the right ICLR register based upon the lower SBUS irq level
	 * bits.
	 */
	if (ino >= 0x20) {
		iclr = sysio_imap_to_iclr(imap);
	} else {
		sbus_level = ino & 0x7;
		switch (sbus_slot) {
		case 0:
			iclr = reg_base + SYSIO_ICLR_SLOT0;
			break;
		case 1:
			iclr = reg_base + SYSIO_ICLR_SLOT1;
			break;
		case 2:
			iclr = reg_base + SYSIO_ICLR_SLOT2;
			break;
		default:
		case 3:
			iclr = reg_base + SYSIO_ICLR_SLOT3;
			break;
		};
		/* One ICLR register per level, starting at level 1. */
		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
	}
	return build_irq(sbus_level, iclr, imap);
}
/* SBUS translation setup: the build data is simply the SYSIO register
 * base taken from the first "reg" entry.
 */
static void __init sbus_irq_trans_init(struct device_node *dp)
{
	const struct linux_prom64_registers *pregs;

	dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
	dp->irq_trans->irq_build = sbus_of_build_irq;

	pregs = of_get_property(dp, "reg", NULL);
	dp->irq_trans->data = (void *) (unsigned long) pregs->phys_addr;
}
#endif /* CONFIG_SBUS */
static unsigned int central_build_irq(struct device_node *dp,
unsigned int ino,
void *_data)
{
struct device_node *central_dp = _data;
struct of_device *central_op = of_find_device_by_node(central_dp);
struct resource *res;
unsigned long imap, iclr;
u32 tmp;
if (!strcmp(dp->name, "eeprom")) {
res = ¢ral_op->resource[5];
} else if (!strcmp(dp->name, "zs")) {
res = ¢ral_op->resource[4];
} else if (!strcmp(dp->name, "clock-board")) {
res = ¢ral_op->resource[3];
} else {
return ino;
}
imap = res->start + 0x00UL;
iclr = res->start + 0x10UL;
/* Set the INO state to idle, and disable. */
upa_writel(0, iclr);
upa_readl(iclr);
tmp = upa_readl(imap);
tmp &= ~0x80000000;
upa_writel(tmp, imap);
return build_irq(0, iclr, imap);
}
/* "central" FHC setup: the build data is the bridge node itself. */
static void __init central_irq_trans_init(struct device_node *dp)
{
	struct of_irq_controller *t;

	t = prom_early_alloc(sizeof(struct of_irq_controller));
	t->irq_build = central_build_irq;
	t->data = dp;
	dp->irq_trans = t;
}
struct irq_trans {
const char *name;
void (*init)(struct device_node *);
};
#ifdef CONFIG_PCI
/* Known PCI host controllers and their IRQ translation initializers;
 * matched against the node's "model" or "compatible" property.
 */
static struct irq_trans __initdata pci_irq_trans_table[] = {
	{ "SUNW,sabre", sabre_irq_trans_init },
	{ "pci108e,a000", sabre_irq_trans_init },
	{ "pci108e,a001", sabre_irq_trans_init },
	{ "SUNW,psycho", psycho_irq_trans_init },
	{ "pci108e,8000", psycho_irq_trans_init },
	{ "SUNW,schizo", schizo_irq_trans_init },
	{ "pci108e,8001", schizo_irq_trans_init },
	{ "SUNW,schizo+", schizo_irq_trans_init },
	{ "pci108e,8002", schizo_irq_trans_init },
	{ "SUNW,tomatillo", tomatillo_irq_trans_init },
	{ "pci108e,a801", tomatillo_irq_trans_init },
	{ "SUNW,sun4v-pci", pci_sun4v_irq_trans_init },
	{ "pciex108e,80f0", fire_irq_trans_init },
};
#endif
/* sun4v virtual devices: devino plus the stashed devhandle map
 * directly onto a sun4v interrupt.
 */
static unsigned int sun4v_vdev_irq_build(struct device_node *dp,
					 unsigned int devino,
					 void *_data)
{
	u32 devhandle = (u32) (unsigned long) _data;

	return sun4v_build_irq(devhandle, devino);
}
/* sun4v virtual-device setup: devhandle lives in the high word of the
 * first "reg" entry.
 */
static void __init sun4v_vdev_irq_trans_init(struct device_node *dp)
{
	const struct linux_prom64_registers *pregs;
	unsigned long devhandle;

	dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
	dp->irq_trans->irq_build = sun4v_vdev_irq_build;

	pregs = of_get_property(dp, "reg", NULL);
	devhandle = (pregs->phys_addr >> 32UL) & 0x0fffffff;
	dp->irq_trans->data = (void *) devhandle;
}
/* Attach an IRQ translation controller to @dp if it is a recognized
 * interrupt parent (PCI host bridge, SBUS, central FHC, or sun4v
 * virtual-devices/niu node).  Nodes without a match are left untouched.
 */
void __init irq_trans_init(struct device_node *dp)
{
#ifdef CONFIG_PCI
	const char *model;
	int i;
#endif
#ifdef CONFIG_PCI
	/* PCI bridges are identified by "model", falling back to
	 * "compatible", matched against pci_irq_trans_table.
	 */
	model = of_get_property(dp, "model", NULL);
	if (!model)
		model = of_get_property(dp, "compatible", NULL);
	if (model) {
		for (i = 0; i < ARRAY_SIZE(pci_irq_trans_table); i++) {
			struct irq_trans *t = &pci_irq_trans_table[i];
			if (!strcmp(model, t->name)) {
				t->init(dp);
				return;
			}
		}
	}
#endif
#ifdef CONFIG_SBUS
	if (!strcmp(dp->name, "sbus") ||
	    !strcmp(dp->name, "sbi")) {
		sbus_irq_trans_init(dp);
		return;
	}
#endif
	/* "fhc" only counts when directly under the "central" bridge. */
	if (!strcmp(dp->name, "fhc") &&
	    !strcmp(dp->parent->name, "central")) {
		central_irq_trans_init(dp);
		return;
	}
	if (!strcmp(dp->name, "virtual-devices") ||
	    !strcmp(dp->name, "niu")) {
		sun4v_vdev_irq_trans_init(dp);
		return;
	}
}
| gpl-2.0 |
vic3t3chn0/Samsung_Wave_Kernel_SD_NAND | arch/arm/mach-omap2/powerdomains2xxx_data.c | 2526 | 2777 | /*
* OMAP2XXX powerdomain definitions
*
* Copyright (C) 2007-2008 Texas Instruments, Inc.
* Copyright (C) 2007-2011 Nokia Corporation
*
* Paul Walmsley, Jouni Högander
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include "powerdomain.h"
#include "powerdomains2xxx_3xxx_data.h"
#include "prcm-common.h"
#include "prm2xxx_3xxx.h"
#include "prm-regbits-24xx.h"
/* 24XX powerdomains and dependencies */
/* Powerdomains */
/* DSP power domain (all OMAP24xx): one memory bank, logic retained in RET. */
static struct powerdomain dsp_pwrdm = {
	.name		  = "dsp_pwrdm",
	.prcm_offs	  = OMAP24XX_DSP_MOD,
	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX),
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET,
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,
	},
};
/* MPU power domain (all OMAP24xx): logic may go OFF or RET in retention. */
static struct powerdomain mpu_24xx_pwrdm = {
	.name		  = "mpu_pwrdm",
	.prcm_offs	  = MPU_MOD,
	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX),
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_OFF_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET,
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,
	},
};
/* CORE power domain (all OMAP24xx): three independently controlled
 * memory banks.
 */
static struct powerdomain core_24xx_pwrdm = {
	.name		  = "core_pwrdm",
	.prcm_offs	  = CORE_MOD,
	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP24XX),
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.banks		  = 3,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_OFF_RET,	 /* MEM1RETSTATE */
		[1] = PWRSTS_OFF_RET,	 /* MEM2RETSTATE */
		[2] = PWRSTS_OFF_RET,	 /* MEM3RETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */
		[1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */
		[2] = PWRSTS_OFF_RET_ON, /* MEM3ONSTATE */
	},
};
/*
 * 2430-specific powerdomains
 */
#ifdef CONFIG_SOC_OMAP2430
/* XXX 2430 KILLDOMAINWKUP bit?  No current users apparently */
/* Modem power domain (OMAP2430 only). */
static struct powerdomain mdm_pwrdm = {
	.name		  = "mdm_pwrdm",
	.prcm_offs	  = OMAP2430_MDM_MOD,
	.omap_chip	  = OMAP_CHIP_INIT(CHIP_IS_OMAP2430),
	.pwrsts		  = PWRSTS_OFF_RET_ON,
	.pwrsts_logic_ret = PWRSTS_RET,
	.banks		  = 1,
	.pwrsts_mem_ret	  = {
		[0] = PWRSTS_RET,	/* MEMRETSTATE */
	},
	.pwrsts_mem_on	  = {
		[0] = PWRSTS_ON,	/* MEMONSTATE */
	},
};
#endif /* CONFIG_SOC_OMAP2430 */
/* As powerdomains are added or removed above, this list must also be changed */
/* NULL-terminated registration list passed to pwrdm_init(). */
static struct powerdomain *powerdomains_omap2xxx[] __initdata = {
	&wkup_omap2_pwrdm,	/* defined in powerdomains2xxx_3xxx_data.h */
	&gfx_omap2_pwrdm,
#ifdef CONFIG_ARCH_OMAP2
	&dsp_pwrdm,
	&mpu_24xx_pwrdm,
	&core_24xx_pwrdm,
#endif
#ifdef CONFIG_SOC_OMAP2430
	&mdm_pwrdm,
#endif
	NULL
};
/* Register all OMAP2xxx powerdomains with the powerdomain framework. */
void __init omap2xxx_powerdomains_init(void)
{
	pwrdm_init(powerdomains_omap2xxx, &omap2_pwrdm_operations);
}
| gpl-2.0 |
goodhanrry/n9200_goodhanrry_kernel | arch/mips/jz4740/irq.c | 3038 | 4022 | /*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
* JZ4740 platform IRQ support
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/irq_cpu.h>
#include <asm/mach-jz4740/base.h>
/* Virtual base of the INTC MMIO window, set up in arch_init_irq(). */
static void __iomem *jz_intc_base;
/* INTC register offsets relative to jz_intc_base. */
#define JZ_REG_INTC_STATUS	0x00
#define JZ_REG_INTC_MASK	0x04
#define JZ_REG_INTC_SET_MASK	0x08
#define JZ_REG_INTC_CLEAR_MASK	0x0c
#define JZ_REG_INTC_PENDING	0x10
/* Cascade handler: dispatch the highest pending INTC interrupt, if any. */
static irqreturn_t jz4740_cascade(int irq, void *data)
{
	uint32_t pending = readl(jz_intc_base + JZ_REG_INTC_PENDING);

	if (pending)
		generic_handle_irq(__fls(pending) + JZ4740_IRQ_BASE);

	return IRQ_HANDLED;
}
/* Program the INTC mask: bits set in @mask become enabled, all other
 * interrupts are masked.
 */
static void jz4740_irq_set_mask(struct irq_chip_generic *gc, uint32_t mask)
{
	struct irq_chip_regs *regs = &gc->chip_types->regs;
	void __iomem *base = gc->reg_base;

	writel(mask, base + regs->enable);
	writel(~mask, base + regs->disable);
}
/* Suspend hook: keep only wakeup-enabled interrupts unmasked. */
void jz4740_irq_suspend(struct irq_data *data)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);

	jz4740_irq_set_mask(gc, gc->wake_active);
}
/* Resume hook: restore the pre-suspend interrupt mask. */
void jz4740_irq_resume(struct irq_data *data)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);

	jz4740_irq_set_mask(gc, gc->mask_cache);
}
/* Cascade handler hooked to MIPS CPU IRQ 2 in arch_init_irq(). */
static struct irqaction jz4740_cascade_action = {
	.handler = jz4740_cascade,
	.name = "JZ4740 cascade interrupt",
};
/* Platform IRQ initialization: set up the MIPS CPU IRQs, map the INTC,
 * mask everything, build a 32-interrupt generic irq chip on top of the
 * INTC's set/clear mask registers, and hook the cascade to CPU IRQ 2.
 */
void __init arch_init_irq(void)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	mips_cpu_irq_init();
	jz_intc_base = ioremap(JZ4740_INTC_BASE_ADDR, 0x14);
	/* Mask all irqs */
	writel(0xffffffff, jz_intc_base + JZ_REG_INTC_SET_MASK);
	gc = irq_alloc_generic_chip("INTC", 1, JZ4740_IRQ_BASE, jz_intc_base,
				    handle_level_irq);
	gc->wake_enabled = IRQ_MSK(32);
	ct = gc->chip_types;
	/* enable = clear-mask register, disable = set-mask register */
	ct->regs.enable = JZ_REG_INTC_CLEAR_MASK;
	ct->regs.disable = JZ_REG_INTC_SET_MASK;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->chip.irq_mask = irq_gc_mask_disable_reg;
	ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
	ct->chip.irq_set_wake = irq_gc_set_wake;
	ct->chip.irq_suspend = jz4740_irq_suspend;
	ct->chip.irq_resume = jz4740_irq_resume;
	irq_setup_generic_chip(gc, IRQ_MSK(32), 0, 0, IRQ_NOPROBE | IRQ_LEVEL);
	setup_irq(2, &jz4740_cascade_action);
}
/* Top-level MIPS interrupt dispatch: service IP2 (INTC cascade) first,
 * then IP3; anything else is spurious.
 */
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;

	if (pending & STATUSF_IP2) {
		do_IRQ(2);
		return;
	}
	if (pending & STATUSF_IP3) {
		do_IRQ(3);
		return;
	}
	spurious_interrupt();
}
#ifdef CONFIG_DEBUG_FS
static inline void intc_seq_reg(struct seq_file *s, const char *name,
unsigned int reg)
{
seq_printf(s, "%s:\t\t%08x\n", name, readl(jz_intc_base + reg));
}
static int intc_regs_show(struct seq_file *s, void *unused)
{
intc_seq_reg(s, "Status", JZ_REG_INTC_STATUS);
intc_seq_reg(s, "Mask", JZ_REG_INTC_MASK);
intc_seq_reg(s, "Pending", JZ_REG_INTC_PENDING);
return 0;
}
/* debugfs open: single-shot seq_file, no private data. */
static int intc_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, intc_regs_show, NULL);
}
/* debugfs file_operations for the register dump; standard seq_file glue. */
static const struct file_operations intc_regs_operations = {
	.open		= intc_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Create the debugfs register-dump file.  Failures are deliberately
 * ignored; the file is purely diagnostic.
 */
static int __init intc_debugfs_init(void)
{
	(void) debugfs_create_file("jz_regs_intc", S_IFREG | S_IRUGO,
				   NULL, NULL, &intc_regs_operations);
	return 0;
}
subsys_initcall(intc_debugfs_init);
#endif
| gpl-2.0 |
slukk/mako_msm | drivers/s390/net/qeth_core_main.c | 3806 | 156531 | /*
* drivers/s390/net/qeth_core_main.c
*
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <net/iucv/af_iucv.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/sysinfo.h>
#include <asm/compat.h>
#include "qeth_core.h"
/* s390 debug-feature areas used throughout the driver, indexed by the
 * QETH_DBF_* constants.
 */
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*		     N  P  A  M  L  V			H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
		8, 1, 8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG] = {"qeth_msg",
		8, 1, 128, 3, &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL] = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);
/* Global list of all qeth cards known to the core. */
struct qeth_card_list_struct qeth_core_card_list;
EXPORT_SYMBOL_GPL(qeth_core_card_list);
/* Slab cache for qeth packet headers. */
struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
static unsigned int known_devices[][6] = QETH_MODELLIST_ARRAY;
static struct lock_class_key qdio_out_skb_queue_key;
static struct mutex qeth_mod_mutex;
/* Forward declarations for helpers defined later in this file. */
static void qeth_send_control_data_cb(struct qeth_channel *,
			struct qeth_cmd_buffer *);
static int qeth_issue_next_read(struct qeth_card *);
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_buffers(struct qeth_card *);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification);
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf,
		enum qeth_qdio_buffer_states newbufstate);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
/* Long human-readable card type name (note the leading space; callers
 * concatenate it after other text).  Guest-LAN variants are reported
 * separately from real hardware.
 */
static inline const char *qeth_get_cardname(struct qeth_card *card)
{
	if (card->info.guestlan) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " Guest LAN QDIO";
		case QETH_CARD_TYPE_IQD:
			return " Guest LAN Hiper";
		case QETH_CARD_TYPE_OSM:
			return " Guest LAN QDIO - OSM";
		case QETH_CARD_TYPE_OSX:
			return " Guest LAN QDIO - OSX";
		default:
			return " unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return " OSD Express";
		case QETH_CARD_TYPE_IQD:
			return " HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return " OSN QDIO";
		case QETH_CARD_TYPE_OSM:
			return " OSM QDIO";
		case QETH_CARD_TYPE_OSX:
			return " OSX QDIO";
		default:
			return " unknown";
		}
	}
	return " n/a";	/* unreachable; both branches return above */
}
/* max length to be returned: 14 */
/* Short card type name; for real OSD cards the link type is folded in. */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
	if (card->info.guestlan) {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			return "GuestLAN QDIO";
		case QETH_CARD_TYPE_IQD:
			return "GuestLAN Hiper";
		case QETH_CARD_TYPE_OSM:
			return "GuestLAN OSM";
		case QETH_CARD_TYPE_OSX:
			return "GuestLAN OSX";
		default:
			return "unknown";
		}
	} else {
		switch (card->info.type) {
		case QETH_CARD_TYPE_OSD:
			switch (card->info.link_type) {
			case QETH_LINK_TYPE_FAST_ETH:
				return "OSD_100";
			case QETH_LINK_TYPE_HSTR:
				return "HSTR";
			case QETH_LINK_TYPE_GBIT_ETH:
				return "OSD_1000";
			case QETH_LINK_TYPE_10GBIT_ETH:
				return "OSD_10GIG";
			case QETH_LINK_TYPE_LANE_ETH100:
				return "OSD_FE_LANE";
			case QETH_LINK_TYPE_LANE_TR:
				return "OSD_TR_LANE";
			case QETH_LINK_TYPE_LANE_ETH1000:
				return "OSD_GbE_LANE";
			case QETH_LINK_TYPE_LANE:
				return "OSD_ATM_LANE";
			default:
				return "OSD_Express";
			}
		case QETH_CARD_TYPE_IQD:
			return "HiperSockets";
		case QETH_CARD_TYPE_OSN:
			return "OSN";
		case QETH_CARD_TYPE_OSM:
			return "OSM_1000";
		case QETH_CARD_TYPE_OSX:
			return "OSX_10GIG";
		default:
			return "unknown";
		}
	}
	return "n/a";	/* unreachable; both branches return above */
}
/* Restrict which driver threads may run on @card; optionally also
 * clear pending start requests for now-disallowed threads.  Wakes any
 * waiters so they re-evaluate the masks.
 */
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			int clear_start_mask)
{
	unsigned long lock_flags;

	spin_lock_irqsave(&card->thread_mask_lock, lock_flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, lock_flags);

	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
/* Non-zero iff any thread in @threads is currently running on @card. */
int qeth_threads_running(struct qeth_card *card, unsigned long threads)
{
	unsigned long lock_flags;
	int running;

	spin_lock_irqsave(&card->thread_mask_lock, lock_flags);
	running = card->thread_running_mask & threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, lock_flags);

	return running;
}
EXPORT_SYMBOL_GPL(qeth_threads_running);
/* Sleep (interruptibly) until none of @threads is running on @card. */
int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
{
	return wait_event_interruptible(card->wait_q,
			qeth_threads_running(card, threads) == 0);
}
EXPORT_SYMBOL_GPL(qeth_wait_for_threads);
void qeth_clear_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
QETH_CARD_TEXT(card, 5, "clwrklst");
list_for_each_entry_safe(pool_entry, tmp,
&card->qdio.in_buf_pool.entry_list, list){
list_del(&pool_entry->list);
}
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
/* Allocate the configured number of input-buffer-pool entries, each
 * holding QETH_MAX_BUFFER_ELEMENTS pages.  On any allocation failure
 * the partially built state is torn down and -ENOMEM returned.
 */
static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry;
	void *ptr;
	int i, j;
	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
		if (!pool_entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}
		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
			ptr = (void *) __get_free_page(GFP_KERNEL);
			if (!ptr) {
				/* Unwind the pages of this entry, then
				 * drop everything added so far.
				 */
				while (j > 0)
					free_page((unsigned long)
						  pool_entry->elements[--j]);
				kfree(pool_entry);
				qeth_free_buffer_pool(card);
				return -ENOMEM;
			}
			pool_entry->elements[j] = ptr;
		}
		list_add(&pool_entry->init_list,
			 &card->qdio.init_pool.entry_list);
	}
	return 0;
}
/* Resize the input buffer pool to @bufcnt entries.  Only permitted
 * while the card is down or recovering; returns -EPERM otherwise.
 */
int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
	QETH_CARD_TEXT(card, 2, "realcbp");

	if (card->state != CARD_STATE_DOWN &&
	    card->state != CARD_STATE_RECOVER)
		return -EPERM;

	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
	qeth_clear_working_pool_list(card);
	qeth_free_buffer_pool(card);
	card->qdio.in_buf_pool.buf_count = bufcnt;
	card->qdio.init_pool.buf_count = bufcnt;
	return qeth_alloc_buffer_pool(card);
}
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
/* Initialize the completion queue (CQ), if enabled: zero its buffers
 * and hand all 128 of them to QDIO.  Returns 0 on success or when the
 * CQ is disabled.
 */
static inline int qeth_cq_init(struct qeth_card *card)
{
	int rc = 0;

	if (card->options.cq == QETH_CQ_ENABLED) {
		QETH_DBF_TEXT(SETUP, 2, "cqinit");
		memset(card->qdio.c_q->qdio_bufs, 0,
		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
		card->qdio.c_q->next_buf_to_init = 127;
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
			     card->qdio.no_in_queues - 1, 0, 127);
		if (rc)
			QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
	}
	return rc;
}
/* Allocate the completion queue and the per-outbuf state array when CQ
 * support is enabled; otherwise just record that only one input queue
 * exists.  Returns 0 on success, -1 on allocation failure (with an
 * error message logged).
 */
static inline int qeth_alloc_cq(struct qeth_card *card)
{
	int rc;
	if (card->options.cq == QETH_CQ_ENABLED) {
		int i;
		struct qdio_outbuf_state *outbuf_states;
		QETH_DBF_TEXT(SETUP, 2, "cqon");
		card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q),
					 GFP_KERNEL);
		if (!card->qdio.c_q) {
			rc = -1;
			goto kmsg_out;
		}
		QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *));
		for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
			card->qdio.c_q->bufs[i].buffer =
				&card->qdio.c_q->qdio_bufs[i];
		}
		card->qdio.no_in_queues = 2;
		/* One qdio_outbuf_state per buffer of every output queue. */
		card->qdio.out_bufstates = (struct qdio_outbuf_state *)
			kzalloc(card->qdio.no_out_queues *
				QDIO_MAX_BUFFERS_PER_Q *
				sizeof(struct qdio_outbuf_state), GFP_KERNEL);
		outbuf_states = card->qdio.out_bufstates;
		if (outbuf_states == NULL) {
			rc = -1;
			goto free_cq_out;
		}
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			card->qdio.out_qs[i]->bufstates = outbuf_states;
			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
		}
	} else {
		QETH_DBF_TEXT(SETUP, 2, "nocq");
		card->qdio.c_q = NULL;
		card->qdio.no_in_queues = 1;
	}
	QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues);
	rc = 0;
out:
	return rc;
free_cq_out:
	kfree(card->qdio.c_q);
	card->qdio.c_q = NULL;
kmsg_out:
	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
	goto out;
}
/* Release the completion queue (if any) and the outbuf state array. */
static inline void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q != NULL) {
		card->qdio.no_in_queues--;
		kfree(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}
/* Map an SBAL flag-15 error code onto an af_iucv TX notification,
 * choosing the delayed variant when @delayed is set.
 */
static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
								int delayed)
{
	if (sbalf15 == 0)
		return delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;

	if (sbalf15 == 4 || (sbalf15 >= 16 && sbalf15 <= 18))
		return delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
				 TX_NOTIFY_UNREACHABLE;

	return delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
			 TX_NOTIFY_GENERALERROR;
}
/* Walk the next_pending chain of output buffer @bidx and free every
 * buffer whose delayed completion has been handled (or all of them
 * when @forced_cleanup is set, e.g. during recovery).  Only relevant
 * with an enabled completion queue.
 */
static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
	int bidx, int forced_cleanup)
{
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			      QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;
				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
				qeth_release_skbs(c);
				/* Unlink c: head must have pointed at it. */
				c = f->next_pending;
				BUG_ON(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				head = c;
				c = c->next_pending;
			}
		}
	}
	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
					QETH_QDIO_BUF_HANDLED_DELAYED)) {
		/* for recovery situations */
		q->bufs[bidx]->aob = q->bufstates[bidx].aob;
		qeth_init_qdio_out_buf(q, bidx);
		QETH_CARD_TEXT(q->card, 2, "clprecov");
	}
}
/* Process one asynchronous operation block (AOB) delivered through the
 * completion queue: derive the TX notification from the buffer state
 * and the AOB return code, notify the skbs, and recycle the buffer.
 */
static inline void qeth_qdio_handle_aob(struct qeth_card *card,
		unsigned long phys_aob_addr)
{
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	/* user1 was set to the buffer pointer when the AOB was armed. */
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
	BUG_ON(buffer == NULL);

	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
		notification = TX_NOTIFY_OK;
	} else {
		/* AOB arrived after the buffer was flagged PENDING. */
		BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
		notification = TX_NOTIFY_DELAYED_OK;
	}

	if (aob->aorc != 0) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		notification = qeth_compute_cq_notification(aob->aorc, 1);
	}
	qeth_notify_skbs(buffer->q, buffer, notification);

	buffer->aob = NULL;
	qeth_clear_output_buffer(buffer->q, buffer,
				 QETH_QDIO_BUF_HANDLED_DELAYED);
	/* from here on: do not touch buffer anymore */
	qdio_release_aob(aob);
}
/* True iff @queue is the completion queue (the last input queue) of an
 * enabled-CQ configuration.
 */
static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
{
	if (card->options.cq != QETH_CQ_ENABLED)
		return 0;
	if (card->qdio.c_q == NULL)
		return 0;
	return queue != 0 && queue == card->qdio.no_in_queues - 1;
}
/* Post the next read CCW on the card's read channel.  On failure the
 * channel is marked broken and recovery is scheduled.
 */
static int qeth_issue_next_read(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (card->read.state != CH_STATE_UP)
		return -EIO;
	iob = qeth_get_buffer(&card->read);
	if (!iob) {
		dev_warn(&card->gdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
			"available\n", dev_name(&card->gdev->dev));
		return -ENOMEM;
	}
	qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
			      (addr_t) iob, 0, 0);
	if (rc) {
		/* Channel is unusable: flag the problem and recover. */
		QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
			"rc=%i\n", dev_name(&card->gdev->dev), rc);
		atomic_set(&card->read.irq_pending, 0);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
		wake_up(&card->wait_q);
	}
	return rc;
}
static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
struct qeth_reply *reply;
reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
if (reply) {
atomic_set(&reply->refcnt, 1);
atomic_set(&reply->received, 0);
reply->card = card;
};
return reply;
}
/* Take an additional reference on @reply. */
static void qeth_get_reply(struct qeth_reply *reply)
{
	WARN_ON(atomic_read(&reply->refcnt) <= 0);
	atomic_inc(&reply->refcnt);
}
/* Drop a reference on @reply; frees it when the last reference goes. */
static void qeth_put_reply(struct qeth_reply *reply)
{
	WARN_ON(atomic_read(&reply->refcnt) <= 0);
	if (atomic_dec_and_test(&reply->refcnt))
		kfree(reply);
}
/* Log the outcome of an IPA command: level 2 with the translated error
 * text on failure, level 5 on success.
 */
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
		struct qeth_card *card)
{
	char *ipa_name;
	int com = cmd->hdr.command;
	ipa_name = qeth_get_ipa_cmd_name(com);
	if (rc)
		QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
				"x%X \"%s\"\n",
				ipa_name, com, dev_name(&card->gdev->dev),
				QETH_CARD_IFNAME(card), rc,
				qeth_get_ipa_msg(rc));
	else
		QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
				ipa_name, com, dev_name(&card->gdev->dev),
				QETH_CARD_IFNAME(card));
}
/*
 * qeth_check_ipa_data() - inspect received control data for IPA content.
 * @card: card the buffer was received on.
 * @iob:  received command buffer.
 *
 * Returns a pointer to the embedded IPA command when the caller should
 * process it further (replies, MODCCID), or NULL when the event was
 * fully handled here (STOPLAN/STARTLAN link state changes) or the
 * buffer carries no IPA data at all.
 */
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_ipa_cmd *cmd = NULL;

	QETH_CARD_TEXT(card, 5, "chkipad");
	if (IS_IPA(iob->data)) {
		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
		if (IS_IPA_REPLY(cmd)) {
			/* CCID and diag-assist replies are not logged here */
			if (cmd->hdr.command != IPA_CMD_SETCCID &&
			    cmd->hdr.command != IPA_CMD_DELCCID &&
			    cmd->hdr.command != IPA_CMD_MODCCID &&
			    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
				qeth_issue_ipa_msg(cmd,
						cmd->hdr.return_code, card);
			return cmd;
		} else {
			/* unsolicited command coming from the device */
			switch (cmd->hdr.command) {
			case IPA_CMD_STOPLAN:
				/* link went down: drop the carrier */
				dev_warn(&card->gdev->dev,
					   "The link for interface %s on CHPID"
					   " 0x%X failed\n",
					   QETH_CARD_IFNAME(card),
					   card->info.chpid);
				card->lan_online = 0;
				if (card->dev && netif_carrier_ok(card->dev))
					netif_carrier_off(card->dev);
				return NULL;
			case IPA_CMD_STARTLAN:
				/* link restored: carrier on, re-run recovery
				 * (hwtrap 2 re-arms the hardware trap there) */
				dev_info(&card->gdev->dev,
					   "The link for %s on CHPID 0x%X has"
					   " been restored\n",
					   QETH_CARD_IFNAME(card),
					   card->info.chpid);
				netif_carrier_on(card->dev);
				card->lan_online = 1;
				if (card->info.hwtrap)
					card->info.hwtrap = 2;
				qeth_schedule_recovery(card);
				return NULL;
			case IPA_CMD_MODCCID:
				return cmd;
			case IPA_CMD_REGISTER_LOCAL_ADDR:
				QETH_CARD_TEXT(card, 3, "irla");
				break;
			case IPA_CMD_UNREGISTER_LOCAL_ADDR:
				QETH_CARD_TEXT(card, 3, "urla");
				break;
			default:
				QETH_DBF_MESSAGE(2, "Received data is IPA "
					   "but not a reply!\n");
				break;
			}
		}
	}
	return cmd;
}
/*
 * qeth_clear_ipacmd_list() - fail all commands waiting for a reply.
 * @card: card whose waiter list is flushed.
 *
 * Marks every pending reply with -EIO and wakes its waiter; used when
 * the control path has seen a fatal error.  Also clears the write
 * channel's irq_pending so new I/O can be started.
 */
void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_reply *reply, *r;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		/* hold a reference while we unlink and wake the waiter */
		qeth_get_reply(reply);
		reply->rc = -EIO;
		atomic_inc(&reply->received);
		list_del_init(&reply->list);
		wake_up(&reply->wait_q);
		qeth_put_reply(reply);
	}
	spin_unlock_irqrestore(&card->lock, flags);
	atomic_set(&card->write.irq_pending, 0);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
/*
 * qeth_check_idx_response() - check control data for an IDX TERMINATE.
 * @card:   card the data belongs to (used for debug traces).
 * @buffer: raw control data; NULL is tolerated and treated as OK.
 *
 * Returns 0 when the data is not a termination indication, -EPERM for
 * cause code 0xf6 (z/VM OSI layer misconfiguration), -EIO otherwise.
 */
static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	if (!buffer)
		return 0;
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	/* top two bits of byte 2 set -> IDX TERMINATE; byte 4 = cause */
	if ((buffer[2] & 0xc0) == 0xc0) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE "
			   "with cause code 0x%02x%s\n",
			   buffer[4],
			   ((buffer[4] == 0x22) ?
			    " -- try another portname" : ""));
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		if (buffer[4] == 0xf6) {
			dev_err(&card->gdev->dev,
			"The qeth device is not configured "
			"for the OSI layer required by z/VM\n");
			return -EPERM;
		}
		return -EIO;
	}
	return 0;
}
/*
 * qeth_setup_ccw() - prepare a channel's CCW for the next I/O.
 * @channel: read or write channel of a card.
 * @iob:     data buffer the CCW should address.
 * @len:     transfer count.
 *
 * Copies the read or write CCW template (depending on which channel
 * this is) and patches in count and data address.
 */
static void qeth_setup_ccw(struct qeth_channel *channel, unsigned char *iob,
		__u32 len)
{
	struct qeth_card *card = CARD_FROM_CDEV(channel->ccwdev);
	const void *tmpl;

	QETH_CARD_TEXT(card, 4, "setupccw");
	if (channel == &card->read)
		tmpl = READ_CCW;
	else
		tmpl = WRITE_CCW;
	memcpy(&channel->ccw, tmpl, sizeof(struct ccw1));
	channel->ccw.count = len;
	channel->ccw.cda = (__u32) __pa(iob);
}
/*
 * __qeth_get_buffer() - grab a free command buffer from @channel.
 *
 * Scans the iob ring starting at io_buf_no for a BUF_STATE_FREE entry,
 * locks and zeroes it.  Caller holds channel->iob_lock (see
 * qeth_get_buffer()).
 *
 * Returns the buffer, or NULL when a full scan of the ring found no
 * free entry.
 */
static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
{
	__u8 index;

	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff");
	index = channel->io_buf_no;
	do {
		if (channel->iob[index].state == BUF_STATE_FREE) {
			channel->iob[index].state = BUF_STATE_LOCKED;
			/* remember where the next search should start */
			channel->io_buf_no = (channel->io_buf_no + 1) %
				QETH_CMD_BUFFER_NO;
			memset(channel->iob[index].data, 0, QETH_BUFSIZE);
			return channel->iob + index;
		}
		index = (index + 1) % QETH_CMD_BUFFER_NO;
	} while (index != channel->io_buf_no);
	return NULL;
}
/*
 * qeth_release_buffer() - return a command buffer to the free pool.
 * @channel: channel owning the buffer ring.
 * @iob:     buffer to recycle.
 *
 * Resets payload, state, callback and rc under iob_lock, then wakes
 * anybody sleeping in qeth_wait_for_buffer().
 */
void qeth_release_buffer(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	unsigned long flags;

	QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff");
	spin_lock_irqsave(&channel->iob_lock, flags);
	memset(iob->data, 0, QETH_BUFSIZE);
	iob->state = BUF_STATE_FREE;
	iob->callback = qeth_send_control_data_cb;
	iob->rc = 0;
	spin_unlock_irqrestore(&channel->iob_lock, flags);
	wake_up(&channel->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_release_buffer);
/* Locked wrapper around __qeth_get_buffer(); may return NULL. */
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_cmd_buffer *iob;

	spin_lock_irqsave(&channel->iob_lock, flags);
	iob = __qeth_get_buffer(channel);
	spin_unlock_irqrestore(&channel->iob_lock, flags);
	return iob;
}
/*
 * qeth_wait_for_buffer() - sleep until a command buffer is available.
 *
 * Blocks on the channel's wait queue (woken by qeth_release_buffer())
 * and therefore never returns NULL.
 */
struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel)
{
	struct qeth_cmd_buffer *buffer;

	wait_event(channel->wait_q,
		   ((buffer = qeth_get_buffer(channel)) != NULL));
	return buffer;
}
EXPORT_SYMBOL_GPL(qeth_wait_for_buffer);
/* Return every command buffer of @channel to the free state and reset
 * the ring indices. */
void qeth_clear_cmd_buffers(struct qeth_channel *channel)
{
	int i = 0;

	while (i < QETH_CMD_BUFFER_NO) {
		qeth_release_buffer(channel, &channel->iob[i]);
		i++;
	}
	channel->buf_no = 0;
	channel->io_buf_no = 0;
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
/*
 * qeth_send_control_data_cb() - default completion handler for control
 * data received on a channel.
 * @channel: channel the buffer arrived on.
 * @iob:     buffer holding the received control data.
 *
 * Validates the data (IDX termination check), lets qeth_check_ipa_data()
 * handle unsolicited events, routes OSN commands to the OSN assist
 * callback, and otherwise matches the data against the list of waiting
 * replies by sequence number, running the reply callback and waking the
 * waiter.  Always records the peer's PDU header ack sequence number and
 * recycles the buffer.
 */
static void qeth_send_control_data_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	struct qeth_reply *reply, *r;
	struct qeth_ipa_cmd *cmd;
	unsigned long flags;
	int keep_reply;
	int rc = 0;

	card = CARD_FROM_CDEV(channel->ccwdev);
	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		/* IDX terminated: fail all waiters and start recovery */
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
		/* fall through */
	default:
		goto out;
	}
	cmd = qeth_check_ipa_data(card, iob);
	if ((cmd == NULL) && (card->state != CARD_STATE_DOWN))
		goto out;
	/*in case of OSN : check if cmd is set */
	if (card->info.type == QETH_CARD_TYPE_OSN &&
	    cmd &&
	    cmd->hdr.command != IPA_CMD_STARTLAN &&
	    card->osn_info.assist_cb != NULL) {
		card->osn_info.assist_cb(card->dev, cmd);
		goto out;
	}
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
		if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) ||
		    ((cmd) && (reply->seqno == cmd->hdr.seqno))) {
			qeth_get_reply(reply);
			list_del_init(&reply->list);
			/* run the reply callback without holding card->lock */
			spin_unlock_irqrestore(&card->lock, flags);
			keep_reply = 0;
			if (reply->callback != NULL) {
				if (cmd) {
					reply->offset = (__u16)((char *)cmd -
							(char *)iob->data);
					keep_reply = reply->callback(card,
							reply,
							(unsigned long)cmd);
				} else
					keep_reply = reply->callback(card,
							reply,
							(unsigned long)iob);
			}
			if (cmd)
				reply->rc = (u16) cmd->hdr.return_code;
			else if (iob->rc)
				reply->rc = iob->rc;
			if (keep_reply) {
				/* callback expects more data: re-queue */
				spin_lock_irqsave(&card->lock, flags);
				list_add_tail(&reply->list,
					      &card->cmd_waiter_list);
				spin_unlock_irqrestore(&card->lock, flags);
			} else {
				atomic_inc(&reply->received);
				wake_up(&reply->wait_q);
			}
			qeth_put_reply(reply);
			goto out;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);
out:
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	qeth_release_buffer(channel, iob);
}
/*
 * qeth_setup_channel() - allocate a channel's command buffer ring.
 *
 * Allocates QETH_CMD_BUFFER_NO zeroed, DMA-capable buffers and resets
 * the channel bookkeeping (counters, irq_pending, lock, wait queue).
 *
 * Returns 0 on success or -ENOMEM after unwinding the buffers that
 * were already allocated.
 */
static int qeth_setup_channel(struct qeth_channel *channel)
{
	int cnt;

	QETH_DBF_TEXT(SETUP, 2, "setupch");
	for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) {
		channel->iob[cnt].data =
			kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL);
		if (channel->iob[cnt].data == NULL)
			break;
		channel->iob[cnt].state = BUF_STATE_FREE;
		channel->iob[cnt].channel = channel;
		channel->iob[cnt].callback = qeth_send_control_data_cb;
		channel->iob[cnt].rc = 0;
	}
	if (cnt < QETH_CMD_BUFFER_NO) {
		/* partial allocation: roll back what we got so far */
		while (cnt-- > 0)
			kfree(channel->iob[cnt].data);
		return -ENOMEM;
	}
	channel->buf_no = 0;
	channel->io_buf_no = 0;
	atomic_set(&channel->irq_pending, 0);
	spin_lock_init(&channel->iob_lock);
	init_waitqueue_head(&channel->wait_q);
	return 0;
}
/*
 * qeth_set_thread_start_bit() - request that @thread be started.
 *
 * Returns 0 when the request was recorded, -EPERM when the thread is
 * not allowed or a start is already pending.
 */
static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread) ||
	    (card->thread_start_mask & thread))
		rc = -EPERM;
	else
		card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
/* Withdraw a pending start request for @thread and wake waiters. */
void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_start_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
/* Mark @thread as no longer running and wake waiters. */
void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_running_mask &= ~thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
/*
 * __qeth_do_run_thread() - try to move @thread from "started" to
 * "running".
 *
 * Returns 1 when the caller may run the thread (running bit now set),
 * 0 when no start was requested, and -EPERM when a start is pending
 * but the thread is currently not allowed or already running.
 */
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (card->thread_start_mask & thread) {
		if ((card->thread_allowed_mask & thread) &&
		    !(card->thread_running_mask & thread)) {
			rc = 1;
			/* start consumed: request -> running transition */
			card->thread_start_mask &= ~thread;
			card->thread_running_mask |= thread;
		} else
			rc = -EPERM;
	}
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return rc;
}
/*
 * qeth_do_run_thread() - wait until @thread may run or is not wanted.
 *
 * Sleeps until __qeth_do_run_thread() gives a non-negative verdict.
 * Returns 1 when the thread should run, 0 when there is nothing to do.
 */
int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
	int rc = 0;

	wait_event(card->wait_q,
		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);
/* Kick off the recovery thread unless one is already pending or the
 * thread is currently not allowed. */
void qeth_schedule_recovery(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "startrec");
	if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
		schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
/*
 * qeth_get_problem() - classify an interrupt as fatal or harmless.
 * @cdev: device the interrupt came from.
 * @irb:  its interrupt response block.
 *
 * Returns 1 when the channel status or sense data indicates a
 * condition requiring recovery (channel checks, resetting event,
 * command reject, 0xaf/0xfe sense), 0 otherwise — including a unit
 * check whose first four sense bytes are all zero.
 */
static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb)
{
	int dstat, cstat;
	char *sense;
	struct qeth_card *card;

	sense = (char *) irb->ecw;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;
	card = CARD_FROM_CDEV(cdev);

	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
		QETH_CARD_TEXT(card, 2, "CGENCHK");
		dev_warn(&cdev->dev, "The qeth device driver "
			"failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
			dev_name(&cdev->dev), dstat, cstat);
		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
			16, 1, irb, 64, 1);
		return 1;
	}
	if (dstat & DEV_STAT_UNIT_CHECK) {
		if (sense[SENSE_RESETTING_EVENT_BYTE] &
		    SENSE_RESETTING_EVENT_FLAG) {
			QETH_CARD_TEXT(card, 2, "REVIND");
			return 1;
		}
		if (sense[SENSE_COMMAND_REJECT_BYTE] &
		    SENSE_COMMAND_REJECT_FLAG) {
			QETH_CARD_TEXT(card, 2, "CMDREJi");
			return 1;
		}
		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
			QETH_CARD_TEXT(card, 2, "AFFE");
			return 1;
		}
		/* all-zero sense: nothing to recover from */
		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
			QETH_CARD_TEXT(card, 2, "ZEROSEN");
			return 0;
		}
		QETH_CARD_TEXT(card, 2, "DGENCHK");
		return 1;
	}
	return 0;
}
/*
 * __qeth_check_irb_error() - handle an IRB that is an error pointer.
 *
 * Returns 0 for a valid irb.  For ERR_PTR irbs the error is logged and
 * its PTR_ERR() value returned; -ETIMEDOUT during an RCD request on
 * the data channel additionally marks that channel DOWN and wakes
 * waiters so qeth_read_conf_data() can give up.
 */
static long __qeth_check_irb_error(struct ccw_device *cdev,
		unsigned long intparm, struct irb *irb)
{
	struct qeth_card *card;

	card = CARD_FROM_CDEV(cdev);

	if (!IS_ERR(irb))
		return 0;

	switch (PTR_ERR(irb)) {
	case -EIO:
		QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
			dev_name(&cdev->dev));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
		break;
	case -ETIMEDOUT:
		dev_warn(&cdev->dev, "A hardware operation timed out"
			" on the device\n");
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT);
		if (intparm == QETH_RCD_PARM) {
			if (card && (card->data.ccwdev == cdev)) {
				card->data.state = CH_STATE_DOWN;
				wake_up(&card->wait_q);
			}
		}
		break;
	default:
		QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
			dev_name(&cdev->dev), PTR_ERR(irb));
		QETH_CARD_TEXT(card, 2, "ckirberr");
		QETH_CARD_TEXT(card, 2, " rc???");
	}
	return PTR_ERR(irb);
}
/*
 * qeth_irq() - interrupt handler for all three qeth subchannels.
 * @cdev:    ccw device that raised the interrupt.
 * @intparm: parameter given to ccw_device_start(): the completed cmd
 *           buffer's address, one of the QETH_*_PARM magics, or 0.
 * @irb:     interrupt response block (may be an ERR_PTR).
 *
 * Identifies the channel, updates its state from the SCSW function
 * bits, handles unit check/exception conditions, marks the completed
 * command buffer PROCESSED and runs the callbacks of all consecutively
 * processed buffers in ring order.  On an up read channel the next
 * read is started immediately.
 */
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
		struct irb *irb)
{
	int rc;
	int cstat, dstat;
	struct qeth_cmd_buffer *buffer;
	struct qeth_channel *channel;
	struct qeth_card *card;
	struct qeth_cmd_buffer *iob;
	__u8 index;

	if (__qeth_check_irb_error(cdev, intparm, irb))
		return;
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	card = CARD_FROM_CDEV(cdev);
	if (!card)
		return;

	QETH_CARD_TEXT(card, 5, "irq");

	/* map the interrupting ccw device to one of the three channels */
	if (card->read.ccwdev == cdev) {
		channel = &card->read;
		QETH_CARD_TEXT(card, 5, "read");
	} else if (card->write.ccwdev == cdev) {
		channel = &card->write;
		QETH_CARD_TEXT(card, 5, "write");
	} else {
		channel = &card->data;
		QETH_CARD_TEXT(card, 5, "data");
	}
	atomic_set(&channel->irq_pending, 0);

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
		channel->state = CH_STATE_STOPPED;

	if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
		channel->state = CH_STATE_HALTED;

	/*let's wake up immediately on data channel*/
	if ((channel == &card->data) && (intparm != 0) &&
	    (intparm != QETH_RCD_PARM))
		goto out;

	if (intparm == QETH_CLEAR_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "clrchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if (intparm == QETH_HALT_CHANNEL_PARM) {
		QETH_CARD_TEXT(card, 6, "hltchpar");
		/* we don't have to handle this further */
		intparm = 0;
	}
	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
	    (dstat & DEV_STAT_UNIT_CHECK) ||
	    (cstat)) {
		if (irb->esw.esw0.erw.cons) {
			/* concurrent sense data is available: dump it */
			dev_warn(&channel->ccwdev->dev,
				"The qeth device driver failed to recover "
				"an error on the device\n");
			QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
				"0x%X dstat 0x%X\n",
				dev_name(&channel->ccwdev->dev), cstat, dstat);
			print_hex_dump(KERN_WARNING, "qeth: irb ",
				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
			print_hex_dump(KERN_WARNING, "qeth: sense data ",
				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
		}
		if (intparm == QETH_RCD_PARM) {
			channel->state = CH_STATE_DOWN;
			goto out;
		}
		rc = qeth_get_problem(cdev, irb);
		if (rc) {
			qeth_clear_ipacmd_list(card);
			qeth_schedule_recovery(card);
			goto out;
		}
	}

	if (intparm == QETH_RCD_PARM) {
		channel->state = CH_STATE_RCD_DONE;
		goto out;
	}
	if (intparm) {
		/* intparm carries the completed buffer's address */
		buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm);
		buffer->state = BUF_STATE_PROCESSED;
	}
	if (channel == &card->data)
		return;
	if (channel == &card->read &&
	    channel->state == CH_STATE_UP)
		qeth_issue_next_read(card);

	/* run the callbacks of all consecutively processed buffers */
	iob = channel->iob;
	index = channel->buf_no;
	while (iob[index].state == BUF_STATE_PROCESSED) {
		if (iob[index].callback != NULL)
			iob[index].callback(channel, iob + index);

		index = (index + 1) % QETH_CMD_BUFFER_NO;
	}
	channel->buf_no = index;
out:
	wake_up(&card->wait_q);
	return;
}
/*
 * qeth_notify_skbs() - deliver a TX notification for each AF_IUCV skb
 * queued on an output buffer.
 * @q:            output queue the buffer belongs to.
 * @buf:          buffer whose skb list is walked (not modified).
 * @notification: event to report via the socket's sk_txnotify hook.
 */
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
		struct qeth_qdio_out_buffer *buf,
		enum iucv_tx_notify notification)
{
	struct sk_buff *skb;

	if (skb_queue_empty(&buf->skb_list))
		goto out;
	skb = skb_peek(&buf->skb_list);
	while (skb) {
		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
		if (skb->protocol == ETH_P_AF_IUCV) {
			if (skb->sk) {
				struct iucv_sock *iucv = iucv_sk(skb->sk);
				iucv->sk_txnotify(skb, notification);
			}
		}
		/* walk the list without dequeueing */
		if (skb_queue_is_last(&buf->skb_list, skb))
			skb = NULL;
		else
			skb = skb_queue_next(&buf->skb_list, skb);
	}
out:
	return;
}
/*
 * qeth_release_skbs() - drop all skbs attached to an output buffer.
 *
 * If the buffer is still in PENDING state, AF_IUCV sockets get a
 * TX_NOTIFY_GENERALERROR notification before their skb is freed.
 */
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
{
	struct sk_buff *skb;
	struct iucv_sock *iucv;
	int notify_general_error = 0;

	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
		notify_general_error = 1;

	/* release may never happen from within CQ tasklet scope */
	BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);

	skb = skb_dequeue(&buf->skb_list);
	while (skb) {
		QETH_CARD_TEXT(buf->q->card, 5, "skbr");
		QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
		if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) {
			if (skb->sk) {
				iucv = iucv_sk(skb->sk);
				iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
			}
		}
		/* presumably pairs with a users increment taken when the
		 * skb was queued on this buffer — confirm at call sites */
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
		skb = skb_dequeue(&buf->skb_list);
	}
}
/*
 * qeth_clear_output_buffer() - reset a used outbound buffer.
 * @queue:       queue the buffer belongs to.
 * @buf:         buffer to reset.
 * @newbufstate: state to assign afterwards; QETH_QDIO_BUF_EMPTY also
 *               releases the attached skbs.
 */
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf,
		enum qeth_qdio_buffer_states newbufstate)
{
	int i;

	/* is PCI flag set on buffer? */
	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
		atomic_dec(&queue->set_pci_flags_count);

	if (newbufstate == QETH_QDIO_BUF_EMPTY) {
		qeth_release_skbs(buf);
	}
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
		/* free header elements we allocated from the header cache */
		if (buf->buffer->element[i].addr && buf->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
				buf->buffer->element[i].addr);
		buf->is_header[i] = 0;
		buf->buffer->element[i].length = 0;
		buf->buffer->element[i].addr = NULL;
		buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	buf->buffer->element[15].eflags = 0;
	buf->buffer->element[15].sflags = 0;
	buf->next_element_to_fill = 0;
	atomic_set(&buf->state, newbufstate);
}
static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
{
int j;
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (!q->bufs[j])
continue;
qeth_cleanup_handled_pending(q, j, 1);
qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
if (free) {
kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
q->bufs[j] = NULL;
}
}
}
/* Drain every outbound queue of @card so queued skbs are released;
 * the buffer descriptors themselves stay allocated. */
void qeth_clear_qdio_buffers(struct qeth_card *card)
{
	int q;

	QETH_CARD_TEXT(card, 2, "clearqdbf");
	/* clear outbound buffers to free skbs */
	for (q = 0; q < card->qdio.no_out_queues; ++q)
		if (card->qdio.out_qs[q])
			qeth_clear_outq_buffers(card->qdio.out_qs[q], 0);
}
EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
/* Free every entry of the inbound buffer pool: first the pages each
 * entry references, then the entry itself. */
static void qeth_free_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *entry, *n;
	int i;

	list_for_each_entry_safe(entry, n,
			&card->qdio.init_pool.entry_list, init_list) {
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
			free_page((unsigned long)entry->elements[i]);
		list_del(&entry->init_list);
		kfree(entry);
	}
}
/*
 * qeth_free_qdio_buffers() - release all QDIO queue memory of a card.
 *
 * The atomic_xchg() makes this idempotent: a second call sees
 * UNINITIALIZED and returns at once.  Frees the CQ, the inbound queue
 * with its rx skbs, the inbound buffer pool and all outbound queues.
 */
static void qeth_free_qdio_buffers(struct qeth_card *card)
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	qeth_free_cq(card);
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
		dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
	kfree(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	if (card->qdio.out_qs) {
		for (i = 0; i < card->qdio.no_out_queues; ++i) {
			qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
			kfree(card->qdio.out_qs[i]);
		}
		kfree(card->qdio.out_qs);
		card->qdio.out_qs = NULL;
	}
}
/* Counterpart to qeth_setup_channel(): free the iob data buffers. */
static void qeth_clean_channel(struct qeth_channel *channel)
{
	int i;

	QETH_DBF_TEXT(SETUP, 2, "freech");
	for (i = 0; i < QETH_CMD_BUFFER_NO; i++)
		kfree(channel->iob[i].data);
}
/*
 * qeth_get_channel_path_desc() - derive the queue setup from the
 * channel path descriptor of the data channel.
 *
 * For non-IQD cards, CHPP bit 6 selects between a single outbound
 * queue (no priority queueing) and four queues; switching the count
 * requires freeing any previously allocated QDIO buffers first.  Also
 * derives info.func_level from the descriptor byte.
 */
static void qeth_get_channel_path_desc(struct qeth_card *card)
{
	struct ccw_device *ccwdev;
	struct channelPath_dsc {
		u8 flags;
		u8 lsn;
		u8 desc;
		u8 chpid;
		u8 swla;
		u8 zeroes;
		u8 chla;
		u8 chpp;
	} *chp_dsc;

	QETH_DBF_TEXT(SETUP, 2, "chp_desc");

	ccwdev = card->data.ccwdev;
	/* returns a kmalloc'd copy we must free, or NULL */
	chp_dsc = (struct channelPath_dsc *)ccw_device_get_chp_desc(ccwdev, 0);
	if (chp_dsc != NULL) {
		if (card->info.type != QETH_CARD_TYPE_IQD) {
			/* CHPP field bit 6 == 1 -> single queue */
			if ((chp_dsc->chpp & 0x02) == 0x02) {
				if ((atomic_read(&card->qdio.state) !=
					QETH_QDIO_UNINITIALIZED) &&
				    (card->qdio.no_out_queues == 4))
					/* change from 4 to 1 outbound queues */
					qeth_free_qdio_buffers(card);
				card->qdio.no_out_queues = 1;
				if (card->qdio.default_out_queue != 0)
					dev_info(&card->gdev->dev,
					"Priority Queueing not supported\n");
				card->qdio.default_out_queue = 0;
			} else {
				if ((atomic_read(&card->qdio.state) !=
					QETH_QDIO_UNINITIALIZED) &&
				    (card->qdio.no_out_queues == 1)) {
					/* change from 1 to 4 outbound queues */
					qeth_free_qdio_buffers(card);
					card->qdio.default_out_queue = 2;
				}
				card->qdio.no_out_queues = 4;
			}
		}
		card->info.func_level = 0x4100 + chp_dsc->desc;
		kfree(chp_dsc);
	}
	QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
	QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
	return;
}
/* Initialize the card's QDIO bookkeeping to its pre-allocation state;
 * IQD cards get the larger inbound buffer count. */
static void qeth_init_qdio_info(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 4, "intqdinf");
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	/* inbound */
	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	card->qdio.init_pool.buf_count =
		(card->info.type == QETH_CARD_TYPE_IQD) ?
		QETH_IN_BUF_COUNT_HSDEFAULT : QETH_IN_BUF_COUNT_DEFAULT;
	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
}
/* Apply the driver's default option values to a fresh card.
 * (The "intial" spelling is historical; the name is kept because
 * callers outside this chunk use it.) */
static void qeth_set_intial_options(struct qeth_card *card)
{
	card->options.route4.type = NO_ROUTER;
	card->options.route6.type = NO_ROUTER;
	card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS;
	card->options.macaddr_mode = QETH_TR_MACADDR_NONCANONICAL;
	card->options.fake_broadcast = 0;
	card->options.add_hhlen = DEFAULT_ADD_HHLEN;
	card->options.performance_stats = 0;
	card->options.rx_sg_cb = QETH_RX_SG_CB;
	card->options.isolation = ISOLATION_MODE_NONE;
	card->options.cq = QETH_CQ_DISABLED;
}
/* Return non-zero when a start of @thread has been requested; traces
 * the three thread masks while holding the mask lock. */
static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
	unsigned long flags;
	int pending;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	QETH_CARD_TEXT_(card, 4, " %02x%02x%02x",
			(u8) card->thread_start_mask,
			(u8) card->thread_allowed_mask,
			(u8) card->thread_running_mask);
	pending = (card->thread_start_mask & thread);
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return pending;
}
/*
 * qeth_start_kernel_thread() - work item spawning requested threads.
 *
 * Runs from card->kernel_thread_starter.  Bails out only when both
 * control channels are down.  The only thread it can start here is the
 * per-discipline recovery thread; if kthread_run() fails, the start
 * and running bits are cleared so the request does not linger.
 */
static void qeth_start_kernel_thread(struct work_struct *work)
{
	struct task_struct *ts;
	struct qeth_card *card = container_of(work, struct qeth_card,
					kernel_thread_starter);

	QETH_CARD_TEXT(card , 2, "strthrd");

	if (card->read.state != CH_STATE_UP &&
	    card->write.state != CH_STATE_UP)
		return;
	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
		ts = kthread_run(card->discipline.recover, (void *)card,
				"qeth_recover");
		if (IS_ERR(ts)) {
			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
			qeth_clear_thread_running_bit(card,
				QETH_RECOVER_THREAD);
		}
	}
}
/*
 * qeth_setup_card() - initialize the software state of a new card.
 *
 * Puts all channels and the card into DOWN state and initializes
 * locks, mutexes, lists, the wait queue, thread masks, default
 * options, IP-takeover state and QDIO bookkeeping.  Always returns 0.
 */
static int qeth_setup_card(struct qeth_card *card)
{

	QETH_DBF_TEXT(SETUP, 2, "setupcrd");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

	card->read.state = CH_STATE_DOWN;
	card->write.state = CH_STATE_DOWN;
	card->data.state = CH_STATE_DOWN;
	card->state = CARD_STATE_DOWN;
	card->lan_online = 0;
	card->read_or_write_problem = 0;
	card->dev = NULL;
	spin_lock_init(&card->vlanlock);
	spin_lock_init(&card->mclock);
	spin_lock_init(&card->lock);
	spin_lock_init(&card->ip_lock);
	spin_lock_init(&card->thread_mask_lock);
	mutex_init(&card->conf_mutex);
	mutex_init(&card->discipline_mutex);
	card->thread_start_mask = 0;
	card->thread_allowed_mask = 0;
	card->thread_running_mask = 0;
	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
	INIT_LIST_HEAD(&card->ip_list);
	/* ip_tbd_list was allocated in qeth_alloc_card() */
	INIT_LIST_HEAD(card->ip_tbd_list);
	INIT_LIST_HEAD(&card->cmd_waiter_list);
	init_waitqueue_head(&card->wait_q);
	/* initial options */
	qeth_set_intial_options(card);
	/* IP address takeover */
	INIT_LIST_HEAD(&card->ipato.entries);
	card->ipato.enabled = 0;
	card->ipato.invert4 = 0;
	card->ipato.invert6 = 0;
	/* init QDIO stuff */
	qeth_init_qdio_info(card);
	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
	return 0;
}
/* Service-level reporter: print the card's firmware level, but only
 * when a microcode level string was actually recorded. */
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
	struct qeth_card *card = container_of(slr, struct qeth_card,
					      qeth_service_level);

	if (!card->info.mcl_level[0])
		return;
	seq_printf(m, "qeth: %s firmware level %s\n",
		   CARD_BUS_ID(card), card->info.mcl_level);
}
/*
 * qeth_alloc_card() - allocate and minimally initialize a qeth_card.
 *
 * Allocates the card with GFP_DMA, the to-be-deleted IP list head and
 * the read/write channel buffer rings, and registers the service-level
 * reporter.  Failure paths unwind via gotos.  Returns the card or NULL.
 */
static struct qeth_card *qeth_alloc_card(void)
{
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
	card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
	if (!card)
		goto out;
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
	card->ip_tbd_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
	if (!card->ip_tbd_list) {
		QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
		goto out_card;
	}
	if (qeth_setup_channel(&card->read))
		goto out_ip;
	if (qeth_setup_channel(&card->write))
		goto out_channel;
	/* -1: layer2 mode not decided yet */
	card->options.layer2 = -1;
	card->qeth_service_level.seq_print = qeth_core_sl_print;
	register_service_level(&card->qeth_service_level);
	return card;

out_channel:
	qeth_clean_channel(&card->read);
out_ip:
	kfree(card->ip_tbd_list);
out_card:
	kfree(card);
out:
	return NULL;
}
/*
 * qeth_determine_card_type() - classify the card from its CU model.
 *
 * Matches the read device's type/model against the known_devices table
 * and fills in queue counts and multicast behaviour; the model id
 * itself is stored as card->info.type.  Returns 0 on a match, -ENOENT
 * for unknown hardware.
 */
static int qeth_determine_card_type(struct qeth_card *card)
{
	int i = 0;

	QETH_DBF_TEXT(SETUP, 2, "detcdtyp");

	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
	while (known_devices[i][QETH_DEV_MODEL_IND]) {
		if ((CARD_RDEV(card)->id.dev_type ==
				known_devices[i][QETH_DEV_TYPE_IND]) &&
		    (CARD_RDEV(card)->id.dev_model ==
				known_devices[i][QETH_DEV_MODEL_IND])) {
			/* the model id doubles as the card type value */
			card->info.type = known_devices[i][QETH_DEV_MODEL_IND];
			card->qdio.no_out_queues =
				known_devices[i][QETH_QUEUE_NO_IND];
			card->qdio.no_in_queues = 1;
			card->info.is_multicast_different =
				known_devices[i][QETH_MULTICAST_IND];
			qeth_get_channel_path_desc(card);
			return 0;
		}
		i++;
	}
	card->info.type = QETH_CARD_TYPE_UNKNOWN;
	dev_err(&card->gdev->dev, "The adapter hardware is of an "
		"unknown type\n");
	return -ENOENT;
}
/*
 * qeth_clear_channel() - issue a CCW clear and wait for completion.
 *
 * The IRQ handler moves the channel to STOPPED when the clear function
 * completes; we then mark it DOWN.  Returns 0 on success, -ERESTARTSYS
 * when interrupted, -ETIME on timeout, or the ccw_device_clear() error.
 */
static int qeth_clear_channel(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_card *card;
	int rc;

	card = CARD_FROM_CDEV(channel->ccwdev);
	QETH_CARD_TEXT(card, 3, "clearch");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_STOPPED)
		return -ETIME;
	channel->state = CH_STATE_DOWN;
	return 0;
}
/*
 * qeth_halt_channel() - issue a CCW halt and wait for completion.
 *
 * The IRQ handler moves the channel to HALTED when the halt function
 * completes.  Returns 0 on success, -ERESTARTSYS when interrupted,
 * -ETIME on timeout, or the ccw_device_halt() error.
 */
static int qeth_halt_channel(struct qeth_channel *channel)
{
	unsigned long flags;
	struct qeth_card *card;
	int rc;

	card = CARD_FROM_CDEV(channel->ccwdev);
	QETH_CARD_TEXT(card, 3, "haltch");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc)
		return rc;
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_HALTED)
		return -ETIME;
	return 0;
}
/* Halt all three channels; every channel gets its halt attempt even if
 * an earlier one failed, and the first error is reported. */
static int qeth_halt_channels(struct qeth_card *card)
{
	int rc_read, rc_write, rc_data;

	QETH_CARD_TEXT(card, 3, "haltchs");
	rc_read = qeth_halt_channel(&card->read);
	rc_write = qeth_halt_channel(&card->write);
	rc_data = qeth_halt_channel(&card->data);
	if (rc_read)
		return rc_read;
	if (rc_write)
		return rc_write;
	return rc_data;
}
/* Clear all three channels; every channel gets its clear attempt even
 * if an earlier one failed, and the first error is reported. */
static int qeth_clear_channels(struct qeth_card *card)
{
	int rc_read, rc_write, rc_data;

	QETH_CARD_TEXT(card, 3, "clearchs");
	rc_read = qeth_clear_channel(&card->read);
	rc_write = qeth_clear_channel(&card->write);
	rc_data = qeth_clear_channel(&card->data);
	if (rc_read)
		return rc_read;
	if (rc_write)
		return rc_write;
	return rc_data;
}
/* Optionally halt, then clear, all channels of @card; stops at the
 * first halt error. */
static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
	QETH_CARD_TEXT(card, 3, "clhacrd");

	if (halt) {
		int rc = qeth_halt_channels(card);

		if (rc)
			return rc;
	}
	return qeth_clear_channels(card);
}
/*
 * qeth_qdio_clear_card() - shut down QDIO and clear/halt the channels.
 * @card:     card to clear.
 * @use_halt: when non-zero, halt the channels before clearing them.
 *
 * If QDIO is established it is shut down (HALT for IQD, CLEAR for the
 * rest) and freed; a concurrent CLEANING state makes this a no-op.
 * Leaves the card in CARD_STATE_DOWN.  Returns the channel clear/halt
 * result.
 */
int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "qdioclr");
	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
		QETH_QDIO_CLEANING)) {
	case QETH_QDIO_ESTABLISHED:
		if (card->info.type == QETH_CARD_TYPE_IQD)
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_HALT);
		else
			rc = qdio_shutdown(CARD_DDEV(card),
				QDIO_FLAG_CLEANUP_USING_CLEAR);
		if (rc)
			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
		qdio_free(CARD_DDEV(card));
		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
		break;
	case QETH_QDIO_CLEANING:
		/* somebody else is already cleaning up */
		return rc;
	default:
		break;
	}
	rc = qeth_clear_halt_card(card, use_halt);
	if (rc)
		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
	card->state = CARD_STATE_DOWN;
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
/*
 * qeth_read_conf_data() - read the device's configuration record.
 * @card:   card to query (uses the data channel).
 * @buffer: on success *buffer points to a kmalloc'd copy the caller
 *          must free; NULL on failure.
 * @length: valid byte count in *buffer; 0 on failure.
 *
 * Looks up the RCD command in the extended SenseID data and runs it
 * with a timeout; qeth_irq() moves the channel to RCD_DONE or DOWN.
 * Returns 0 on success, -EOPNOTSUPP when the device offers no RCD
 * command, -ENOMEM, -EIO, or a ccw_device_start_timeout() error.
 */
static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
		int *length)
{
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct qeth_channel *channel = &card->data;
	unsigned long flags;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf)
		return -ENOMEM;

	channel->ccw.cmd_code = ciw->cmd;
	channel->ccw.cda = (__u32) __pa(rcd_buf);
	channel->ccw.count = ciw->count;
	channel->ccw.flags = CCW_FLAG_SLI;
	channel->state = CH_STATE_RCD;
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	ret = ccw_device_start_timeout(channel->ccwdev, &channel->ccw,
				       QETH_RCD_PARM, LPM_ANYPATH, 0,
				       QETH_RCD_TIMEOUT);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
	if (!ret)
		wait_event(card->wait_q,
			   (channel->state == CH_STATE_RCD_DONE ||
			    channel->state == CH_STATE_DOWN));
	if (channel->state == CH_STATE_DOWN)
		ret = -EIO;
	else
		channel->state = CH_STATE_DOWN;
	if (ret) {
		kfree(rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	return ret;
}
/*
 * qeth_configure_unitaddr() - extract addressing info from RCD data.
 * @prcd: configuration record from qeth_read_conf_data().
 *
 * Byte offsets follow the device's configuration-record layout; the
 * EBCDIC "VM" signature at offsets 0x10/0x11 marks a guest-LAN device.
 */
static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgunit");
	card->info.chpid = prcd[30];
	card->info.unit_addr2 = prcd[31];
	card->info.cula = prcd[63];
	card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
			       (prcd[0x11] == _ascebc['M']));
}
/*
 * qeth_configure_blkt_default() - choose blocking-threshold defaults.
 * @prcd: configuration record from qeth_read_conf_data().
 *
 * Bytes 74-76 appear to hold an EBCDIC-encoded model suffix
 * (0xF0 0xF0 0xF5/0xF6, i.e. "005"/"006" — confirm against hardware
 * docs); those devices get non-zero thresholds, all others run with
 * blocking disabled.
 */
static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
{
	QETH_DBF_TEXT(SETUP, 2, "cfgblkt");

	if (prcd[74] == 0xF0 && prcd[75] == 0xF0 &&
	    (prcd[76] == 0xF5 || prcd[76] == 0xF6)) {
		card->info.blkt.time_total = 250;
		card->info.blkt.inter_packet = 5;
		card->info.blkt.inter_packet_jumbo = 15;
	} else {
		card->info.blkt.time_total = 0;
		card->info.blkt.inter_packet = 0;
		card->info.blkt.inter_packet_jumbo = 0;
	}
}
/* Seed the protocol token values used during IDX/CM/ULP setup. */
static void qeth_init_tokens(struct qeth_card *card)
{
	card->token.issuer_rm_w = 0x00010103UL;
	card->token.cm_filter_w = 0x00010108UL;
	card->token.cm_connection_w = 0x0001010aUL;
	card->token.ulp_filter_w = 0x0001010bUL;
	card->token.ulp_connection_w = 0x0001010dUL;
}
/* Pick the IDX function level for the card type; other types keep
 * whatever level was set before (e.g. by the channel path descriptor). */
static void qeth_init_func_level(struct qeth_card *card)
{
	if (card->info.type == QETH_CARD_TYPE_IQD)
		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
	else if (card->info.type == QETH_CARD_TYPE_OSD ||
		 card->info.type == QETH_CARD_TYPE_OSN)
		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
}
/*
 * qeth_idx_activate_get_answer() - read the reply to an IDX ACTIVATE.
 * @channel:      channel to read on.
 * @idx_reply_cb: callback run with the received buffer.
 *
 * Starts a plain read CCW after waiting for irq_pending to become
 * free, then waits for the channel to reach UP (presumably driven by
 * @idx_reply_cb — confirm at the callback definitions).  Returns 0 on
 * success, -ERESTARTSYS, -ETIME, or a ccw_device_start() error.
 */
static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	int rc;
	struct qeth_card *card;

	QETH_DBF_TEXT(SETUP, 2, "idxanswr");
	card = CARD_FROM_CDEV(channel->ccwdev);
	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
	channel->ccw.count = QETH_BUFSIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);

	/* claim the channel: only one I/O may be in flight */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			      &channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			 channel->state == CH_STATE_UP, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_UP) {
		rc = -ETIME;
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
		qeth_clear_cmd_buffers(channel);
	} else
		rc = 0;
	return rc;
}
/*
 * Build and transmit the IDX ACTIVATE frame for @channel (READ or
 * WRITE variant) and wait until the channel reaches
 * CH_STATE_ACTIVATING.  On success the adapter's answer is fetched
 * via qeth_idx_activate_get_answer().
 */
static int qeth_idx_activate_channel(struct qeth_channel *channel,
		void (*idx_reply_cb)(struct qeth_channel *,
			struct qeth_cmd_buffer *))
{
	struct qeth_card *card;
	struct qeth_cmd_buffer *iob;
	unsigned long flags;
	__u16 temp;
	__u8 tmp;
	int rc;
	struct ccw_dev_id temp_devid;

	card = CARD_FROM_CDEV(channel->ccwdev);
	QETH_DBF_TEXT(SETUP, 2, "idxactch");
	iob = qeth_get_buffer(channel);
	iob->callback = idx_reply_cb;
	memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
	channel->ccw.count = IDX_ACTIVATE_SIZE;
	channel->ccw.cda = (__u32) __pa(iob->data);
	if (channel == &card->write) {
		memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
			&card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
		/* only the write channel consumes transport seq numbers */
		card->seqno.trans_hdr++;
	} else {
		memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
		memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
			&card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	}
	/* port number with the 0x80 flag set */
	tmp = ((__u8)card->info.portno) | 0x80;
	memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
		&card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
		&card->info.func_level, sizeof(__u16));
	/* identify the qdio data device to the adapter */
	ccw_device_get_id(CARD_DDEV(card), &temp_devid);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);

	/* claim exclusive use of the channel */
	wait_event(card->wait_q,
		   atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
	QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags);
	rc = ccw_device_start(channel->ccwdev,
			&channel->ccw, (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);

	if (rc) {
		QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
			rc);
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		atomic_set(&channel->irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}
	rc = wait_event_interruptible_timeout(card->wait_q,
			channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
	if (rc == -ERESTARTSYS)
		return rc;
	if (channel->state != CH_STATE_ACTIVATING) {
		dev_warn(&channel->ccwdev->dev, "The qeth device driver"
			" failed to recover an error on the device\n");
		QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
			dev_name(&channel->ccwdev->dev));
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
		qeth_clear_cmd_buffers(channel);
		return -ETIME;
	}
	return qeth_idx_activate_get_answer(channel, idx_reply_cb);
}
/*
 * Translate our own IDX function level into the value the peer is
 * expected to report back during IDX activation.
 */
static int qeth_peer_func_level(int level)
{
	int low_byte = level & 0xff;

	if (low_byte == 8)
		return low_byte + 0x400;
	if (((level >> 8) & 3) == 1)
		return low_byte + 0x200;
	return level;
}
/*
 * Interrupt callback for the IDX ACTIVATE exchange on the write
 * channel.  The first interrupt (channel still DOWN) merely signals
 * that activation started; the second carries the adapter's reply,
 * which is validated (positive answer, matching function level)
 * before the channel is marked UP.
 */
static void qeth_idx_write_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(SETUP , 2, "idxwrcb");

	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}
	card = CARD_FROM_CDEV(channel->ccwdev);

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
			dev_err(&card->write.ccwdev->dev,
				"The adapter is used exclusively by another "
				"host\n");
		else
			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
				" negative reply\n",
				dev_name(&card->write.ccwdev->dev));
		goto out;
	}
	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	/* bit 0x0100 of the reported level is masked out for the compare */
	if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
			"function level mismatch (sent: 0x%x, received: "
			"0x%x)\n", dev_name(&card->write.ccwdev->dev),
			card->info.func_level, temp);
		goto out;
	}
	channel->state = CH_STATE_UP;
out:
	/* the reply buffer is always returned to the channel pool */
	qeth_release_buffer(channel, iob);
}
/*
 * Interrupt callback for the IDX ACTIVATE exchange on the read
 * channel.  Mirrors qeth_idx_write_cb() but additionally stores the
 * issuer token and microcode level that the adapter reports.
 */
static void qeth_idx_read_cb(struct qeth_channel *channel,
		struct qeth_cmd_buffer *iob)
{
	struct qeth_card *card;
	__u16 temp;

	QETH_DBF_TEXT(SETUP , 2, "idxrdcb");

	if (channel->state == CH_STATE_DOWN) {
		channel->state = CH_STATE_ACTIVATING;
		goto out;
	}
	card = CARD_FROM_CDEV(channel->ccwdev);
	if (qeth_check_idx_response(card, iob->data))
		goto out;

	if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
		switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
		case QETH_IDX_ACT_ERR_EXCL:
			dev_err(&card->write.ccwdev->dev,
				"The adapter is used exclusively by another "
				"host\n");
			break;
		case QETH_IDX_ACT_ERR_AUTH:
		case QETH_IDX_ACT_ERR_AUTH_USER:
			dev_err(&card->read.ccwdev->dev,
				"Setting the device online failed because of "
				"insufficient authorization\n");
			break;
		default:
			QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
				" negative reply\n",
				dev_name(&card->read.ccwdev->dev));
		}
		QETH_CARD_TEXT_(card, 2, "idxread%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));
		goto out;
	}

	/*
	 * Temporary workaround for a microcode bug: treat the portname
	 * as required for all OSD cards, not only when the adapter
	 * asks for one.  To revert, replace the || with &&.
	 */
	if ((!QETH_IDX_NO_PORTNAME_REQUIRED(iob->data)) ||
	    (card->info.type == QETH_CARD_TYPE_OSD))
		card->info.portname_required = 1;

	memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (temp != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
			"level mismatch (sent: 0x%x, received: 0x%x)\n",
			dev_name(&card->read.ccwdev->dev),
			card->info.func_level, temp);
		goto out;
	}
	/* remember the adapter's issuer token and microcode level */
	memcpy(&card->token.issuer_rm_r,
		QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
		QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
		QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
	channel->state = CH_STATE_UP;
out:
	qeth_release_buffer(channel, iob);
}
/*
 * Stamp the transport and PDU sequence numbers into a prepared control
 * command and set up the write-channel CCW for it.  The buffer is
 * auto-released after transmission via the qeth_release_buffer
 * callback.
 */
void qeth_prepare_control_data(struct qeth_card *card, int len,
		struct qeth_cmd_buffer *iob)
{
	qeth_setup_ccw(&card->write, iob->data, len);
	iob->callback = qeth_release_buffer;

	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
		&card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.trans_hdr++;
	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
		&card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	/* ack the last PDU sequence number received from the adapter */
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
		&card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
}
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
/*
 * qeth_send_control_data() - transmit a control command on the write
 * channel and wait for the matching reply.
 * @card:        card the command is sent to
 * @len:         length of the prepared command in @iob->data
 * @iob:         command buffer; consumed (released on every path)
 * @reply_cb:    called from the read-channel path with the reply
 * @reply_param: opaque cookie passed to @reply_cb
 *
 * Returns 0, a negative errno, or the reply's return code.
 */
int qeth_send_control_data(struct qeth_card *card, int len,
		struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
			unsigned long),
		void *reply_param)
{
	int rc;
	unsigned long flags;
	struct qeth_reply *reply = NULL;
	unsigned long timeout, event_timeout;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "sendctl");

	if (card->read_or_write_problem) {
		qeth_release_buffer(iob->channel, iob);
		return -EIO;
	}
	reply = qeth_alloc_reply(card);
	if (!reply) {
		/* fix: release the command buffer here as well; it was
		 * leaked on this path while every other early return
		 * releases it */
		qeth_release_buffer(iob->channel, iob);
		return -ENOMEM;
	}
	reply->callback = reply_cb;
	reply->param = reply_param;
	/* IDX-phase commands (card still down) use a fixed seqno */
	if (card->state == CARD_STATE_DOWN)
		reply->seqno = QETH_IDX_COMMAND_SEQNO;
	else
		reply->seqno = card->seqno.ipa++;
	init_waitqueue_head(&reply->wait_q);
	spin_lock_irqsave(&card->lock, flags);
	list_add_tail(&reply->list, &card->cmd_waiter_list);
	spin_unlock_irqrestore(&card->lock, flags);
	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);

	/* claim the write channel */
	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1))
		;
	qeth_prepare_control_data(card, len, iob);

	if (IS_IPA(iob->data))
		event_timeout = QETH_IPA_TIMEOUT;
	else
		event_timeout = QETH_TIMEOUT;
	timeout = jiffies + event_timeout;

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
			      (addr_t) iob, 0, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags);
	if (rc) {
		QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
			"ccw_device_start rc = %i\n",
			dev_name(&card->write.ccwdev->dev), rc);
		QETH_CARD_TEXT_(card, 2, " err%d", rc);
		spin_lock_irqsave(&card->lock, flags);
		list_del_init(&reply->list);
		qeth_put_reply(reply);
		spin_unlock_irqrestore(&card->lock, flags);
		qeth_release_buffer(iob->channel, iob);
		atomic_set(&card->write.irq_pending, 0);
		wake_up(&card->wait_q);
		return rc;
	}

	/* we have only one long running ipassist, since we can ensure
	   process context of this command we can sleep */
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	if ((cmd->hdr.command == IPA_CMD_SETIP) &&
	    (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
		if (!wait_event_timeout(reply->wait_q,
		    atomic_read(&reply->received), event_timeout))
			goto time_err;
	} else {
		/* all other commands complete quickly; busy-wait because
		 * some callers cannot sleep here */
		while (!atomic_read(&reply->received)) {
			if (time_after(jiffies, timeout))
				goto time_err;
			cpu_relax();
		}
	}

	if (reply->rc == -EIO)
		goto error;
	rc = reply->rc;
	qeth_put_reply(reply);
	return rc;

time_err:
	reply->rc = -ETIME;
	spin_lock_irqsave(&reply->card->lock, flags);
	list_del_init(&reply->list);
	spin_unlock_irqrestore(&reply->card->lock, flags);
	atomic_inc(&reply->received);
error:
	atomic_set(&card->write.irq_pending, 0);
	qeth_release_buffer(iob->channel, iob);
	card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO;
	rc = reply->rc;
	qeth_put_reply(reply);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_control_data);
/*
 * Completion handler for CM_ENABLE: save the filter token returned by
 * the adapter; it is needed for the subsequent CM_SETUP exchange.
 */
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "cmenblcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_filter_r,
		QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
		QETH_MPC_TOKEN_LENGTH);
	QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
	return 0;
}
/*
 * Send the CM_ENABLE command: first step of the MPC initialization
 * dialog.  Passes our issuer token and write-side filter token.
 */
static int qeth_cm_enable(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "cmenable");

	iob = qeth_wait_for_buffer(&card->write);
	/* fill in the static CM_ENABLE template, then patch the tokens */
	memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
		&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
		&card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob,
		qeth_cm_enable_cb, NULL);
	return rc;
}
/*
 * Completion handler for CM_SETUP: save the connection token the
 * adapter returned; used as destination address in later commands.
 */
static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "cmsetpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.cm_connection_r,
		QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
		QETH_MPC_TOKEN_LENGTH);
	QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
	return 0;
}
/*
 * Send the CM_SETUP command: establishes the control-plane connection
 * using the tokens collected during IDX activation and CM_ENABLE.
 */
static int qeth_cm_setup(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "cmsetup");

	iob = qeth_wait_for_buffer(&card->write);
	/* fill in the static CM_SETUP template, then patch the tokens */
	memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
		&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
		&card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
		&card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob,
		qeth_cm_setup_cb, NULL);
	return rc;
}
/* Pick the default MTU for a freshly initialized card. */
static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card)
{
	if (card->info.type == QETH_CARD_TYPE_IQD)
		return card->info.max_mtu;
	if (card->info.type == QETH_CARD_TYPE_OSM ||
	    card->info.type == QETH_CARD_TYPE_OSX)
		return 1492;
	if (card->info.type == QETH_CARD_TYPE_OSD) {
		/* token-ring style links allow a larger frame */
		if (card->info.link_type == QETH_LINK_TYPE_HSTR ||
		    card->info.link_type == QETH_LINK_TYPE_LANE_TR)
			return 2000;
		return 1492;
	}
	/* QETH_CARD_TYPE_UNKNOWN and everything else */
	return 1500;
}
/* Map a HiperSockets frame-size code to its MTU; 0 = unknown code. */
static inline int qeth_get_mtu_outof_framesize(int framesize)
{
	if (framesize == 0x4000)
		return 8192;
	if (framesize == 0x6000)
		return 16384;
	if (framesize == 0xa000)
		return 32768;
	if (framesize == 0xffff)
		return 57344;
	return 0;
}
/*
 * An MTU is valid when it lies within [576, max_mtu] for LAN-type
 * cards; OSN and unknown card types accept any value.
 */
static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu)
{
	if (card->info.type == QETH_CARD_TYPE_OSD ||
	    card->info.type == QETH_CARD_TYPE_OSM ||
	    card->info.type == QETH_CARD_TYPE_OSX ||
	    card->info.type == QETH_CARD_TYPE_IQD)
		return (mtu >= 576) && (mtu <= card->info.max_mtu);
	return 1;
}
/*
 * Completion handler for ULP_ENABLE: store the filter token and derive
 * MTU / inbound buffer size and link type from the adapter's reply.
 */
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	__u8 link_type;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
		QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
		QETH_MPC_TOKEN_LENGTH);
	if (card->info.type == QETH_CARD_TYPE_IQD) {
		/* HiperSockets reports a frame-size code, not the MTU */
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
		if (!mtu) {
			iob->rc = -EINVAL;
			QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
			return 0;
		}
		if (card->info.initial_mtu && (card->info.initial_mtu != mtu)) {
			/* frame size has changed */
			if (card->dev &&
			    ((card->dev->mtu == card->info.initial_mtu) ||
			     (card->dev->mtu > mtu)))
				card->dev->mtu = mtu;
			/* buffers must be re-sized for the new frame size */
			qeth_free_qdio_buffers(card);
		}
		card->info.initial_mtu = mtu;
		card->info.max_mtu = mtu;
		card->qdio.in_buf_size = mtu + 2 * PAGE_SIZE;
	} else {
		card->info.initial_mtu = qeth_get_initial_mtu_for_card(card);
		card->info.max_mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(
			iob->data);
		card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
	}

	/* the link type field is only present in longer replies */
	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		card->info.link_type = link_type;
	} else
		card->info.link_type = 0;
	QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type);
	QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
	return 0;
}
/*
 * Send the ULP_ENABLE command: registers the upper-layer protocol
 * (layer2, layer3/TCPIP or OSN) with the adapter for our port.
 */
static int qeth_ulp_enable(struct qeth_card *card)
{
	int rc;
	char prot_type;
	struct qeth_cmd_buffer *iob;

	/*FIXME: trace view callbacks*/
	QETH_DBF_TEXT(SETUP, 2, "ulpenabl");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) =
		(__u8) card->info.portno;
	/* note: the first else pairs with the inner if (OSN check),
	 * the second else with the outer layer2 check */
	if (card->options.layer2)
		if (card->info.type == QETH_CARD_TYPE_OSN)
			prot_type = QETH_PROT_OSN2;
		else
			prot_type = QETH_PROT_LAYER2;
	else
		prot_type = QETH_PROT_TCPIP;

	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
		&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
		&card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_PORTNAME_AND_LL(iob->data),
		card->info.portname, 9);
	rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob,
		qeth_ulp_enable_cb, NULL);
	return rc;
}
/*
 * Completion handler for ULP_SETUP: store the connection token.  A
 * token starting with "00S" signals that the OLM connection limit was
 * reached, which is reported as -EMLINK.
 */
static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;
	int rc = 0;

	QETH_DBF_TEXT(SETUP, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
		QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		3)) {
		QETH_DBF_TEXT(SETUP, 2, "olmlimit");
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
		iob->rc = -EMLINK;
	}
	QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc);
	return rc;
}
/*
 * Send the ULP_SETUP command: completes the upper-layer connection,
 * passing all collected tokens plus the qdio device identification.
 */
static int qeth_ulp_setup(struct qeth_card *card)
{
	int rc;
	__u16 temp;
	struct qeth_cmd_buffer *iob;
	struct ccw_dev_id dev_id;

	QETH_DBF_TEXT(SETUP, 2, "ulpsetup");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
		&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
		&card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
		&card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	/* identify the qdio data device to the adapter */
	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob,
		qeth_ulp_setup_cb, NULL);
	return rc;
}
/*
 * Allocate a fresh outbound buffer descriptor for slot @bidx of queue
 * @q.  A descriptor already in the slot (a buffer still pending
 * completion) is chained via ->next_pending rather than freed.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
{
	int rc;
	struct qeth_qdio_out_buffer *newbuf;

	rc = 0;
	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
	if (!newbuf) {
		rc = -ENOMEM;
		goto out;
	}
	newbuf->buffer = &q->qdio_bufs[bidx];
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	newbuf->q = q;
	newbuf->aob = NULL;
	/* keep any previous (pending) descriptor reachable */
	newbuf->next_pending = q->bufs[bidx];
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
	if (q->bufstates) {
		q->bufstates[bidx].user = newbuf;
		QETH_CARD_TEXT_(q->card, 2, "nbs%d", bidx);
		QETH_CARD_TEXT_(q->card, 2, "%lx", (long) newbuf);
		QETH_CARD_TEXT_(q->card, 2, "%lx",
				(long) newbuf->next_pending);
	}
out:
	return rc;
}
/*
 * Allocate the inbound queue, the inbound buffer pool, all outbound
 * queues with their buffer descriptors, and the completion queue.
 * On failure everything allocated so far is torn down, the qdio state
 * is reset to QETH_QDIO_UNINITIALIZED and -ENOMEM is returned.
 */
static int qeth_alloc_qdio_buffers(struct qeth_card *card)
{
	int i, j;

	QETH_DBF_TEXT(SETUP, 2, "allcqdbf");

	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

	/* kzalloc already zeroes the queue; the old extra memset was
	 * redundant and has been dropped */
	card->qdio.in_q = kzalloc(sizeof(struct qeth_qdio_q),
				  GFP_KERNEL);
	if (!card->qdio.in_q)
		goto out_nomem;
	QETH_DBF_TEXT(SETUP, 2, "inq");
	QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *));
	/* give inbound qeth_qdio_buffers their qdio_buffers */
	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
		card->qdio.in_q->bufs[i].buffer =
			&card->qdio.in_q->qdio_bufs[i];
		card->qdio.in_q->bufs[i].rx_skb = NULL;
	}
	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_freeinq;

	/* outbound */
	card->qdio.out_qs =
		kzalloc(card->qdio.no_out_queues *
			sizeof(struct qeth_qdio_out_q *), GFP_KERNEL);
	if (!card->qdio.out_qs)
		goto out_freepool;
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		card->qdio.out_qs[i] = kzalloc(sizeof(struct qeth_qdio_out_q),
					       GFP_KERNEL);
		if (!card->qdio.out_qs[i])
			goto out_freeoutq;
		QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
		QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
		card->qdio.out_qs[i]->queue_no = i;
		/* give outbound qeth_qdio_buffers their qdio_buffers */
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			BUG_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
			if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
				goto out_freeoutqbufs;
		}
	}

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

	return 0;

out_freeoutqbufs:
	while (j > 0) {
		--j;
		kmem_cache_free(qeth_qdio_outbuf_cache,
				card->qdio.out_qs[i]->bufs[j]);
		card->qdio.out_qs[i]->bufs[j] = NULL;
	}
	/* fix: also free the partially set up queue i itself; the loop
	 * below only handles the fully initialized queues 0..i-1 */
	kfree(card->qdio.out_qs[i]);
	card->qdio.out_qs[i] = NULL;
out_freeoutq:
	while (i > 0) {
		--i;
		/* fix: clear the queue's buffers BEFORE freeing the
		 * queue (the old order was a use-after-free) */
		qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
		kfree(card->qdio.out_qs[i]);
	}
	kfree(card->qdio.out_qs);
	card->qdio.out_qs = NULL;
out_freepool:
	qeth_free_buffer_pool(card);
out_freeinq:
	kfree(card->qdio.in_q);
	card->qdio.in_q = NULL;
out_nomem:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}
/*
 * Fill the "PCIT" section of the QIB parameter field: EBCDIC tag plus
 * the PCI thresholds and timer value for this card.
 * (Fixes mojibake: "&param_field" had been corrupted to a pilcrow
 * entity, which does not compile.)
 */
static void qeth_create_qib_param_field(struct qeth_card *card,
		char *param_field)
{
	param_field[0] = _ascebc['P'];
	param_field[1] = _ascebc['C'];
	param_field[2] = _ascebc['I'];
	param_field[3] = _ascebc['T'];
	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
}
/*
 * Fill the "BLKT" section of the QIB parameter field: EBCDIC tag plus
 * the configured blocking-threshold timer values.
 * (Fixes mojibake: "&param_field" had been corrupted to a pilcrow
 * entity, which does not compile.)
 */
static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
		char *param_field)
{
	param_field[16] = _ascebc['B'];
	param_field[17] = _ascebc['L'];
	param_field[18] = _ascebc['K'];
	param_field[19] = _ascebc['T'];
	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
	*((unsigned int *) (&param_field[28])) =
		card->info.blkt.inter_packet_jumbo;
}
/* Activate the QDIO queues on the card's data device. */
static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}
/*
 * Send the DM_ACT command: final step of MPC initialization, which
 * activates the data-mover for the established ULP connection.
 */
static int qeth_dm_act(struct qeth_card *card)
{
	int rc;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "dmact");

	iob = qeth_wait_for_buffer(&card->write);
	memcpy(iob->data, DM_ACT, DM_ACT_SIZE);

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
		&card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
		&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	/* no reply callback: only the return code matters */
	rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL);
	return rc;
}
/*
 * Run the complete MPC initialization dialog: CM enable/setup, ULP
 * enable/setup, qdio buffer allocation, qdio establish/activate and
 * DM activation.  On any failure the qdio subsystem is cleared again
 * (with a full halt for non-HiperSockets cards).
 */
static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_DBF_TEXT(SETUP, 2, "mpcinit");

	/* start the reply-reading machinery on the read channel first */
	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		goto out_qdio;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
		goto out_qdio;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
		goto out_qdio;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
		goto out_qdio;
	}
	rc = qeth_alloc_qdio_buffers(card);
	if (rc) {
		/* note: debug id "5err" is (historically) reused here */
		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
		goto out_qdio;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
		qeth_free_qdio_buffers(card);
		goto out_qdio;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
		goto out_qdio;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
		goto out_qdio;
	}

	return 0;
out_qdio:
	qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
	return rc;
}
/*
 * Log card type, microcode level and link type, including the portname
 * (converted from EBCDIC; byte 0 of portname is a length/flag byte and
 * is skipped).
 */
static void qeth_print_status_with_portname(struct qeth_card *card)
{
	char dbf_text[15];
	int i;

	sprintf(dbf_text, "%s", card->info.portname + 1);
	/* convert the 8 portname characters from EBCDIC to ASCII */
	for (i = 0; i < 8; i++)
		dbf_text[i] =
			(char) _ebcasc[(__u8) dbf_text[i]];
	dbf_text[8] = 0;
	dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n"
		"with link type %s (portname: %s)\n",
		qeth_get_cardname(card),
		(card->info.mcl_level[0]) ? " (level: " : "",
		(card->info.mcl_level[0]) ? card->info.mcl_level : "",
		(card->info.mcl_level[0]) ? ")" : "",
		qeth_get_cardname_short(card),
		dbf_text);
}
/*
 * Log card type, microcode level and link type for cards that do not
 * require a portname; the wording differs depending on whether a
 * portname was configured anyway.
 */
static void qeth_print_status_no_portname(struct qeth_card *card)
{
	if (card->info.portname[0])
		dev_info(&card->gdev->dev, "Device is a%s "
			"card%s%s%s\nwith link type %s "
			"(no portname needed by interface).\n",
			qeth_get_cardname(card),
			(card->info.mcl_level[0]) ? " (level: " : "",
			(card->info.mcl_level[0]) ? card->info.mcl_level : "",
			(card->info.mcl_level[0]) ? ")" : "",
			qeth_get_cardname_short(card));
	else
		dev_info(&card->gdev->dev, "Device is a%s "
			"card%s%s%s\nwith link type %s.\n",
			qeth_get_cardname(card),
			(card->info.mcl_level[0]) ? " (level: " : "",
			(card->info.mcl_level[0]) ? card->info.mcl_level : "",
			(card->info.mcl_level[0]) ? ")" : "",
			qeth_get_cardname_short(card));
}
/*
 * Normalize the reported microcode level depending on card type, then
 * log the card's identification via one of the two helpers above.
 */
void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM will use a non-zero first character
		 * to indicate a HiperSockets like reporting
		 * of the level OSA sets the first character to zero
		 * */
		if (!card->info.mcl_level[0]) {
			/* OSA: format bytes 2/3 as a hex level string */
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);

			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
			break;
		}
		/* fallthrough */
	case QETH_CARD_TYPE_IQD:
		if ((card->info.guestlan) ||
		    (card->info.mcl_level[0] & 0x80)) {
			/* level is EBCDIC text: convert it to ASCII */
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	if (card->info.portname_required)
		qeth_print_status_with_portname(card);
	else
		qeth_print_status_no_portname(card);
}
EXPORT_SYMBOL_GPL(qeth_print_status_message);
static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *entry;
QETH_CARD_TEXT(card, 5, "inwrklst");
list_for_each_entry(entry,
&card->qdio.init_pool.entry_list, init_list) {
qeth_put_buffer_pool_entry(card, entry);
}
}
/*
 * Return a pool entry whose pages are no longer referenced elsewhere
 * (page_count == 1).  If none exists, take the first entry and replace
 * its still-referenced pages with freshly allocated ones.  Returns
 * NULL if the pool is empty or a replacement page cannot be allocated.
 */
static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
		struct qeth_card *card)
{
	struct list_head *plh;
	struct qeth_buffer_pool_entry *entry;
	int i, free;
	struct page *page;

	if (list_empty(&card->qdio.in_buf_pool.entry_list))
		return NULL;

	list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
		entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
		free = 1;
		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
			/* page_count > 1: still referenced by the stack */
			if (page_count(virt_to_page(entry->elements[i])) > 1) {
				free = 0;
				break;
			}
		}
		if (free) {
			list_del_init(&entry->list);
			return entry;
		}
	}

	/* no free buffer in pool so take first one and swap pages */
	entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
			struct qeth_buffer_pool_entry, list);
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		if (page_count(virt_to_page(entry->elements[i])) > 1) {
			page = alloc_page(GFP_ATOMIC);
			if (!page) {
				return NULL;
			} else {
				/* our reference is dropped; the other
				 * holder now owns the old page */
				free_page((unsigned long)entry->elements[i]);
				entry->elements[i] = page_address(page);
				if (card->options.performance_stats)
					card->perf_stats.sg_alloc_page_rx++;
			}
		}
	}
	list_del_init(&entry->list);
	return entry;
}
/*
 * Attach a free pool entry (and, with a completion queue enabled, a
 * pre-allocated skb) to an inbound qdio buffer and fill its SBAL
 * elements.  Returns 0 on success, 1 on allocation failure.
 */
static int qeth_init_input_buffer(struct qeth_card *card,
		struct qeth_qdio_buffer *buf)
{
	struct qeth_buffer_pool_entry *pool_entry;
	int i;

	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
		buf->rx_skb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
		if (!buf->rx_skb)
			return 1;
	}

	pool_entry = qeth_find_free_buffer_pool_entry(card);
	if (!pool_entry)
		return 1;

	/*
	 * since the buffer is accessed only from the input_tasklet
	 * there shouldn't be a need to synchronize; also, since we use
	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run  out off
	 * buffers
	 */

	buf->pool_entry = pool_entry;
	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
		buf->buffer->element[i].length = PAGE_SIZE;
		buf->buffer->element[i].addr =  pool_entry->elements[i];
		/* mark the last element so the adapter knows the end */
		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
		else
			buf->buffer->element[i].eflags = 0;
		buf->buffer->element[i].sflags = 0;
	}
	return 0;
}
/*
 * Reset and (re)populate all qdio queues: hand the initial inbound
 * buffers to the hardware, initialize the completion queue and reset
 * the per-queue outbound state.  Returns 0 or a negative error code.
 */
int qeth_init_qdio_queues(struct qeth_card *card)
{
	int i, j;
	int rc;

	QETH_DBF_TEXT(SETUP, 2, "initqdqs");

	/* inbound queue */
	memset(card->qdio.in_q->qdio_bufs, 0,
	       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
	qeth_initialize_working_pool_list(card);
	/*give only as many buffers to hardware as we have buffer pool entries*/
	for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
		qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
	card->qdio.in_q->next_buf_to_init =
		card->qdio.in_buf_pool.buf_count - 1;
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
		     card->qdio.in_buf_pool.buf_count - 1);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		return rc;
	}

	/* completion */
	rc = qeth_cq_init(card);
	if (rc) {
		return rc;
	}

	/* outbound queue */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		memset(card->qdio.out_qs[i]->qdio_bufs, 0,
		       QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer));
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			qeth_clear_output_buffer(card->qdio.out_qs[i],
					card->qdio.out_qs[i]->bufs[j],
					QETH_QDIO_BUF_EMPTY);
		}
		card->qdio.out_qs[i]->card = card;
		card->qdio.out_qs[i]->next_buf_to_fill = 0;
		card->qdio.out_qs[i]->do_pack = 0;
		atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
		atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
		atomic_set(&card->qdio.out_qs[i]->state,
			   QETH_OUT_Q_UNLOCKED);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
/* IPA adapter type code: 2 for HiperSockets token ring, 1 otherwise. */
static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type)
{
	return (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
}
/* Initialize the common header fields of an IPA command. */
static void qeth_fill_ipacmd_header(struct qeth_card *card,
		struct qeth_ipa_cmd *cmd, __u8 command,
		enum qeth_prot_versions prot)
{
	memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
	cmd->hdr.command = command;
	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
	cmd->hdr.seqno = card->seqno.ipa;
	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
	/* layer2 discipline speaks primitive version 2, layer3 version 1 */
	cmd->hdr.prim_version_no = card->options.layer2 ? 2 : 1;
	cmd->hdr.param_count = 1;
	cmd->hdr.prot_version = prot;
	cmd->hdr.ipa_supported = 0;
	cmd->hdr.ipa_enabled = 0;
}
/*
 * Obtain a write-channel command buffer and pre-fill it with an IPA
 * command header for @ipacmd / @prot.
 */
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
		enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
{
	struct qeth_cmd_buffer *buffer;
	struct qeth_ipa_cmd *hdr;

	buffer = qeth_wait_for_buffer(&card->write);
	hdr = (struct qeth_ipa_cmd *)(buffer->data + IPA_PDU_HEADER_SIZE);
	qeth_fill_ipacmd_header(card, hdr, ipacmd, prot);
	return buffer;
}
EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
/*
 * Wrap an IPA command with the PDU header: protocol discriminator and
 * the ULP connection token as destination address.
 */
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		char prot_type)
{
	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
		&card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
/*
 * Prepare and transmit an IPA command; on timeout all pending IPA
 * commands are flushed and recovery is scheduled.
 */
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
			unsigned long),
		void *reply_param)
{
	char prot_type = QETH_PROT_TCPIP;
	int rc;

	QETH_CARD_TEXT(card, 4, "sendipa");

	if (card->options.layer2) {
		prot_type = (card->info.type == QETH_CARD_TYPE_OSN) ?
			QETH_PROT_OSN2 : QETH_PROT_LAYER2;
	}
	qeth_prepare_ipa_cmd(card, iob, prot_type);
	rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
			iob, reply_cb, reply_param);
	if (rc == -ETIME) {
		qeth_clear_ipacmd_list(card);
		qeth_schedule_recovery(card);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
/* Ask the adapter to start its LAN interface. */
int qeth_send_startlan(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "strtlan");

	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
	return qeth_send_ipa_cmd(card, iob, NULL, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_startlan);
/*
 * Default SETADAPTERPARMS completion: when the IPA itself succeeded,
 * propagate the sub-command's return code into the IPA header so
 * callers see a single consolidated status.
 */
int qeth_default_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 4, "defadpcb");
	if (!cmd->hdr.return_code)
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_default_setadapterparms_cb);
/*
 * Completion handler for the QUERY_COMMANDS_SUPPORTED sub-command:
 * record the reported link type and the supported-commands bitmap.
 */
static int qeth_query_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 3, "quyadpcb");

	cmd = (struct qeth_ipa_cmd *) data;
	/* low 7 bits carry the link type; 0 means "not reported" */
	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
		card->info.link_type =
		      cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
		QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type);
	}
	card->options.adp.supported_funcs =
		cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
	return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
}
/*
 * Build a SETADAPTERPARMS command buffer for sub-command @command
 * with payload length @cmdlen.
 */
struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
		__u32 command, __u32 cmdlen)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
				     QETH_PROT_IPV4);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
	cmd->data.setadapterparms.hdr.command_code = command;
	/* single-frame command: frame 1 of 1 */
	cmd->data.setadapterparms.hdr.used_total = 1;
	cmd->data.setadapterparms.hdr.seq_no = 1;

	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_adapter_cmd);
/* Query which SETADAPTERPARMS sub-commands the adapter supports. */
int qeth_query_setadapterparms(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 3, "queryadp");
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
			sizeof(struct qeth_ipacmd_setadpparms));
	return qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb,
			NULL);
}
EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
/*
 * Completion handler for QIPASSIST: record the supported and enabled
 * IP assist masks for the queried protocol (IPv4 or, otherwise, IPv6).
 */
static int qeth_query_ipassists_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_DBF_TEXT(SETUP, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
	} else {
		/* any non-IPv4 reply is treated as IPv6 here */
		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
	}
	QETH_DBF_TEXT(SETUP, 2, "suppenbl");
	QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_supported);
	QETH_DBF_TEXT_(SETUP, 2, "%08x", (__u32)cmd->hdr.ipa_enabled);
	return 0;
}
/* Query the supported/enabled IP assists for protocol @prot. */
int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
{
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
	return qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_query_ipassists);
/* Callback for the SET_DIAG_ASS query: on success remember which
 * diagnostic assists the card offers, otherwise just log the rc. */
static int qeth_query_setdiagass_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	__u16 ret = cmd->hdr.return_code;

	if (!ret)
		card->info.diagass_support = cmd->data.diagass.ext;
	else
		QETH_CARD_TEXT_(card, 2, "diagq:%x", ret);
	return 0;
}
/* Issue the QUERY sub-command of SET_DIAG_ASS to learn the card's
 * diagnostic capabilities. */
static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_cmd_buffer *iob;

	QETH_DBF_TEXT(SETUP, 2, "qdiagass");
	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
	cmd->data.diagass.subcmd_len = 16;
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}
/* Fill a qeth_trap_id with data identifying this card instance
 * (CHPID, subchannel set id, device number, LPAR number, VM guest
 * name) for use in a hardware trap request. */
static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
{
	unsigned long info = get_zeroed_page(GFP_KERNEL);
	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
	struct ccw_dev_id ccwid;
	int level, rc;
	tid->chpid = card->info.chpid;
	ccw_device_get_id(CARD_RDEV(card), &ccwid);
	tid->ssid = ccwid.ssid;
	tid->devno = ccwid.devno;
	/* without a scratch page only the CCW-derived fields get filled */
	if (!info)
		return;
	/* stsi(NULL, 0, 0, 0) reports the current configuration level in
	 * the upper nibble of its return value (s390 STSI instruction) */
	rc = stsi(NULL, 0, 0, 0);
	if (rc == -ENOSYS)
		level = rc;
	else
		level = (((unsigned int) rc) >> 28);
	if ((level >= 2) && (stsi(info222, 2, 2, 2) != -ENOSYS))
		tid->lparnr = info222->lpar_number;
	if ((level >= 3) && (stsi(info322, 3, 2, 2) != -ENOSYS)) {
		/* VM guest name is delivered in EBCDIC; convert to ASCII */
		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
	}
	free_page(info);
	return;
}
/* Callback for the SET_DIAG_ASS TRAP command: failures are only
 * logged, there is no state to update on success. */
static int qeth_hw_trap_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
	__u16 ret = cmd->hdr.return_code;

	if (ret)
		QETH_CARD_TEXT_(card, 2, "trapc:%x", ret);
	return 0;
}
/* Send a SET_DIAG_ASS TRAP command to arm, disarm or trigger a
 * hardware trap/trace capture on the adapter. */
int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	QETH_DBF_TEXT(SETUP, 2, "diagtrap");
	iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	cmd->data.diagass.subcmd_len = 80;
	cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		cmd->data.diagass.options = 0x0003;
		/* NOTE(review): 0x00010000 + trap-id size looks like a
		 * version/length encoding of the appended cdata — confirm
		 * against the IPA command specification */
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		/* identifying data travels in the cdata payload */
		qeth_get_trap_id(card,
			(struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_hw_trap);
/* Inspect a QDIO buffer after an error indication. Returns 1 for a
 * real error, 0 when there was no error or the condition is accounted
 * as a dropped packet. */
int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
		unsigned int qdio_error, const char *dbftext)
{
	if (!qdio_error)
		return 0;
	QETH_CARD_TEXT(card, 2, dbftext);
	QETH_CARD_TEXT_(card, 2, " F15=%02X", buf->element[15].sflags);
	QETH_CARD_TEXT_(card, 2, " F14=%02X", buf->element[14].sflags);
	QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
	/* SBALF 15 value 0x12 is counted as rx_dropped, not as an error */
	if (buf->element[15].sflags == 0x12) {
		card->stats.rx_dropped++;
		return 0;
	}
	return 1;
}
EXPORT_SYMBOL_GPL(qeth_check_qdio_errors);
/* Delayed work: retry requeueing inbound buffers from the index that
 * stalled when the buffer pool had run empty (scheduled from
 * qeth_queue_input_buffer). */
void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(work, struct qeth_card,
		buffer_reclaim_work.work);
	QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
	qeth_queue_input_buffer(card, card->reclaim_index);
}
/* Hand processed inbound buffers back to QDIO. 'index' is the ring
 * position up to (but excluding) which buffers have been processed;
 * buffers are only requeued once a batch of at least the requeue
 * threshold is available, to limit SIGA instructions. */
void qeth_queue_input_buffer(struct qeth_card *card, int index)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int count;
	int i;
	int rc;
	int newcount = 0;
	/* number of processed buffers between next_buf_to_init and
	 * index, accounting for ring wrap-around */
	count = (index < queue->next_buf_to_init)?
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init - index) :
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
				break;
			} else {
				newcount++;
			}
		}
		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packages */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}
		if (!count) {
			i = 0;
			/* count the entries still sitting on the free list */
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			/* whole pool free yet nothing initialized: retry
			 * later via the delayed reclaim work */
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				card->reclaim_index = index;
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return;
		}
		/*
		 * according to old code it should be avoided to requeue all
		 * 128 buffers in order to benefit from PCI avoidance.
		 * this function keeps at least one buffer (the buffer at
		 * 'index') un-requeued -> this buffer is the first buffer that
		 * will be requeued the next time
		 */
		if (card->options.performance_stats) {
			card->perf_stats.inbound_do_qdio_cnt++;
			card->perf_stats.inbound_do_qdio_start_time =
				qeth_get_micros();
		}
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count);
		if (card->options.performance_stats)
			card->perf_stats.inbound_do_qdio_time +=
				qeth_get_micros() -
				card->perf_stats.inbound_do_qdio_start_time;
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
					  QDIO_MAX_BUFFERS_PER_Q;
	}
}
EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
/* Classify a completed outbound buffer: no error, retryable error, or
 * link failure (based on the QDIO error and SBALF 15). */
static int qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	/* for IQD devices the error state is derived from SBALF 15 alone */
	if (card->info.type == QETH_CARD_TYPE_IQD)
		qdio_err = (sbalf15 != 0) ? 1 : 0;
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
	if (!qdio_err)
		return QETH_SEND_ERROR_NONE;
	/* SBALF 15 values 15..31 indicate conditions worth a retry */
	if (sbalf15 >= 15 && sbalf15 <= 31)
		return QETH_SEND_ERROR_RETRY;
	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x", (u16)qdio_err, (u8)sbalf15);
	return QETH_SEND_ERROR_LINK_FAILURE;
}
/*
 * Switch the queue to packing state if the number of used buffers
 * reaches the high watermark.
 */
static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
{
	if (queue->do_pack)
		return;
	if (atomic_read(&queue->used_buffers) < QETH_HIGH_WATERMARK_PACK)
		return;
	/* switch non-PACKING -> PACKING */
	QETH_CARD_TEXT(queue->card, 6, "np->pack");
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.sc_dp_p++;
	queue->do_pack = 1;
}
/*
 * Switches from packing to non-packing mode. If there is a packing
 * buffer on the queue this buffer will be prepared to be flushed.
 * In that case 1 is returned to inform the caller. If no buffer
 * has to be flushed, zero is returned.
 */
static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;
	int flush_count = 0;
	if (queue->do_pack) {
		if (atomic_read(&queue->used_buffers)
		    <= QETH_LOW_WATERMARK_PACK) {
			/* switch PACKING -> non-PACKING */
			QETH_CARD_TEXT(queue->card, 6, "pack->np");
			if (queue->card->options.performance_stats)
				queue->card->perf_stats.sc_p_dp++;
			queue->do_pack = 0;
			/* flush packing buffers */
			/* a partially filled buffer still in EMPTY state is
			 * the current packing buffer: mark it PRIMED so it
			 * gets flushed, and step the fill index past it */
			buffer = queue->bufs[queue->next_buf_to_fill];
			if ((atomic_read(&buffer->state) ==
			    QETH_QDIO_BUF_EMPTY) &&
			    (buffer->next_element_to_fill > 0)) {
				atomic_set(&buffer->state,
					QETH_QDIO_BUF_PRIMED);
				flush_count++;
				queue->next_buf_to_fill =
					(queue->next_buf_to_fill + 1) %
					QDIO_MAX_BUFFERS_PER_Q;
			}
		}
	}
	return flush_count;
}
/*
 * Called to flush a packing buffer if no more pci flags are on the queue.
 * Checks if there is a packing buffer and prepares it to be flushed.
 * In that case returns 1, otherwise zero.
 */
static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue)
{
	struct qeth_qdio_out_buffer *buffer;

	buffer = queue->bufs[queue->next_buf_to_fill];
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY ||
	    buffer->next_element_to_fill <= 0)
		return 0;
	/* partially filled + still EMPTY state => it's a packing buffer */
	atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
	queue->next_buf_to_fill =
		(queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
	return 1;
}
/* Hand 'count' primed outbound buffers starting at ring position
 * 'index' to QDIO; sets the LAST_ENTRY flag, decides whether to
 * request a PCI interrupt, and triggers recovery on persistent
 * do_QDIO errors. */
static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
		int count)
{
	struct qeth_qdio_out_buffer *buf;
	int rc;
	int i;
	unsigned int qdio_flags;
	for (i = index; i < index + count; ++i) {
		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
		buf = queue->bufs[bidx];
		/* mark the last used element of each buffer */
		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
				SBAL_EFLAGS_LAST_ENTRY;
		if (queue->bufstates)
			queue->bufstates[bidx].user = buf;
		/* IQD devices never use the PCI-flag heuristics below */
		if (queue->card->info.type == QETH_CARD_TYPE_IQD)
			continue;
		if (!queue->do_pack) {
			if ((atomic_read(&queue->used_buffers) >=
				(QETH_HIGH_WATERMARK_PACK -
				 QETH_WATERMARK_PACK_FUZZ)) &&
			    !atomic_read(&queue->set_pci_flags_count)) {
				/* it's likely that we'll go to packing
				 * mode soon */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		} else {
			if (!atomic_read(&queue->set_pci_flags_count)) {
				/*
				 * there's no outstanding PCI any more, so we
				 * have to request a PCI to be sure the PCI
				 * will wake at some time in the future then we
				 * can flush packed buffers that might still be
				 * hanging around, which can happen if no
				 * further send was requested by the stack
				 */
				atomic_inc(&queue->set_pci_flags_count);
				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
			}
		}
	}
	queue->card->dev->trans_start = jiffies;
	if (queue->card->options.performance_stats) {
		queue->card->perf_stats.outbound_do_qdio_cnt++;
		queue->card->perf_stats.outbound_do_qdio_start_time =
			qeth_get_micros();
	}
	qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
	if (atomic_read(&queue->set_pci_flags_count))
		qdio_flags |= QDIO_FLAG_PCI_OUT;
	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
		     queue->queue_no, index, count);
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.outbound_do_qdio_time +=
			qeth_get_micros() -
			queue->card->perf_stats.outbound_do_qdio_start_time;
	atomic_add(count, &queue->used_buffers);
	if (rc) {
		queue->card->stats.tx_errors += count;
		/* ignore temporary SIGA errors without busy condition */
		if (rc == QDIO_ERROR_SIGA_TARGET)
			return;
		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
		/* this must not happen under normal circumstances. if it
		 * happens something is really wrong -> recover */
		qeth_schedule_recovery(queue->card);
		return;
	}
	if (queue->card->options.performance_stats)
		queue->card->perf_stats.bufs_sent += count;
}
/* Periodic/idle check on an outbound queue: possibly leave packing
 * mode and flush a lingering packing buffer when no PCI request is
 * outstanding. Only acts if the queue lock can be taken. */
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
	int index;
	int flush_cnt = 0;
	int q_was_packing = 0;
	/*
	 * check if we have to switch to non-packing mode or if
	 * we have to get a pci flag out on the queue
	 */
	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
	    !atomic_read(&queue->set_pci_flags_count)) {
		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
				QETH_OUT_Q_UNLOCKED) {
			/*
			 * If we get in here, there was no action in
			 * do_send_packet. So, we check if there is a
			 * packing buffer to be flushed here.
			 */
			netif_stop_queue(queue->card->dev);
			index = queue->next_buf_to_fill;
			/* queue->do_pack may change */
			q_was_packing = queue->do_pack;
			barrier();
			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
			if (!flush_cnt &&
			    !atomic_read(&queue->set_pci_flags_count))
				flush_cnt +=
					qeth_flush_buffers_on_no_pci(queue);
			if (queue->card->options.performance_stats &&
			    q_was_packing)
				queue->card->perf_stats.bufs_sent_pack +=
					flush_cnt;
			if (flush_cnt)
				qeth_flush_buffers(queue, index, flush_cnt);
			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		}
	}
}
/* QDIO poll callback: kick NAPI processing while the interface is up. */
void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
		unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;
	if (card->dev && (card->dev->flags & IFF_UP))
		napi_schedule(&card->napi);
}
EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
/* Configure the completion queue (CQ) mode. Returns 0 on success and
 * -1 when the CQ is unavailable or the card state forbids the change. */
int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
{
	/* hardware without a CQ cannot be reconfigured */
	if (card->options.cq == QETH_CQ_NOTAVAILABLE)
		return -1;
	/* nothing to do when the requested mode is already active */
	if (card->options.cq == cq)
		return 0;
	/* the mode may only change in DOWN or RECOVER state */
	if (card->state != CARD_STATE_DOWN &&
	    card->state != CARD_STATE_RECOVER)
		return -1;
	/* buffers are re-allocated for the new mode on the next setup */
	qeth_free_qdio_buffers(card);
	card->options.cq = cq;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
/* Handle completed elements on the completion queue (CQ): each used
 * SBALE carries the address of an asynchronous output buffer (aob)
 * whose completion is processed, then the CQ buffers are cleared and
 * handed back to QDIO. */
static void qeth_qdio_cq_handler(struct qeth_card *card,
	unsigned int qdio_err,
	unsigned int queue, int first_element, int count) {
	struct qeth_qdio_q *cq = card->qdio.c_q;
	int i;
	int rc;
	if (!qeth_is_cq(card, queue))
		goto out;
	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
	/* any CQ error is fatal for the card -> full recovery */
	if (qdio_err) {
		netif_stop_queue(card->dev);
		qeth_schedule_recovery(card);
		goto out;
	}
	if (card->options.performance_stats) {
		card->perf_stats.cq_cnt++;
		card->perf_stats.cq_start_time = qeth_get_micros();
	}
	for (i = first_element; i < first_element + count; ++i) {
		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
		struct qdio_buffer *buffer = &cq->qdio_bufs[bidx];
		int e;
		e = 0;
		/* each non-NULL element address is a completed aob */
		while (buffer->element[e].addr) {
			unsigned long phys_aob_addr;
			phys_aob_addr = (unsigned long) buffer->element[e].addr;
			qeth_qdio_handle_aob(card, phys_aob_addr);
			/* reset the element for reuse */
			buffer->element[e].addr = NULL;
			buffer->element[e].eflags = 0;
			buffer->element[e].sflags = 0;
			buffer->element[e].length = 0;
			++e;
		}
		buffer->element[15].eflags = 0;
		buffer->element[15].sflags = 0;
	}
	/* requeue the processed CQ buffers */
	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
		    card->qdio.c_q->next_buf_to_init,
		    count);
	if (rc) {
		dev_warn(&card->gdev->dev,
			"QDIO reported an error, rc=%i\n", rc);
		QETH_CARD_TEXT(card, 2, "qcqherr");
	}
	card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
				   + count) % QDIO_MAX_BUFFERS_PER_Q;
	netif_wake_queue(card->dev);
	if (card->options.performance_stats) {
		int delta_t = qeth_get_micros();
		delta_t -= card->perf_stats.cq_start_time;
		card->perf_stats.cq_time += delta_t;
	}
out:
	return;
}
/* QDIO inbound interrupt entry point: dispatch completion-queue
 * interrupts to the CQ handler; for data queues, any QDIO error
 * triggers a card recovery. */
void qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err,
		unsigned int queue, int first_elem, int count,
		unsigned long card_ptr)
{
	struct qeth_card *card = (struct qeth_card *)card_ptr;

	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
	if (qeth_is_cq(card, queue)) {
		qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
		return;
	}
	if (qdio_err)
		qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_qdio_input_handler);
/* QDIO outbound completion handler: evaluate send errors for each
 * completed buffer, handle CQ-pending buffers (kept alive until their
 * aob completes), release the rest, and re-check the queue state. */
void qeth_qdio_output_handler(struct ccw_device *ccwdev,
		unsigned int qdio_error, int __queue, int first_element,
		int count, unsigned long card_ptr)
{
	struct qeth_card *card        = (struct qeth_card *) card_ptr;
	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
	struct qeth_qdio_out_buffer *buffer;
	int i;
	QETH_CARD_TEXT(card, 6, "qdouhdl");
	/* fatal activation check -> recover the whole card */
	if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
		QETH_CARD_TEXT(card, 2, "achkcond");
		netif_stop_queue(card->dev);
		qeth_schedule_recovery(card);
		return;
	}
	if (card->options.performance_stats) {
		card->perf_stats.outbound_handler_cnt++;
		card->perf_stats.outbound_handler_start_time =
			qeth_get_micros();
	}
	for (i = first_element; i < (first_element + count); ++i) {
		int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
		buffer = queue->bufs[bidx];
		qeth_handle_send_error(card, buffer, qdio_error);
		if (queue->bufstates &&
		    (queue->bufstates[bidx].flags &
		     QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
			/* buffer stays pending until its aob arrives on
			 * the completion queue */
			BUG_ON(card->options.cq != QETH_CQ_ENABLED);
			if (atomic_cmpxchg(&buffer->state,
					   QETH_QDIO_BUF_PRIMED,
					   QETH_QDIO_BUF_PENDING) ==
			    QETH_QDIO_BUF_PRIMED) {
				qeth_notify_skbs(queue, buffer,
						 TX_NOTIFY_PENDING);
			}
			buffer->aob = queue->bufstates[bidx].aob;
			QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
			QETH_CARD_TEXT(queue->card, 5, "aob");
			QETH_CARD_TEXT_(queue->card, 5, "%lx",
					virt_to_phys(buffer->aob));
			BUG_ON(bidx < 0 || bidx >= QDIO_MAX_BUFFERS_PER_Q);
			/* the ring slot needs a fresh buffer structure
			 * while the old one is parked as pending */
			if (qeth_init_qdio_out_buf(queue, bidx)) {
				QETH_CARD_TEXT(card, 2, "outofbuf");
				qeth_schedule_recovery(card);
			}
		} else {
			if (card->options.cq == QETH_CQ_ENABLED) {
				enum iucv_tx_notify n;
				n = qeth_compute_cq_notification(
					buffer->buffer->element[15].sflags, 0);
				qeth_notify_skbs(queue, buffer, n);
			}
			qeth_clear_output_buffer(queue, buffer,
						QETH_QDIO_BUF_EMPTY);
		}
		qeth_cleanup_handled_pending(queue, bidx, 0);
	}
	atomic_sub(count, &queue->used_buffers);
	/* check if we need to do something on this outbound queue */
	if (card->info.type != QETH_CARD_TYPE_IQD)
		qeth_check_outbound_queue(queue);
	netif_wake_queue(queue->card->dev);
	if (card->options.performance_stats)
		card->perf_stats.outbound_handler_time += qeth_get_micros() -
			card->perf_stats.outbound_handler_start_time;
}
EXPORT_SYMBOL_GPL(qeth_qdio_output_handler);
/* Select the outbound queue for a packet from the IP version, the
 * cast type and the configured priority-queueing policy. */
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
			int ipv, int cast_type)
{
	/* non-IP traffic on OSD/OSX always takes the default queue */
	if (!ipv && (card->info.type == QETH_CARD_TYPE_OSD ||
		     card->info.type == QETH_CARD_TYPE_OSX))
		return card->qdio.default_out_queue;
	switch (card->qdio.no_out_queues) {
	case 4:
		/* optional dedicated queue for multicast traffic */
		if (cast_type && card->info.is_multicast_different)
			return card->info.is_multicast_different &
				(card->qdio.no_out_queues - 1);
		if (card->qdio.do_prio_queueing && (ipv == 4)) {
			const u8 tos = ip_hdr(skb)->tos;
			if (card->qdio.do_prio_queueing ==
				QETH_PRIO_Q_ING_TOS) {
				/* first matching TOS bit wins; queue 0 is
				 * the highest priority */
				if (tos & IP_TOS_NOTIMPORTANT)
					return 3;
				if (tos & IP_TOS_HIGHRELIABILITY)
					return 2;
				if (tos & IP_TOS_HIGHTHROUGHPUT)
					return 1;
				if (tos & IP_TOS_LOWDELAY)
					return 0;
			}
			/* precedence mode: map the 3 precedence bits to
			 * queues 3..0 */
			if (card->qdio.do_prio_queueing ==
				QETH_PRIO_Q_ING_PREC)
				return 3 - (tos >> 6);
		} else if (card->qdio.do_prio_queueing && (ipv == 6)) {
			/* TODO: IPv6!!! */
		}
		return card->qdio.default_out_queue;
	case 1: /* fallthrough for single-out-queue 1920-device */
	default:
		return card->qdio.default_out_queue;
	}
}
EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
/* Compute how many SBAL elements the skb needs (one per page touched
 * by the linear part plus one per page fragment). Returns 0 when the
 * total, including 'elems' already reserved, exceeds the buffer. */
int qeth_get_elements_no(struct qeth_card *card, void *hdr,
		struct sk_buff *skb, int elems)
{
	int dlen = skb->len - skb->data_len;
	int needed;

	needed = PFN_UP((unsigned long)skb->data + dlen - 1) -
		 PFN_DOWN((unsigned long)skb->data);
	needed += skb_shinfo(skb)->nr_frags;
	if (needed + elems > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
			"(Number=%d / Length=%d). Discarded.\n",
			needed + elems, skb->len);
		return 0;
	}
	return needed;
}
EXPORT_SYMBOL_GPL(qeth_get_elements_no);
/* If the first 'len' bytes of the skb's linear data cross a page
 * boundary, shift the data into the headroom so they become
 * page-contiguous. Returns 1 when the headroom is too small, else 0. */
int qeth_hdr_chk_and_bounce(struct sk_buff *skb, int len)
{
	int hroom, inpage, rest;
	if (((unsigned long)skb->data & PAGE_MASK) !=
	    (((unsigned long)skb->data + len - 1) & PAGE_MASK)) {
		hroom = skb_headroom(skb);
		/* bytes of the header that still fit in the current page */
		inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE);
		rest = len - inpage;
		if (rest > hroom)
			return 1;
		/* move the whole linear part 'rest' bytes backwards so the
		 * header ends exactly at the page boundary */
		memmove(skb->data - rest, skb->data, skb->len - skb->data_len);
		skb->data -= rest;
		QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce);
/* Map the skb's linear data and page fragments into the SBAL elements
 * of 'buffer', starting at *next_element_to_fill (updated on return).
 * 'is_tso' and a non-negative 'offset' indicate that the header was
 * already placed elsewhere, so the first data element is flagged as a
 * middle fragment instead of a first fragment. */
static inline void __qeth_fill_buffer(struct sk_buff *skb,
	struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill,
	int offset)
{
	int length = skb->len - skb->data_len;
	int length_here;
	int element;
	char *data;
	int first_lap, cnt;
	struct skb_frag_struct *frag;
	element = *next_element_to_fill;
	data = skb->data;
	first_lap = (is_tso == 0 ? 1 : 0);
	if (offset >= 0) {
		/* skip 'offset' bytes already consumed by the header */
		data = skb->data + offset;
		length -= offset;
		first_lap = 0;
	}
	while (length > 0) {
		/* length_here is the remaining amount of data in this page */
		length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE);
		if (length < length_here)
			length_here = length;
		buffer->element[element].addr = data;
		buffer->element[element].length = length_here;
		length -= length_here;
		/* choose FIRST/MIDDLE fragment flags depending on whether
		 * this is the first element and whether more data follows */
		if (!length) {
			if (first_lap)
				if (skb_shinfo(skb)->nr_frags)
					buffer->element[element].eflags =
						SBAL_EFLAGS_FIRST_FRAG;
				else
					buffer->element[element].eflags = 0;
			else
				buffer->element[element].eflags =
				    SBAL_EFLAGS_MIDDLE_FRAG;
		} else {
			if (first_lap)
				buffer->element[element].eflags =
				    SBAL_EFLAGS_FIRST_FRAG;
			else
				buffer->element[element].eflags =
				    SBAL_EFLAGS_MIDDLE_FRAG;
		}
		data += length_here;
		element++;
		first_lap = 0;
	}
	/* one element per page fragment */
	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
		frag = &skb_shinfo(skb)->frags[cnt];
		buffer->element[element].addr = (char *)
			page_to_phys(skb_frag_page(frag))
			+ frag->page_offset;
		buffer->element[element].length = frag->size;
		buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
		element++;
	}
	/* the last flagged element closes the fragment chain */
	if (buffer->element[element - 1].eflags)
		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
	*next_element_to_fill = element;
}
/* Attach 'skb' to the output buffer 'buf' and map it (plus its qeth
 * header) into the SBAL. Returns 1 if the buffer was primed and must
 * be flushed by the caller, 0 if it keeps collecting packed skbs. */
static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_qdio_out_buffer *buf, struct sk_buff *skb,
		struct qeth_hdr *hdr, int offset, int hd_len)
{
	struct qdio_buffer *buffer;
	int flush_cnt = 0, hdr_len, large_send = 0;
	buffer = buf->buffer;
	/* hold a reference; released when the buffer completes */
	atomic_inc(&skb->users);
	skb_queue_tail(&buf->skb_list, skb);
	/*check first on TSO ....*/
	if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) {
		int element = buf->next_element_to_fill;
		hdr_len = sizeof(struct qeth_hdr_tso) +
			((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len;
		/*fill first buffer entry only with header information */
		buffer->element[element].addr = skb->data;
		buffer->element[element].length = hdr_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
		buf->next_element_to_fill++;
		/* skip the header when mapping the payload below */
		skb->data += hdr_len;
		skb->len  -= hdr_len;
		large_send = 1;
	}
	if (offset >= 0) {
		/* the qeth header lives in a separate element */
		int element = buf->next_element_to_fill;
		buffer->element[element].addr = hdr;
		buffer->element[element].length = sizeof(struct qeth_hdr) +
							hd_len;
		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
		buf->is_header[element] = 1;
		buf->next_element_to_fill++;
	}
	__qeth_fill_buffer(skb, buffer, large_send,
		(int *)&buf->next_element_to_fill, offset);
	if (!queue->do_pack) {
		QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
		flush_cnt = 1;
	} else {
		QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/*
			 * packed buffer is full -> set state PRIMED
			 * -> will be flushed
			 */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt = 1;
		}
	}
	return flush_cnt;
}
/* Fast path for a non-packed send: take the queue lock, claim the
 * next empty buffer, fill it and flush it immediately. Returns 0 on
 * success, -EBUSY if the next buffer is not empty. */
int qeth_do_send_packet_fast(struct qeth_card *card,
		struct qeth_qdio_out_q *queue, struct sk_buff *skb,
		struct qeth_hdr *hdr, int elements_needed,
		int offset, int hd_len)
{
	struct qeth_qdio_out_buffer *buffer;
	int index;
	/* spin until we get the queue ... */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	/* ... now we've got the queue */
	index = queue->next_buf_to_fill;
	buffer = queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
		goto out;
	/* advance the fill index and drop the lock before the slower
	 * fill + flush; the claimed buffer stays ours either way */
	queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
				  QDIO_MAX_BUFFERS_PER_Q;
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
	qeth_flush_buffers(queue, index, 1);
	return 0;
out:
	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
/* Send an skb on a (possibly packing) outbound queue: take the queue
 * lock, append to / prime buffers as needed, flush, then service any
 * notifications that arrived while the lock was held. Returns 0 on
 * success, -EBUSY if no empty buffer was available. */
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
		struct sk_buff *skb, struct qeth_hdr *hdr,
		int elements_needed)
{
	struct qeth_qdio_out_buffer *buffer;
	int start_index;
	int flush_count = 0;
	int do_pack = 0;
	int tmp;
	int rc = 0;
	/* spin until we get the queue ... */
	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
	start_index = queue->next_buf_to_fill;
	buffer = queue->bufs[queue->next_buf_to_fill];
	/*
	 * check if buffer is empty to make sure that we do not 'overtake'
	 * ourselves and try to fill a buffer that is already primed
	 */
	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
		return -EBUSY;
	}
	/* check if we need to switch packing state of this queue */
	qeth_switch_to_packing_if_needed(queue);
	if (queue->do_pack) {
		do_pack = 1;
		/* does packet fit in current buffer? */
		if ((QETH_MAX_BUFFER_ELEMENTS(card) -
		    buffer->next_element_to_fill) < elements_needed) {
			/* ... no -> set state PRIMED */
			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
			flush_count++;
			queue->next_buf_to_fill =
				(queue->next_buf_to_fill + 1) %
				QDIO_MAX_BUFFERS_PER_Q;
			buffer = queue->bufs[queue->next_buf_to_fill];
			/* we did a step forward, so check buffer state
			 * again */
			if (atomic_read(&buffer->state) !=
			    QETH_QDIO_BUF_EMPTY) {
				qeth_flush_buffers(queue, start_index,
							   flush_count);
				atomic_set(&queue->state,
						QETH_OUT_Q_UNLOCKED);
				return -EBUSY;
			}
		}
	}
	tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0);
	queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) %
				  QDIO_MAX_BUFFERS_PER_Q;
	flush_count += tmp;
	if (flush_count)
		qeth_flush_buffers(queue, start_index, flush_count);
	else if (!atomic_read(&queue->set_pci_flags_count))
		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
	/*
	 * queue->state will go from LOCKED -> UNLOCKED or from
	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
	 * (switch packing state or flush buffer to get another pci flag out).
	 * In that case we will enter this loop
	 */
	while (atomic_dec_return(&queue->state)) {
		flush_count = 0;
		start_index = queue->next_buf_to_fill;
		/* check if we can go back to non-packing state */
		flush_count += qeth_switch_to_nonpacking_if_needed(queue);
		/*
		 * check if we need to flush a packing buffer to get a pci
		 * flag out on the queue
		 */
		if (!flush_count && !atomic_read(&queue->set_pci_flags_count))
			flush_count += qeth_flush_buffers_on_no_pci(queue);
		if (flush_count)
			qeth_flush_buffers(queue, start_index, flush_count);
	}
	/* at this point the queue is UNLOCKED again */
	if (queue->card->options.performance_stats && do_pack)
		queue->card->perf_stats.bufs_sent_pack += flush_count;
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
/* Callback for SET_PROMISC_MODE: record the mode the adapter actually
 * accepted; on failure the cached mode falls back to 'off'. */
static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipacmd_setadpparms *setparms =
		&cmd->data.setadapterparms;

	QETH_CARD_TEXT(card, 4, "prmadpcb");
	qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd);
	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "prmrc%2.2x", cmd->hdr.return_code);
		setparms->data.mode = SET_PROMISC_MODE_OFF;
	}
	card->info.promisc_mode = setparms->data.mode;
	return 0;
}
/* Propagate the netdev's IFF_PROMISC flag to the adapter; no command
 * is sent when the cached mode already matches. */
void qeth_setadp_promisc_mode(struct qeth_card *card)
{
	struct net_device *dev = card->dev;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	enum qeth_ipa_promisc_modes mode;

	QETH_CARD_TEXT(card, 4, "setprom");
	mode = (dev->flags & IFF_PROMISC) ? SET_PROMISC_MODE_ON :
					    SET_PROMISC_MODE_OFF;
	if (card->info.promisc_mode == mode)
		return;
	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
			sizeof(struct qeth_ipacmd_setadpparms));
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	cmd->data.setadapterparms.data.mode = mode;
	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
/* net_device_ops.ndo_change_mtu: validate the new MTU against the
 * hard limits and the card's capabilities, then apply it. */
int qeth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct qeth_card *card = dev->ml_priv;
	char dbf_text[15];

	QETH_CARD_TEXT(card, 4, "chgmtu");
	sprintf(dbf_text, "%8x", new_mtu);
	QETH_CARD_TEXT(card, 4, dbf_text);
	/* hard bounds first, then the card-specific check */
	if (new_mtu < 64 || new_mtu > 65535)
		return -EINVAL;
	if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION) &&
	    !qeth_mtu_is_valid(card, new_mtu))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_change_mtu);
/* net_device_ops.ndo_get_stats: return the card's accumulated stats. */
struct net_device_stats *qeth_get_stats(struct net_device *dev)
{
	struct qeth_card *card;
	card = dev->ml_priv;
	QETH_CARD_TEXT(card, 5, "getstat");
	return &card->stats;
}
EXPORT_SYMBOL_GPL(qeth_get_stats);
/* Callback for ALTER_MAC_ADDRESS (READ_MAC): copy the MAC address the
 * adapter reports into dev_addr (unless a layer2 setup already read
 * one).
 *
 * Fix: the original version copied the reply payload even when the
 * command failed, which could clobber a valid dev_addr with invalid
 * data (e.g. zeroes). Skip the copy on a non-zero return code. */
static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "chgmaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	if (cmd->hdr.return_code) {
		/* reply payload is not valid on failure */
		QETH_CARD_TEXT_(card, 2, "mccb:%x", cmd->hdr.return_code);
		qeth_default_setadapterparms_cb(card, reply,
						(unsigned long) cmd);
		return 0;
	}
	if (!card->options.layer2 ||
	    !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
		memcpy(card->dev->dev_addr,
		       &cmd->data.setadapterparms.data.change_addr.addr,
		       OSA_ADDR_LEN);
		card->info.mac_bits |= QETH_LAYER2_MAC_READ;
	}
	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
	return 0;
}
/* Ask the adapter for its MAC address via the READ_MAC sub-command of
 * ALTER_MAC_ADDRESS; the callback stores the result in dev_addr. */
int qeth_setadpparms_change_macaddr(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 4, "chgmac");
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
			sizeof(struct qeth_ipacmd_setadpparms));
	cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
	cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
	memcpy(&cmd->data.setadapterparms.data.change_addr.addr,
	       card->dev->dev_addr, OSA_ADDR_LEN);
	return qeth_send_ipa_cmd(card, iob,
				 qeth_setadpparms_change_macaddr_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
/* Callback for SET_ACCESS_CONTROL: evaluate the adapter's return code,
 * report the resulting isolation state, and fall back to
 * ISOLATION_MODE_NONE whenever the requested mode was rejected. */
static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;
	QETH_CARD_TEXT(card, 4, "setaccb");
	cmd = (struct qeth_ipa_cmd *) data;
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	QETH_DBF_TEXT_(SETUP, 2, "setaccb");
	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
	QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
		cmd->data.setadapterparms.hdr.return_code);
	switch (cmd->data.setadapterparms.hdr.return_code) {
	case SET_ACCESS_CTRL_RC_SUCCESS:
	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
	{
		/* accepted (or already in effect): record the new mode */
		card->options.isolation = access_ctrl_req->subcmd_code;
		if (card->options.isolation == ISOLATION_MODE_NONE) {
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is deactivated\n");
		} else {
			dev_info(&card->gdev->dev,
			    "QDIO data connection isolation is activated\n");
		}
		QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
			card->gdev->dev.kobj.name,
			access_ctrl_req->subcmd_code,
			cmd->data.setadapterparms.hdr.return_code);
		break;
	}
	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
	{
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
			card->gdev->dev.kobj.name,
			access_ctrl_req->subcmd_code,
			cmd->data.setadapterparms.hdr.return_code);
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
		/* ensure isolation mode is "none" */
		card->options.isolation = ISOLATION_MODE_NONE;
		break;
	}
	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
	{
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
			card->gdev->dev.kobj.name,
			access_ctrl_req->subcmd_code,
			cmd->data.setadapterparms.hdr.return_code);
		dev_err(&card->gdev->dev,
			"Adapter is dedicated. "
			"QDIO data connection isolation not supported\n");
		/* ensure isolation mode is "none" */
		card->options.isolation = ISOLATION_MODE_NONE;
		break;
	}
	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
	{
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
			card->gdev->dev.kobj.name,
			access_ctrl_req->subcmd_code,
			cmd->data.setadapterparms.hdr.return_code);
		dev_err(&card->gdev->dev,
			"TSO does not permit QDIO data connection isolation\n");
		/* ensure isolation mode is "none" */
		card->options.isolation = ISOLATION_MODE_NONE;
		break;
	}
	default:
	{
		/* this should never happen */
		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
			"==UNKNOWN\n",
			card->gdev->dev.kobj.name,
			access_ctrl_req->subcmd_code,
			cmd->data.setadapterparms.hdr.return_code);
		/* ensure isolation mode is "none" */
		card->options.isolation = ISOLATION_MODE_NONE;
		break;
	}
	}
	qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
	return 0;
}
/* Issue an IPA SET_ACCESS_CONTROL command requesting the given QDIO data
 * connection isolation mode.  The adapter's answer is evaluated
 * asynchronously in qeth_setadpparms_set_access_ctrl_cb().
 * Returns the rc of qeth_send_ipa_cmd().
 */
static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
		enum qeth_ipa_isolation_modes isolation)
{
	int rc;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_set_access_ctrl *access_ctrl_req;

	QETH_CARD_TEXT(card, 4, "setacctl");
	QETH_DBF_TEXT_(SETUP, 2, "setacctl");
	QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
	/* build a setadapterparms command carrying the isolation subcommand */
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
		sizeof(struct qeth_ipacmd_setadpparms_hdr) +
		sizeof(struct qeth_set_access_ctrl));
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
	access_ctrl_req->subcmd_code = isolation;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
		NULL);
	QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
	return rc;
}
/* Push the currently configured isolation mode to the adapter.  Only
 * OSD/OSX cards that advertise SET_ACCESS_CONTROL support can do this;
 * otherwise the mode is forced back to "none" and -EOPNOTSUPP is
 * returned if a non-default mode had been requested.
 */
int qeth_set_access_ctrl_online(struct qeth_card *card)
{
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "setactlo");
	if ((card->info.type == QETH_CARD_TYPE_OSD ||
	     card->info.type == QETH_CARD_TYPE_OSX) &&
	     qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
		rc = qeth_setadpparms_set_access_ctrl(card,
			card->options.isolation);
		if (rc) {
			QETH_DBF_MESSAGE(3,
				"IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
				card->gdev->dev.kobj.name,
				rc);
		}
	} else if (card->options.isolation != ISOLATION_MODE_NONE) {
		/* isolation requested but not supported by this setup */
		card->options.isolation = ISOLATION_MODE_NONE;
		dev_err(&card->gdev->dev, "Adapter does not "
			"support QDIO data connection isolation\n");
		rc = -EOPNOTSUPP;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
/* net_device tx_timeout hook: count the error and kick off a full
 * device recovery.
 */
void qeth_tx_timeout(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "txtimeo");
	card->stats.tx_errors++;
	qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
/* Emulate MII register reads for ethtool/mii-tool.  The hardware has no
 * real PHY, so plausible constant values are synthesized; every register
 * not handled below reads as zero.
 */
int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
{
	struct qeth_card *card = dev->ml_priv;
	int ret = 0;

	switch (regnum) {
	case MII_BMCR: /* Basic mode control register */
		ret = BMCR_FULLDPLX;
		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
		    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
			ret |= BMCR_SPEED100;
		break;
	case MII_BMSR: /* Basic mode status register */
		ret = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
		      BMSR_10HALF | BMSR_10FULL | BMSR_100HALF |
		      BMSR_100FULL | BMSR_100BASE4;
		break;
	case MII_PHYSID1: /* PHYS ID 1 */
		/* fabricate a PHY id from the MAC address */
		ret = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
		      dev->dev_addr[2];
		ret = (ret >> 5) & 0xFFFF;
		break;
	case MII_PHYSID2: /* PHYS ID 2 */
		ret = (dev->dev_addr[2] << 10) & 0xFFFF;
		break;
	case MII_ADVERTISE: /* Advertisement control reg */
		ret = ADVERTISE_ALL;
		break;
	case MII_LPA: /* Link partner ability reg */
		ret = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
		      LPA_100BASE4 | LPA_LPACK;
		break;
	case MII_RERRCOUNTER: /* rx error counter */
		ret = card->stats.rx_errors;
		break;
	default:
		/* MII_EXPANSION, MII_DCOUNTER, MII_FCSCOUNTER,
		 * MII_NWAYTEST, MII_SREVISION, MII_RESV1, MII_LBRERROR,
		 * MII_PHYADDR, MII_RESV2, MII_TPISTATUS, MII_NCONFIG and
		 * anything unknown all read as zero.
		 */
		break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(qeth_mdio_read);
/* Wrap an SNMP setadapterparms payload of 'len' bytes into an IPA PDU:
 * copy in the template header, patch in the ULP connection token and the
 * PDU length fields, then submit via qeth_send_control_data().
 */
static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
		struct qeth_cmd_buffer *iob, int len,
		int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
			unsigned long),
		void *reply_param)
{
	u16 s1, s2;

	QETH_CARD_TEXT(card, 4, "sendsnmp");
	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	/* adjust PDU length fields in IPA_PDU_HEADER */
	s1 = (u32) IPA_PDU_HEADER_SIZE + len;
	s2 = (u32) len;
	/* the three PDU1..PDU3 length slots all carry the payload length */
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2);
	return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
				      reply_cb, reply_param);
}
/* Reply callback for SET_SNMP_CONTROL: copy each reply fragment into the
 * user-destined buffer described by reply->param (a qeth_arp_query_info).
 * Returns 1 while more fragments are outstanding, 0 when the command is
 * complete (including error completion, signalled via hdr.return_code).
 */
static int qeth_snmp_command_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long sdata)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_arp_query_info *qinfo;
	struct qeth_snmp_cmd *snmp;
	unsigned char *data;
	__u16 data_len;

	QETH_CARD_TEXT(card, 3, "snpcmdcb");
	cmd = (struct qeth_ipa_cmd *) sdata;
	/* recover the start of the raw PDU so the length fields can be read */
	data = (unsigned char *)((char *)cmd - reply->offset);
	qinfo = (struct qeth_arp_query_info *) reply->param;
	snmp = &cmd->data.setadapterparms.data.snmp;
	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "scer1%i", cmd->hdr.return_code);
		return 0;
	}
	if (cmd->data.setadapterparms.hdr.return_code) {
		/* propagate the setadapterparms error to the outer header */
		cmd->hdr.return_code =
			cmd->data.setadapterparms.hdr.return_code;
		QETH_CARD_TEXT_(card, 4, "scer2%i", cmd->hdr.return_code);
		return 0;
	}
	data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
	/* the first fragment carries the snmp command header as well;
	 * later fragments start at the request field */
	if (cmd->data.setadapterparms.hdr.seq_no == 1)
		data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
	else
		data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM);
		cmd->hdr.return_code = IPA_RC_ENOMEM;
		return 0;
	}
	QETH_CARD_TEXT_(card, 4, "snore%i",
		cmd->data.setadapterparms.hdr.used_total);
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
		cmd->data.setadapterparms.hdr.seq_no);
	/*copy entries to user buffer*/
	if (cmd->data.setadapterparms.hdr.seq_no == 1) {
		memcpy(qinfo->udata + qinfo->udata_offset,
		       (char *)snmp,
		       data_len + offsetof(struct qeth_snmp_cmd, data));
		qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
	} else {
		memcpy(qinfo->udata + qinfo->udata_offset,
		       (char *)&snmp->request, data_len);
	}
	qinfo->udata_offset += data_len;
	/* check if all replies received ... */
	QETH_CARD_TEXT_(card, 4, "srtot%i",
		cmd->data.setadapterparms.hdr.used_total);
	QETH_CARD_TEXT_(card, 4, "srseq%i",
		cmd->data.setadapterparms.hdr.seq_no);
	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
/* Handle the SNMP ioctl: forward an SNMP request from user space to the
 * adapter via SET_SNMP_CONTROL and copy the reply back.
 * Returns 0 on success or a negative errno.
 */
int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_snmp_ureq *ureq;
	int req_len;
	struct qeth_arp_query_info qinfo = {0, };
	int rc = 0;

	QETH_CARD_TEXT(card, 3, "snmpcmd");
	if (card->info.guestlan)
		return -EOPNOTSUPP;
	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
	    (!card->options.layer2)) {
		return -EOPNOTSUPP;
	}
	/* skip 4 bytes (data_len struct member) to get req_len */
	if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int)))
		return -EFAULT;
	/* Reject oversized lengths (a negative req_len is caught too, via
	 * promotion to size_t) before they reach memdup_user() and
	 * qeth_get_adapter_cmd(); an unchecked req_len lets user space
	 * overflow the command buffer (CVE-2013-6381).
	 */
	if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE -
		       sizeof(struct qeth_ipacmd_hdr) -
		       sizeof(struct qeth_ipacmd_setadpparms_hdr)))
		return -EINVAL;
	ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr));
	if (IS_ERR(ureq)) {
		QETH_CARD_TEXT(card, 2, "snmpnome");
		return PTR_ERR(ureq);
	}
	qinfo.udata_len = ureq->hdr.data_len;
	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
	if (!qinfo.udata) {
		kfree(ureq);
		return -ENOMEM;
	}
	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
				   QETH_SNMP_SETADP_CMDLENGTH + req_len);
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
	rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
				    qeth_snmp_command_cb, (void *)&qinfo);
	if (rc)
		QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
				 QETH_CARD_IFNAME(card), rc);
	else {
		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
			rc = -EFAULT;
	}
	kfree(ureq);
	kfree(qinfo.udata);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_snmp_command);
/* Reply callback for QUERY_OAT: append each reply chunk to the
 * caller-provided buffer in reply->param (a qeth_qoat_priv).
 * Returns 1 while more reply fragments are outstanding, 0 when done.
 */
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_qoat_priv *priv;
	char *resdata;
	int resdatalen;

	QETH_CARD_TEXT(card, 3, "qoatcb");
	cmd = (struct qeth_ipa_cmd *)data;
	priv = (struct qeth_qoat_priv *)reply->param;
	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
	/* payload starts 28 bytes into the reply -- presumably the IPA +
	 * setadapterparms headers; TODO confirm against the IPA layout */
	resdata = (char *)data + 28;
	if (resdatalen > (priv->buffer_len - priv->response_len)) {
		/* result would overflow the user-sized staging buffer */
		cmd->hdr.return_code = IPA_RC_FFFF;
		return 0;
	}
	memcpy((priv->buffer + priv->response_len), resdata,
	       resdatalen);
	priv->response_len += resdatalen;
	/* more fragments to come? */
	if (cmd->data.setadapterparms.hdr.seq_no <
	    cmd->data.setadapterparms.hdr.used_total)
		return 1;
	return 0;
}
/* Handle the QUERY_OAT ioctl: collect the adapter's OSA address table
 * into a kernel staging buffer via qeth_setadpparms_query_oat_cb(), then
 * copy it out to the (possibly compat) user pointer in oat_data.ptr.
 * Returns 0 on success or a negative errno.
 */
int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
{
	int rc = 0;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;
	struct qeth_query_oat *oat_req;
	struct qeth_query_oat_data oat_data;
	struct qeth_qoat_priv priv;
	void __user *tmp;

	QETH_CARD_TEXT(card, 3, "qoatcmd");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
		rc = -EOPNOTSUPP;
		goto out;
	}
	if (copy_from_user(&oat_data, udata,
	    sizeof(struct qeth_query_oat_data))) {
		rc = -EFAULT;
		goto out;
	}
	/* NOTE(review): oat_data.buffer_len comes straight from user space
	 * and is passed unchecked to kzalloc -- verify an upper bound is
	 * enforced by the caller or add one here. */
	priv.buffer_len = oat_data.buffer_len;
	priv.response_len = 0;
	priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL);
	if (!priv.buffer) {
		rc = -ENOMEM;
		goto out;
	}
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
		sizeof(struct qeth_ipacmd_setadpparms_hdr) +
		sizeof(struct qeth_query_oat));
	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
	oat_req = &cmd->data.setadapterparms.data.query_oat;
	oat_req->subcmd_code = oat_data.command;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
		&priv);
	if (!rc) {
		/* 31-bit compat callers pass a compat pointer */
		if (is_compat_task())
			tmp = compat_ptr(oat_data.ptr);
		else
			tmp = (void __user *)(unsigned long)oat_data.ptr;
		if (copy_to_user(tmp, priv.buffer,
		    priv.response_len)) {
			rc = -EFAULT;
			goto out_free;
		}
		oat_data.response_len = priv.response_len;
		if (copy_to_user(udata, &oat_data,
		    sizeof(struct qeth_query_oat_data)))
			rc = -EFAULT;
	} else
		/* callback flags buffer overflow with IPA_RC_FFFF */
		if (rc == IPA_RC_FFFF)
			rc = -EFAULT;
out_free:
	kfree(priv.buffer);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_query_oat_command);
/* Return the QDIO queue format for this card type: IQD (HiperSockets)
 * devices use format 2, all other card types use format 0.
 */
static inline int qeth_get_qdio_q_format(struct qeth_card *card)
{
	return (card->info.type == QETH_CARD_TYPE_IQD) ? 2 : 0;
}
/* Probe device capabilities: read the configuration data record and the
 * SSQD descriptor to learn the unit address, BLKT defaults and whether
 * the Completion Queue feature is available.  The data device is
 * temporarily set online if it was offline.
 */
static void qeth_determine_capabilities(struct qeth_card *card)
{
	int rc;
	int length;
	char *prcd;
	struct ccw_device *ddev;
	int ddev_offline = 0;

	QETH_DBF_TEXT(SETUP, 2, "detcapab");
	ddev = CARD_DDEV(card);
	if (!ddev->online) {
		/* remember to restore the offline state on exit */
		ddev_offline = 1;
		rc = ccw_device_set_online(ddev);
		if (rc) {
			QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
			goto out;
		}
	}
	rc = qeth_read_conf_data(card, (void **) &prcd, &length);
	if (rc) {
		QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
			dev_name(&card->gdev->dev), rc);
		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
		goto out_offline;
	}
	qeth_configure_unitaddr(card, prcd);
	if (ddev_offline)
		qeth_configure_blkt_default(card, prcd);
	kfree(prcd);
	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
	if (rc)
		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
	QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt);
	QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac1);
	QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac3);
	QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt);
	/* CQ needs IQDIO queue format plus both the INITIATE_INPUTQ and
	 * FORMAT2_CQ facility bits */
	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
		dev_info(&card->gdev->dev,
			"Completion Queueing supported\n");
	} else {
		card->options.cq = QETH_CQ_NOTAVAILABLE;
	}
out_offline:
	if (ddev_offline == 1)
		ccw_device_set_offline(ddev);
out:
	return;
}
/* When the Completion Queue is enabled, wire the CQ's buffers into the
 * last slot of the input-SBAL array and clear its queue_start_poll
 * entry (the CQ is never polled).
 * Fix: the original assigned "i = QDIO_MAX_BUFFERS_PER_Q * ..." and then
 * immediately overwrote i in the for loop -- a dead store; the value is
 * kept only in 'offset'.
 */
static inline void qeth_qdio_establish_cq(struct qeth_card *card,
	struct qdio_buffer **in_sbal_ptrs,
	void (**queue_start_poll) (struct ccw_device *, int, unsigned long))
{
	int i;

	if (card->options.cq == QETH_CQ_ENABLED) {
		/* the CQ occupies the last input queue slot */
		int offset = QDIO_MAX_BUFFERS_PER_Q *
			     (card->qdio.no_in_queues - 1);
		for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
			in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
				virt_to_phys(card->qdio.c_q->bufs[i].buffer);
		}
		queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
	}
}
/* Allocate and establish the QDIO queues for this card: build the qib
 * parameter field, the input/output SBAL pointer arrays and the
 * queue_start_poll array, then call qdio_allocate()/qdio_establish().
 * All temporary arrays are freed on every exit path (goto cleanup
 * ladder).  Returns 0 on success or a negative errno.
 */
static int qeth_qdio_establish(struct qeth_card *card)
{
	struct qdio_initialize init_data;
	char *qib_param_field;
	struct qdio_buffer **in_sbal_ptrs;
	void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
	struct qdio_buffer **out_sbal_ptrs;
	int i, j, k;
	int rc = 0;

	QETH_DBF_TEXT(SETUP, 2, "qdioest");
	qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q * sizeof(char),
			      GFP_KERNEL);
	if (!qib_param_field) {
		rc =  -ENOMEM;
		goto out_free_nothing;
	}
	qeth_create_qib_param_field(card, qib_param_field);
	qeth_create_qib_param_field_blkt(card, qib_param_field);
	in_sbal_ptrs = kzalloc(card->qdio.no_in_queues *
			       QDIO_MAX_BUFFERS_PER_Q * sizeof(void *),
			       GFP_KERNEL);
	if (!in_sbal_ptrs) {
		rc = -ENOMEM;
		goto out_free_qib_param;
	}
	/* queue 0 is the regular input queue */
	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
		in_sbal_ptrs[i] = (struct qdio_buffer *)
			virt_to_phys(card->qdio.in_q->bufs[i].buffer);
	}
	queue_start_poll = kzalloc(sizeof(void *) * card->qdio.no_in_queues,
				   GFP_KERNEL);
	if (!queue_start_poll) {
		rc = -ENOMEM;
		goto out_free_in_sbals;
	}
	for (i = 0; i < card->qdio.no_in_queues; ++i)
		queue_start_poll[i] = card->discipline.start_poll;
	/* if enabled, the CQ takes the last input queue slot */
	qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);
	out_sbal_ptrs =
		kzalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q *
			sizeof(void *), GFP_KERNEL);
	if (!out_sbal_ptrs) {
		rc = -ENOMEM;
		goto out_free_queue_start_poll;
	}
	for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
			out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
				card->qdio.out_qs[i]->bufs[j]->buffer);
		}
	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.cdev                   = CARD_DDEV(card);
	init_data.q_format               = qeth_get_qdio_q_format(card);
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field        = qib_param_field;
	init_data.no_input_qs            = card->qdio.no_in_queues;
	init_data.no_output_qs           = card->qdio.no_out_queues;
	init_data.input_handler          = card->discipline.input_handler;
	init_data.output_handler         = card->discipline.output_handler;
	init_data.queue_start_poll_array = queue_start_poll;
	init_data.int_parm               = (unsigned long) card;
	init_data.input_sbal_addr_array  = (void **) in_sbal_ptrs;
	init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
	init_data.output_sbal_state_array = card->qdio.out_bufstates;
	/* IQD devices get a lower SIGA scan threshold */
	init_data.scan_threshold =
		(card->info.type == QETH_CARD_TYPE_IQD) ? 8 : 32;
	/* only establish if we are the first to move out of ALLOCATED */
	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
		rc = qdio_allocate(&init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
		rc = qdio_establish(&init_data);
		if (rc) {
			/* roll back to ALLOCATED so a retry is possible */
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
		}
	}
	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}
out:
	kfree(out_sbal_ptrs);
out_free_queue_start_poll:
	kfree(queue_start_poll);
out_free_in_sbals:
	kfree(in_sbal_ptrs);
out_free_qib_param:
	kfree(qib_param_field);
out_free_nothing:
	return rc;
}
/* Release all resources owned by a qeth_card: channels, net_device,
 * pending-IP list, QDIO buffers and the service-level registration,
 * then free the card structure itself.
 */
static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_DBF_TEXT(SETUP, 2, "freecrd");
	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
	qeth_clean_channel(&card->read);
	qeth_clean_channel(&card->write);
	if (card->dev)
		free_netdev(card->dev);
	kfree(card->ip_tbd_list);
	qeth_free_qdio_buffers(card);
	unregister_service_level(&card->qeth_service_level);
	kfree(card);
}
/* CCW device ids served by this driver; driver_info selects the qeth
 * card type for each cu/dev type pair.
 */
static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
					.driver_info = QETH_CARD_TYPE_OSN},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
	{},
};
MODULE_DEVICE_TABLE(ccw, qeth_ids);
/* Low-level CCW driver; the real probe/remove work is delegated to the
 * ccwgroup layer.
 */
static struct ccw_driver qeth_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};
/* Parse a "read,write,data" bus-id triple from sysfs input and create
 * the corresponding ccwgroup device.
 */
static int qeth_core_driver_group(const char *buf, struct device *root_dev,
				  unsigned long driver_id)
{
	int rc;

	rc = ccwgroup_create_from_string(root_dev, driver_id,
					 &qeth_ccw_driver, 3, buf);
	return rc;
}
/* Bring the card's three CCW devices online, clear the QDIO queues and
 * activate the IDX read/write channels, retrying the whole sequence on
 * transient failures.
 * Fix: the original mixed "++retries > 3" (counter starting at 0) at the
 * first failure site with "--retries < 0" at the IDX-activate sites, so
 * IDX activation failures were never retried (0 decrements to -1
 * immediately).  Use one decrementing counter starting at 3 everywhere.
 */
int qeth_core_hardsetup_card(struct qeth_card *card)
{
	int retries = 3;
	int rc;

	QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
	atomic_set(&card->force_alloc_skb, 0);
	qeth_get_channel_path_desc(card);
retry:
	if (retries < 3)
		QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
			dev_name(&card->gdev->dev));
	ccw_device_set_offline(CARD_DDEV(card));
	ccw_device_set_offline(CARD_WDEV(card));
	ccw_device_set_offline(CARD_RDEV(card));
	rc = ccw_device_set_online(CARD_RDEV(card));
	if (rc)
		goto retriable;
	rc = ccw_device_set_online(CARD_WDEV(card));
	if (rc)
		goto retriable;
	rc = ccw_device_set_online(CARD_DDEV(card));
	if (rc)
		goto retriable;
	rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
retriable:
	if (rc == -ERESTARTSYS) {
		QETH_DBF_TEXT(SETUP, 2, "break1");
		return rc;
	} else if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	qeth_determine_capabilities(card);
	qeth_init_tokens(card);
	qeth_init_func_level(card);
	rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
	if (rc == -ERESTARTSYS) {
		QETH_DBF_TEXT(SETUP, 2, "break2");
		return rc;
	} else if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb);
	if (rc == -ERESTARTSYS) {
		QETH_DBF_TEXT(SETUP, 2, "break3");
		return rc;
	} else if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	card->read_or_write_problem = 0;
	rc = qeth_mpc_initialize(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
		goto out;
	}
	/* re-probe the supported assist functions */
	card->options.ipa4.supported_funcs = 0;
	card->options.adp.supported_funcs = 0;
	card->info.diagass_support = 0;
	qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
		qeth_query_setadapterparms(card);
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
		qeth_query_setdiagass(card);
	return 0;
out:
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
	QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
		dev_name(&card->gdev->dev), rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
/* Attach one QDIO buffer element to the skb being assembled for rx-sg.
 * On the first element a (possibly preallocated) skb is set up and the
 * first QETH_RX_PULL_LEN bytes are copied linearly; the rest of the
 * element -- and every later element -- is added as a page fragment
 * (with a page refcount taken).  Returns 0 or -ENOMEM.
 */
static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer,
		struct qdio_buffer_element *element,
		struct sk_buff **pskb, int offset, int *pfrag, int data_len)
{
	struct page *page = virt_to_page(element->addr);

	if (*pskb == NULL) {
		if (qethbuffer->rx_skb) {
			/* only if qeth_card.options.cq == QETH_CQ_ENABLED */
			*pskb = qethbuffer->rx_skb;
			qethbuffer->rx_skb = NULL;
		} else {
			*pskb = dev_alloc_skb(QETH_RX_PULL_LEN + ETH_HLEN);
			if (!(*pskb))
				return -ENOMEM;
		}
		skb_reserve(*pskb, ETH_HLEN);
		if (data_len <= QETH_RX_PULL_LEN) {
			/* small frame: copy everything into the linear part */
			memcpy(skb_put(*pskb, data_len), element->addr + offset,
				data_len);
		} else {
			get_page(page);
			memcpy(skb_put(*pskb, QETH_RX_PULL_LEN),
			       element->addr + offset, QETH_RX_PULL_LEN);
			skb_fill_page_desc(*pskb, *pfrag, page,
				offset + QETH_RX_PULL_LEN,
				data_len - QETH_RX_PULL_LEN);
			/* account for the fragment bytes by hand */
			(*pskb)->data_len += data_len - QETH_RX_PULL_LEN;
			(*pskb)->len      += data_len - QETH_RX_PULL_LEN;
			(*pskb)->truesize += data_len - QETH_RX_PULL_LEN;
			(*pfrag)++;
		}
	} else {
		get_page(page);
		skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len);
		(*pskb)->data_len += data_len;
		(*pskb)->len      += data_len;
		(*pskb)->truesize += data_len;
		(*pfrag)++;
	}
	return 0;
}
/* Extract the next packet from a QDIO inbound buffer.  Walks the buffer
 * elements starting at (*__element, *__offset), parses the qeth header
 * and assembles an skb either by copying (small frames) or by attaching
 * page fragments (rx-sg).  On success, returns the skb and advances
 * *__element/*__offset to the start of the next packet; returns NULL
 * when no further (complete) packet is available or on error.
 */
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
		struct qeth_qdio_buffer *qethbuffer,
		struct qdio_buffer_element **__element, int *__offset,
		struct qeth_hdr **hdr)
{
	struct qdio_buffer_element *element = *__element;
	struct qdio_buffer *buffer = qethbuffer->buffer;
	int offset = *__offset;
	struct sk_buff *skb = NULL;
	int skb_len = 0;
	void *data_ptr;
	int data_len;
	int headroom = 0;
	int use_rx_sg = 0;
	int frag = 0;

	/* qeth_hdr must not cross element boundaries */
	if (element->length < offset + sizeof(struct qeth_hdr)) {
		if (qeth_is_last_sbale(element))
			return NULL;
		element++;
		offset = 0;
		if (element->length < sizeof(struct qeth_hdr))
			return NULL;
	}
	*hdr = element->addr + offset;

	offset += sizeof(struct qeth_hdr);
	/* frame length and needed headroom depend on the header flavour */
	switch ((*hdr)->hdr.l2.id) {
	case QETH_HEADER_TYPE_LAYER2:
		skb_len = (*hdr)->hdr.l2.pkt_length;
		break;
	case QETH_HEADER_TYPE_LAYER3:
		skb_len = (*hdr)->hdr.l3.length;
		if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
		    (card->info.link_type == QETH_LINK_TYPE_HSTR))
			headroom = TR_HLEN;
		else
			headroom = ETH_HLEN;
		break;
	case QETH_HEADER_TYPE_OSN:
		skb_len = (*hdr)->hdr.osn.pdu_length;
		headroom = sizeof(struct qeth_hdr);
		break;
	default:
		break;
	}

	if (!skb_len)
		return NULL;

	/* use scatter-gather for big frames (unless forced off), and
	 * always when the completion queue is enabled */
	if (((skb_len >= card->options.rx_sg_cb) &&
	     (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
	     (!atomic_read(&card->force_alloc_skb))) ||
	    (card->options.cq == QETH_CQ_ENABLED)) {
		use_rx_sg = 1;
	} else {
		skb = dev_alloc_skb(skb_len + headroom);
		if (!skb)
			goto no_mem;
		if (headroom)
			skb_reserve(skb, headroom);
	}

	data_ptr = element->addr + offset;
	/* gather skb_len bytes, crossing element boundaries as needed */
	while (skb_len) {
		data_len = min(skb_len, (int)(element->length - offset));
		if (data_len) {
			if (use_rx_sg) {
				if (qeth_create_skb_frag(qethbuffer, element,
				    &skb, offset, &frag, data_len))
					goto no_mem;
			} else {
				memcpy(skb_put(skb, data_len), data_ptr,
					data_len);
			}
		}
		skb_len -= data_len;
		if (skb_len) {
			if (qeth_is_last_sbale(element)) {
				/* packet claims more data than the buffer
				 * holds: drop it */
				QETH_CARD_TEXT(card, 4, "unexeob");
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
				dev_kfree_skb_any(skb);
				card->stats.rx_errors++;
				return NULL;
			}
			element++;
			offset = 0;
			data_ptr = element->addr;
		} else {
			offset += data_len;
		}
	}
	*__element = element;
	*__offset = offset;
	if (use_rx_sg && card->options.performance_stats) {
		card->perf_stats.sg_skbs_rx++;
		card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags;
	}
	return skb;
no_mem:
	if (net_ratelimit()) {
		QETH_CARD_TEXT(card, 2, "noskbmem");
	}
	card->stats.rx_dropped++;
	return NULL;
}
EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
/* Tear down every registered s390 debug area and forget its handle. */
static void qeth_unregister_dbf_views(void)
{
	int i;

	for (i = 0; i < QETH_DBF_INFOS; i++) {
		debug_unregister(qeth_dbf[i].id);
		qeth_dbf[i].id = NULL;
	}
}
/* Emit a printf-style message into the given s390 debug area.  The text
 * is truncated to fit the 32-byte local buffer, and messages above the
 * area's current level are silently dropped.
 */
void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (level > id->level)
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
/* Register every entry of the qeth_dbf table as an s390 debug area with
 * its view and passing level.  On any failure all previously registered
 * areas are unregistered again.  Returns 0 or a negative errno.
 */
static int qeth_register_dbf_views(void)
{
	int ret;
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		/* register the areas */
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (qeth_dbf[x].id == NULL) {
			qeth_unregister_dbf_views();
			return -ENOMEM;
		}
		/* register a view */
		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		if (ret) {
			qeth_unregister_dbf_views();
			return ret;
		}
		/* set a passing level */
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}
	return 0;
}
/* Load the layer-2 or layer-3 discipline module and take a reference on
 * its ccwgroup driver.  Serialized by qeth_mod_mutex.  Returns 0 on
 * success, -EINVAL if the module could not be loaded.
 */
int qeth_core_load_discipline(struct qeth_card *card,
		enum qeth_discipline_id discipline)
{
	int rc = 0;

	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline.ccwgdriver = try_then_request_module(
			symbol_get(qeth_l3_ccwgroup_driver),
			"qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline.ccwgdriver = try_then_request_module(
			symbol_get(qeth_l2_ccwgroup_driver),
			"qeth_l2");
		break;
	}
	if (!card->discipline.ccwgdriver) {
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
		rc = -EINVAL;
	}
	mutex_unlock(&qeth_mod_mutex);
	return rc;
}
/* Drop the module reference taken by qeth_core_load_discipline() and
 * detach the discipline driver from the card.
 */
void qeth_core_free_discipline(struct qeth_card *card)
{
	if (!card->options.layer2)
		symbol_put(qeth_l3_ccwgroup_driver);
	else
		symbol_put(qeth_l2_ccwgroup_driver);
	card->discipline.ccwgdriver = NULL;
}
/* ccwgroup probe: allocate a qeth_card, set up its per-card debug area,
 * wire the three CCW subdevices and their IRQ handler, determine the
 * card type, create the sysfs attributes and -- for OSN/OSM cards --
 * immediately load and probe the layer-2 discipline.  On any failure
 * everything set up so far is torn down via the goto ladder.
 */
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	unsigned long flags;
	char dbf_name[20];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card();
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		dev_name(&gdev->dev));
	card->debug = debug_register(dbf_name, 2, 1, 8);
	if (!card->debug) {
		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
		rc = -ENOMEM;
		goto err_card;
	}
	debug_register_view(card->debug, &debug_hex_ascii_view);

	/* cdev[0..2] = read, write, data channel */
	card->read.ccwdev  = gdev->cdev[0];
	card->write.ccwdev = gdev->cdev[1];
	card->data.ccwdev  = gdev->cdev[2];
	dev_set_drvdata(&gdev->dev, card);
	card->gdev = gdev;
	gdev->cdev[0]->handler = qeth_irq;
	gdev->cdev[1]->handler = qeth_irq;
	gdev->cdev[2]->handler = qeth_irq;

	rc = qeth_determine_card_type(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
		goto err_dbf;
	}
	rc = qeth_setup_card(card);
	if (rc) {
		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
		goto err_dbf;
	}

	if (card->info.type == QETH_CARD_TYPE_OSN)
		rc = qeth_core_create_osn_attributes(dev);
	else
		rc = qeth_core_create_device_attributes(dev);
	if (rc)
		goto err_dbf;
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSN:
	case QETH_CARD_TYPE_OSM:
		/* OSN/OSM are always layer 2; bind the discipline now */
		rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
		if (rc)
			goto err_attr;
		rc = card->discipline.ccwgdriver->probe(card->gdev);
		if (rc)
			goto err_disc;
		/* fall through: no extra setup needed for the other types */
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSX:
	default:
		break;
	}

	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_add_tail(&card->list, &qeth_core_card_list.list);
	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);

	qeth_determine_capabilities(card);
	return 0;

err_disc:
	qeth_core_free_discipline(card);
err_attr:
	if (card->info.type == QETH_CARD_TYPE_OSN)
		qeth_core_remove_osn_attributes(dev);
	else
		qeth_core_remove_device_attributes(dev);
err_dbf:
	debug_unregister(card->debug);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}
/* ccwgroup remove: undo qeth_core_probe_device() -- drop the sysfs
 * attributes, hand the device to the discipline's remove handler,
 * unregister the debug area, unlink the card from the global list and
 * free it.
 */
static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	unsigned long flags;
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_DBF_TEXT(SETUP, 2, "removedv");

	if (card->info.type == QETH_CARD_TYPE_OSN) {
		qeth_core_remove_osn_attributes(&gdev->dev);
	} else {
		qeth_core_remove_device_attributes(&gdev->dev);
	}

	if (card->discipline.ccwgdriver) {
		card->discipline.ccwgdriver->remove(gdev);
		qeth_core_free_discipline(card);
	}

	debug_unregister(card->debug);
	write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
	list_del(&card->list);
	write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
	qeth_core_free_card(card);
	dev_set_drvdata(&gdev->dev, NULL);
	put_device(&gdev->dev);
	return;
}
/* ccwgroup set_online: lazily bind a discipline if none is attached yet
 * (IQD defaults to layer 3, everything else to layer 2), then delegate
 * to the discipline's set_online handler.
 */
static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	int def_discipline;

	if (!card->discipline.ccwgdriver) {
		if (card->info.type == QETH_CARD_TYPE_IQD)
			def_discipline = QETH_DISCIPLINE_LAYER3;
		else
			def_discipline = QETH_DISCIPLINE_LAYER2;
		rc = qeth_core_load_discipline(card, def_discipline);
		if (rc)
			goto err;
		rc = card->discipline.ccwgdriver->probe(card->gdev);
		if (rc)
			goto err;
	}
	rc = card->discipline.ccwgdriver->set_online(gdev);
err:
	return rc;
}
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
return card->discipline.ccwgdriver->set_offline(gdev);
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
if (card->discipline.ccwgdriver &&
card->discipline.ccwgdriver->shutdown)
card->discipline.ccwgdriver->shutdown(gdev);
}
static int qeth_core_prepare(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
if (card->discipline.ccwgdriver &&
card->discipline.ccwgdriver->prepare)
return card->discipline.ccwgdriver->prepare(gdev);
return 0;
}
static void qeth_core_complete(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
if (card->discipline.ccwgdriver &&
card->discipline.ccwgdriver->complete)
card->discipline.ccwgdriver->complete(gdev);
}
static int qeth_core_freeze(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
if (card->discipline.ccwgdriver &&
card->discipline.ccwgdriver->freeze)
return card->discipline.ccwgdriver->freeze(gdev);
return 0;
}
static int qeth_core_thaw(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
if (card->discipline.ccwgdriver &&
card->discipline.ccwgdriver->thaw)
return card->discipline.ccwgdriver->thaw(gdev);
return 0;
}
static int qeth_core_restore(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
if (card->discipline.ccwgdriver &&
card->discipline.ccwgdriver->restore)
return card->discipline.ccwgdriver->restore(gdev);
return 0;
}
/* The core ccwgroup driver: device lifecycle and power-management
 * callbacks, mostly thin dispatchers into the bound discipline driver.
 */
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.driver_id = 0xD8C5E3C8,
	.probe = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
	.prepare = qeth_core_prepare,
	.complete = qeth_core_complete,
	.freeze = qeth_core_freeze,
	.thaw = qeth_core_thaw,
	.restore = qeth_core_restore,
};
/* sysfs "group" attribute store: create a ccwgroup device from the
 * written bus-id triple; on success sysfs expects the consumed byte
 * count back.
 */
static ssize_t
qeth_core_driver_group_store(struct device_driver *ddrv, const char *buf,
			     size_t count)
{
	int rc;

	rc = qeth_core_driver_group(buf, qeth_core_root_dev,
				    qeth_core_ccwgroup_driver.driver_id);
	return rc ? rc : count;
}
static DRIVER_ATTR(group, 0200, NULL, qeth_core_driver_group_store);
/* ethtool statistics key strings.  The order here MUST match the data[]
 * indices filled in by qeth_core_get_ethtool_stats().
 */
static struct {
	const char str[ETH_GSTRING_LEN];
} qeth_ethtool_stats_keys[] = {
/* 0 */{"rx skbs"},
	{"rx buffers"},
	{"tx skbs"},
	{"tx buffers"},
	{"tx skbs no packing"},
	{"tx buffers no packing"},
	{"tx skbs packing"},
	{"tx buffers packing"},
	{"tx sg skbs"},
	{"tx sg frags"},
/* 10 */{"rx sg skbs"},
	{"rx sg frags"},
	{"rx sg page allocs"},
	{"tx large kbytes"},
	{"tx large count"},
	{"tx pk state ch n->p"},
	{"tx pk state ch p->n"},
	{"tx pk watermark low"},
	{"tx pk watermark high"},
	{"queue 0 buffer usage"},
/* 20 */{"queue 1 buffer usage"},
	{"queue 2 buffer usage"},
	{"queue 3 buffer usage"},
	{"rx poll time"},
	{"rx poll count"},
	{"rx do_QDIO time"},
	{"rx do_QDIO count"},
	{"tx handler time"},
	{"tx handler count"},
	{"tx time"},
/* 30 */{"tx count"},
	{"tx do_QDIO time"},
	{"tx do_QDIO count"},
	{"tx csum"},
	{"tx lin"},
	{"cq handler count"},
	{"cq handler time"}
};
/* ethtool get_sset_count: only the statistics string set is supported. */
int qeth_core_get_sset_count(struct net_device *dev, int stringset)
{
	if (stringset == ETH_SS_STATS)
		return sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN;
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
/* ethtool get_ethtool_stats: fill data[] with the per-card counters.
 * The index of every slot corresponds to the same position in
 * qeth_ethtool_stats_keys -- keep both tables in sync.
 */
void qeth_core_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct qeth_card *card = dev->ml_priv;

	/* rx/tx skb counters are reported relative to the initial values
	 * captured when statistics were enabled */
	data[0] = card->stats.rx_packets -
				card->perf_stats.initial_rx_packets;
	data[1] = card->perf_stats.bufs_rec;
	data[2] = card->stats.tx_packets -
				card->perf_stats.initial_tx_packets;
	data[3] = card->perf_stats.bufs_sent;
	data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets
			- card->perf_stats.skbs_sent_pack;
	data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack;
	data[6] = card->perf_stats.skbs_sent_pack;
	data[7] = card->perf_stats.bufs_sent_pack;
	data[8] = card->perf_stats.sg_skbs_sent;
	data[9] = card->perf_stats.sg_frags_sent;
	data[10] = card->perf_stats.sg_skbs_rx;
	data[11] = card->perf_stats.sg_frags_rx;
	data[12] = card->perf_stats.sg_alloc_page_rx;
	data[13] = (card->perf_stats.large_send_bytes >> 10);
	data[14] = card->perf_stats.large_send_cnt;
	data[15] = card->perf_stats.sc_dp_p;
	data[16] = card->perf_stats.sc_p_dp;
	data[17] = QETH_LOW_WATERMARK_PACK;
	data[18] = QETH_HIGH_WATERMARK_PACK;
	/* per-queue buffer usage; queues beyond no_out_queues read as 0 */
	data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers);
	data[20] = (card->qdio.no_out_queues > 1) ?
			atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0;
	data[21] = (card->qdio.no_out_queues > 2) ?
			atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0;
	data[22] = (card->qdio.no_out_queues > 3) ?
			atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0;
	data[23] = card->perf_stats.inbound_time;
	data[24] = card->perf_stats.inbound_cnt;
	data[25] = card->perf_stats.inbound_do_qdio_time;
	data[26] = card->perf_stats.inbound_do_qdio_cnt;
	data[27] = card->perf_stats.outbound_handler_time;
	data[28] = card->perf_stats.outbound_handler_cnt;
	data[29] = card->perf_stats.outbound_time;
	data[30] = card->perf_stats.outbound_cnt;
	data[31] = card->perf_stats.outbound_do_qdio_time;
	data[32] = card->perf_stats.outbound_do_qdio_cnt;
	data[33] = card->perf_stats.tx_csum;
	data[34] = card->perf_stats.tx_lin;
	data[35] = card->perf_stats.cq_cnt;
	data[36] = card->perf_stats.cq_time;
}
EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
/*
 * qeth_core_get_strings() - ethtool get_strings callback.
 *
 * Copies the statistics key strings for ETH_SS_STATS; any other string
 * set is unexpected from the ethtool core and triggers a WARN.
 */
void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	/* Only the statistics string set is supported by this driver. */
	if (stringset == ETH_SS_STATS)
		memcpy(data, &qeth_ethtool_stats_keys,
		       sizeof(qeth_ethtool_stats_keys));
	else
		WARN_ON(1);
}
EXPORT_SYMBOL_GPL(qeth_core_get_strings);
/*
 * qeth_core_get_drvinfo() - ethtool get_drvinfo callback.
 *
 * Reports the discipline name (qeth_l2 vs qeth_l3), a static version
 * string, the card's microcode level as firmware version, and the bus
 * ids of the read/write/data ccw devices.
 *
 * Uses bounded snprintf() instead of strcpy()/sprintf(): the
 * ethtool_drvinfo members are fixed-size arrays and the mcl_level /
 * bus-id strings originate from the device, so unbounded copies could
 * overflow them.
 */
void qeth_core_get_drvinfo(struct net_device *dev,
		struct ethtool_drvinfo *info)
{
	struct qeth_card *card = dev->ml_priv;

	snprintf(info->driver, sizeof(info->driver), "%s",
		 card->options.layer2 ? "qeth_l2" : "qeth_l3");
	snprintf(info->version, sizeof(info->version), "1.0");
	snprintf(info->fw_version, sizeof(info->fw_version), "%s",
		 card->info.mcl_level);
	snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
		 CARD_RDEV_ID(card),
		 CARD_WDEV_ID(card),
		 CARD_DDEV_ID(card));
}
EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
/*
 * qeth_core_ethtool_get_settings() - ethtool get_settings callback.
 *
 * Reports fixed link capabilities per link type; autonegotiation and
 * full duplex are always claimed.  HiperSockets (IQD) and guest-LAN
 * devices have no real PHY and are reported as the 10Gbit class.
 * Always returns 0.
 */
int qeth_core_ethtool_get_settings(struct net_device *netdev,
		struct ethtool_cmd *ecmd)
{
	struct qeth_card *card = netdev->ml_priv;
	enum qeth_link_types link_type;

	/* Virtual links (IQD / guest LAN) get the fastest class below. */
	if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
		link_type = QETH_LINK_TYPE_10GBIT_ETH;
	else
		link_type = card->info.link_type;

	/* Common defaults for every link type. */
	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = SUPPORTED_Autoneg;
	ecmd->advertising = ADVERTISED_Autoneg;
	ecmd->duplex = DUPLEX_FULL;
	ecmd->autoneg = AUTONEG_ENABLE;

	switch (link_type) {
	case QETH_LINK_TYPE_FAST_ETH:
	case QETH_LINK_TYPE_LANE_ETH100:
		/* 100 Mbit twisted pair */
		ecmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_100baseT_Half |
					SUPPORTED_100baseT_Full |
					SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_TP;
		ecmd->speed = SPEED_100;
		ecmd->port = PORT_TP;
		break;
	case QETH_LINK_TYPE_GBIT_ETH:
	case QETH_LINK_TYPE_LANE_ETH1000:
		/* 1 Gbit fibre */
		ecmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_100baseT_Half |
					SUPPORTED_100baseT_Full |
					SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full |
					SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_1000baseT_Half |
					ADVERTISED_1000baseT_Full |
					ADVERTISED_FIBRE;
		ecmd->speed = SPEED_1000;
		ecmd->port = PORT_FIBRE;
		break;
	case QETH_LINK_TYPE_10GBIT_ETH:
		/* 10 Gbit fibre (also virtual IQD/guest-LAN links) */
		ecmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_100baseT_Half |
					SUPPORTED_100baseT_Full |
					SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full |
					SUPPORTED_10000baseT_Full |
					SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_1000baseT_Half |
					ADVERTISED_1000baseT_Full |
					ADVERTISED_10000baseT_Full |
					ADVERTISED_FIBRE;
		ecmd->speed = SPEED_10000;
		ecmd->port = PORT_FIBRE;
		break;
	default:
		/* Unknown link type: be conservative, report 10 Mbit TP. */
		ecmd->supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_TP;
		ecmd->speed = SPEED_10;
		ecmd->port = PORT_TP;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
/*
 * Module init: register dbf views, the ccw and ccwgroup drivers, the
 * "group" sysfs attribute, the qeth root device and two slab caches.
 * On failure, the chained goto labels unwind exactly what has been set
 * up so far, in reverse order.
 */
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");
	INIT_LIST_HEAD(&qeth_core_card_list.list);
	rwlock_init(&qeth_core_card_list.rwlock);
	mutex_init(&qeth_mod_mutex);

	rc = qeth_register_dbf_views();
	if (rc)
		goto out_err;
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;
	rc = driver_create_file(&qeth_core_ccwgroup_driver.driver,
				&driver_attr_group);
	if (rc)
		goto driver_err;
	qeth_core_root_dev = root_device_register("qeth");
	rc = IS_ERR(qeth_core_root_dev) ? PTR_ERR(qeth_core_root_dev) : 0;
	if (rc)
		goto register_err;
	/* NOTE(review): 64-byte alignment requirement not explained here --
	 * presumably a hardware constraint on the header; confirm. */
	qeth_core_header_cache = kmem_cache_create("qeth_hdr",
			sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	return 0;

	/* Error unwind: each label undoes the step registered just above it. */
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
			   &driver_attr_group);
driver_err:
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc);
	qeth_unregister_dbf_views();
out_err:
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}
/*
 * Module exit: tear everything down in the reverse order of
 * qeth_core_init() - root device, sysfs attribute, group and ccw
 * drivers, slab caches, dbf views.
 */
static void __exit qeth_core_exit(void)
{
	root_device_unregister(qeth_core_root_dev);
	driver_remove_file(&qeth_core_ccwgroup_driver.driver,
			   &driver_attr_group);
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	qeth_unregister_dbf_views();
	pr_info("core functions removed\n");
}
module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");
| gpl-2.0 |
charles1018/kernel_sony_14.4.A.0.157 | drivers/staging/media/lirc/lirc_zilog.c | 4062 | 41534 | /*
* i2c IR lirc driver for devices with zilog IR processors
*
* Copyright (c) 2000 Gerd Knorr <kraxel@goldbach.in-berlin.de>
* modified for PixelView (BT878P+W/FM) by
* Michal Kochanowicz <mkochano@pld.org.pl>
* Christoph Bartelmus <lirc@bartelmus.de>
* modified for KNC ONE TV Station/Anubis Typhoon TView Tuner by
* Ulrich Mueller <ulrich.mueller42@web.de>
* modified for Asus TV-Box and Creative/VisionTek BreakOut-Box by
* Stefan Jahn <stefan@lkcc.org>
* modified for inclusion into kernel sources by
* Jerome Brock <jbrock@users.sourceforge.net>
* modified for Leadtek Winfast PVR2000 by
* Thomas Reitmayr (treitmayr@yahoo.com)
* modified for Hauppauge PVR-150 IR TX device by
* Mark Weaver <mark@npsl.co.uk>
* changed name from lirc_pvr150 to lirc_zilog, works on more than pvr-150
* Jarod Wilson <jarod@redhat.com>
*
* parts are cut&pasted from the lirc_i2c.c driver
*
* Numerous changes updating lirc_zilog.c in kernel 2.6.38 and later are
* Copyright (C) 2011 Andy Walls <awalls@md.metrocast.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <media/lirc_dev.h>
#include <media/lirc.h>
struct IR;
/*
 * Per-unit IR receiver state, reference counted via get_ir_rx()/
 * put_ir_rx().  Freed only after its polling kthread has been stopped.
 */
struct IR_rx {
	struct kref ref;
	struct IR *ir;		/* parent transceiver; rx holds a reference */

	/* RX device */
	struct mutex client_lock;	/* guards c against i2c detach */
	struct i2c_client *c;

	/* RX polling thread data */
	struct task_struct *task;

	/* RX read data */
	unsigned char b[3];	/* last raw key bytes read from the chip */
	bool hdpvr_data_fmt;	/* HD-PVR variant reports keys differently */
};
/*
 * Per-unit IR transmitter state, reference counted via get_ir_tx()/
 * put_ir_tx().
 */
struct IR_tx {
	struct kref ref;
	struct IR *ir;		/* parent transceiver; tx holds a reference */

	/* TX device */
	struct mutex client_lock;	/* guards c against i2c detach */
	struct i2c_client *c;

	/* TX additional actions needed */
	int need_boot;		/* boot block must be (re)sent before TX */
	bool post_tx_ready_poll; /* poll for chip-ready after TX (not HD-PVR) */
};
/*
 * One IR transceiver unit: a Z8 chip with optional RX and TX i2c
 * clients.  Reference counted and kept on ir_devices_list.
 */
struct IR {
	struct kref ref;
	struct list_head list;		/* node on ir_devices_list */

	/* FIXME spinlock access to l.features */
	struct lirc_driver l;		/* lirc_dev registration record */
	struct lirc_buffer rbuf;	/* decoded key-code fifo for readers */

	struct mutex ir_lock;		/* serializes i2c bus transactions */
	atomic_t open_count;

	struct i2c_adapter *adapter;

	spinlock_t rx_ref_lock; /* struct IR_rx kref get()/put() */
	struct IR_rx *rx;

	spinlock_t tx_ref_lock; /* struct IR_tx kref get()/put() */
	struct IR_tx *tx;
};
/* IR transceiver instance object list */
/*
* This lock is used for the following:
* a. ir_devices_list access, insertions, deletions
* b. struct IR kref get()s and put()s
* c. serialization of ir_probe() for the two i2c_clients for a Z8
*/
static DEFINE_MUTEX(ir_devices_lock);
static LIST_HEAD(ir_devices_list);
/* Block size for IR transmitter */
#define TX_BLOCK_SIZE 99
/* Hauppauge IR transmitter data */
/*
 * Parsed in-memory form of the "haup-ir-blaster.bin" codeset file.
 * A single global instance (tx_data) is shared by all transmitters,
 * guarded by tx_data_lock for load/unload.
 */
struct tx_data_struct {
	/* Boot block */
	unsigned char *boot_data;
	/* Start of binary data block */
	unsigned char *datap;
	/* End of binary data block */
	unsigned char *endp;
	/* Number of installed codesets */
	unsigned int num_code_sets;
	/* Pointers to codesets */
	unsigned char **code_sets;
	/* Global fixed data template; -1 marks a byte that varies per key */
	int fixed[TX_BLOCK_SIZE];
};
static struct tx_data_struct *tx_data;
static struct mutex tx_data_lock;
#define zilog_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, \
## args)
#define zilog_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
#define zilog_info(s, args...) printk(KERN_INFO KBUILD_MODNAME ": " s, ## args)
/* module parameters */
static bool debug; /* debug output */
static bool tx_only; /* only handle the IR Tx function */
static int minor = -1; /* minor number */
#define dprintk(fmt, args...) \
do { \
if (debug) \
printk(KERN_DEBUG KBUILD_MODNAME ": " fmt, \
## args); \
} while (0)
/* struct IR reference counting */
/*
 * Take a reference on a struct IR.  When the caller does not already
 * hold ir_devices_lock, take it around the kref_get() so gets are
 * serialized against final puts.
 */
static struct IR *get_ir_device(struct IR *ir, bool ir_devices_lock_held)
{
	if (!ir_devices_lock_held)
		mutex_lock(&ir_devices_lock);
	kref_get(&ir->ref);
	if (!ir_devices_lock_held)
		mutex_unlock(&ir_devices_lock);
	return ir;
}
/*
 * Final kref release for a struct IR: unregister from lirc_dev, free
 * the RX fifo, unlink from ir_devices_list and free the object.
 * All put_ir_device() paths run this with ir_devices_lock held (either
 * taken there or asserted held by the caller), protecting list_del().
 */
static void release_ir_device(struct kref *ref)
{
	struct IR *ir = container_of(ref, struct IR, ref);

	/*
	 * Things should be in this state by now:
	 * ir->rx set to NULL and deallocated - happens before ir->rx->ir put()
	 * ir->rx->task kthread stopped - happens before ir->rx->ir put()
	 * ir->tx set to NULL and deallocated - happens before ir->tx->ir put()
	 * ir->open_count == 0 - happens on final close()
	 * ir_lock, tx_ref_lock, rx_ref_lock, all released
	 */
	if (ir->l.minor >= 0 && ir->l.minor < MAX_IRCTL_DEVICES) {
		lirc_unregister_driver(ir->l.minor);
		/* mark as no longer registered */
		ir->l.minor = MAX_IRCTL_DEVICES;
	}
	if (ir->rbuf.fifo_initialized)
		lirc_buffer_free(&ir->rbuf);
	list_del(&ir->list);
	kfree(ir);
}
/*
 * Drop a reference on a struct IR, taking ir_devices_lock around the
 * kref_put() unless the caller already holds it.  Returns nonzero iff
 * this was the final reference and the object was released.
 */
static int put_ir_device(struct IR *ir, bool ir_devices_lock_held)
{
	int released;

	if (!ir_devices_lock_held)
		mutex_lock(&ir_devices_lock);
	released = kref_put(&ir->ref, release_ir_device);
	if (!ir_devices_lock_held)
		mutex_unlock(&ir_devices_lock);
	return released;
}
/* struct IR_rx reference counting */
/*
 * Take a reference on the receiver attached to ir, if any.
 * Returns NULL when no receiver is (or remains) attached.
 */
static struct IR_rx *get_ir_rx(struct IR *ir)
{
	struct IR_rx *rcvr;

	spin_lock(&ir->rx_ref_lock);
	rcvr = ir->rx;
	if (rcvr)
		kref_get(&rcvr->ref);
	spin_unlock(&ir->rx_ref_lock);
	return rcvr;
}
/*
 * Stop the RX polling kthread, if one was started, and drop the
 * struct IR reference that was handed to it at creation time.
 * Must not be called under a spinlock: kthread_stop() can sleep.
 */
static void destroy_rx_kthread(struct IR_rx *rx, bool ir_devices_lock_held)
{
	/* end up polling thread */
	if (!IS_ERR_OR_NULL(rx->task)) {
		kthread_stop(rx->task);
		rx->task = NULL;
		/* Put the ir ptr that ir_probe() gave to the rx poll thread */
		put_ir_device(rx->ir, ir_devices_lock_held);
	}
}
/*
 * Final kref release for struct IR_rx.  Runs under ir->rx_ref_lock (a
 * spinlock), so it must not sleep: it only detaches rx from its parent
 * and withdraws the RX capability flag.  The cleanup that can sleep -
 * stopping the poll kthread, freeing rx, and dropping rx's reference
 * on the parent ir - is finished by put_ir_rx() after the lock drops.
 */
static void release_ir_rx(struct kref *ref)
{
	struct IR_rx *rx = container_of(ref, struct IR_rx, ref);
	struct IR *ir = rx->ir;

	ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
	/* No put_ir_device(rx->ir) here; the lock it owns can't die yet */
	ir->rx = NULL;
	/* No kfree(rx) here either; the poll thread still references it */
}
/*
 * Drop a struct IR_rx reference.  On final release, finish the work
 * release_ir_rx() could not do under the spinlock: stop the poll
 * kthread, free rx, wake any sleeping pollers, and drop rx's reference
 * on the parent struct IR.  Returns nonzero iff rx was released.
 */
static int put_ir_rx(struct IR_rx *rx, bool ir_devices_lock_held)
{
	struct IR *ir = rx->ir;
	int released;

	spin_lock(&ir->rx_ref_lock);
	released = kref_put(&rx->ref, release_ir_rx);
	spin_unlock(&ir->rx_ref_lock);

	if (released) {
		/* Destroy the rx kthread while not holding the spinlock */
		destroy_rx_kthread(rx, ir_devices_lock_held);
		kfree(rx);
		/* Make sure we're not still in a poll_table somewhere */
		wake_up_interruptible(&ir->rbuf.wait_poll);
		/* Put the rx->ir reference the freed rx was holding */
		put_ir_device(ir, ir_devices_lock_held);
	}
	return released;
}
/* struct IR_tx reference counting */
/*
 * Take a reference on the transmitter attached to ir, if any.
 * Returns NULL when no transmitter is (or remains) attached.
 */
static struct IR_tx *get_ir_tx(struct IR *ir)
{
	struct IR_tx *xmitter;

	spin_lock(&ir->tx_ref_lock);
	xmitter = ir->tx;
	if (xmitter)
		kref_get(&xmitter->ref);
	spin_unlock(&ir->tx_ref_lock);
	return xmitter;
}
/*
 * Final kref release for struct IR_tx.  Runs under ir->tx_ref_lock.
 * Unlike the RX side there is no kthread to stop, so tx can be freed
 * right here; only the parent ir reference put is deferred to
 * put_ir_tx(), where the spinlock is no longer held.
 */
static void release_ir_tx(struct kref *ref)
{
	struct IR_tx *tx = container_of(ref, struct IR_tx, ref);
	struct IR *ir = tx->ir;

	ir->l.features &= ~LIRC_CAN_SEND_PULSE;
	/* Don't put_ir_device(tx->ir) here, so our lock doesn't get freed */
	ir->tx = NULL;
	kfree(tx);
}
/*
 * Drop a struct IR_tx reference; on final release also drop the
 * reference tx held on its parent struct IR (outside the spinlock).
 * Returns nonzero iff tx was released.
 */
static int put_ir_tx(struct IR_tx *tx, bool ir_devices_lock_held)
{
	struct IR *ir = tx->ir;
	int released;

	spin_lock(&ir->tx_ref_lock);
	released = kref_put(&tx->ref, release_ir_tx);
	spin_unlock(&ir->tx_ref_lock);

	/* The freed tx held a reference on ir; give it back now */
	if (released)
		put_ir_device(ir, ir_devices_lock_held);
	return released;
}
/*
 * Poll the RX chip and push decoded key codes into the lirc buffer.
 * Called repeatedly from the polling kthread (lirc_thread()).
 *
 * Returns 0 when at least one code was buffered, -EOVERFLOW when the
 * buffer is already full, -ENXIO when no RX client is attached,
 * -ENODATA when no (new) key is pending or the thread should stop, or
 * the last i2c error after three failed chip resets.
 */
static int add_to_buf(struct IR *ir)
{
	__u16 code;
	unsigned char codes[2];
	unsigned char keybuf[6];
	int got_data = 0;
	int ret;
	int failures = 0;
	unsigned char sendbuf[1] = { 0 };
	struct lirc_buffer *rbuf = ir->l.rbuf;
	struct IR_rx *rx;
	struct IR_tx *tx;

	if (lirc_buffer_full(rbuf)) {
		dprintk("buffer overflow\n");
		return -EOVERFLOW;
	}
	rx = get_ir_rx(ir);
	if (rx == NULL)
		return -ENXIO;

	/* Ensure our rx->c i2c_client remains valid for the duration */
	mutex_lock(&rx->client_lock);
	if (rx->c == NULL) {
		mutex_unlock(&rx->client_lock);
		put_ir_rx(rx, false);
		return -ENXIO;
	}

	/* tx ref is only needed to set need_boot if we must reset the chip */
	tx = get_ir_tx(ir);

	/*
	 * service the device as long as it is returning
	 * data and we have space
	 */
	do {
		if (kthread_should_stop()) {
			ret = -ENODATA;
			break;
		}

		/*
		 * Lock i2c bus for the duration. RX/TX chips interfere so
		 * this is worth it
		 */
		mutex_lock(&ir->ir_lock);

		if (kthread_should_stop()) {
			mutex_unlock(&ir->ir_lock);
			ret = -ENODATA;
			break;
		}

		/*
		 * Send random "poll command" (?) Windows driver does this
		 * and it is a good point to detect chip failure.
		 */
		ret = i2c_master_send(rx->c, sendbuf, 1);
		if (ret != 1) {
			zilog_error("i2c_master_send failed with %d\n", ret);
			if (failures >= 3) {
				mutex_unlock(&ir->ir_lock);
				zilog_error("unable to read from the IR chip "
					    "after 3 resets, giving up\n");
				break;
			}

			/* Looks like the chip crashed, reset it */
			zilog_error("polling the IR receiver chip failed, "
				    "trying reset\n");

			set_current_state(TASK_UNINTERRUPTIBLE);
			if (kthread_should_stop()) {
				mutex_unlock(&ir->ir_lock);
				ret = -ENODATA;
				break;
			}
			/* sleep ~100 ms to let the chip come back */
			schedule_timeout((100 * HZ + 999) / 1000);
			/* the reset also clobbers any TX boot state */
			if (tx != NULL)
				tx->need_boot = 1;

			++failures;
			mutex_unlock(&ir->ir_lock);
			ret = 0;
			continue;
		}

		if (kthread_should_stop()) {
			mutex_unlock(&ir->ir_lock);
			ret = -ENODATA;
			break;
		}
		ret = i2c_master_recv(rx->c, keybuf, sizeof(keybuf));
		mutex_unlock(&ir->ir_lock);
		if (ret != sizeof(keybuf)) {
			zilog_error("i2c_master_recv failed with %d -- "
				    "keeping last read buffer\n", ret);
		} else {
			rx->b[0] = keybuf[3];
			rx->b[1] = keybuf[4];
			rx->b[2] = keybuf[5];
			dprintk("key (0x%02x/0x%02x)\n", rx->b[0], rx->b[1]);
		}

		/* key pressed ? */
		if (rx->hdpvr_data_fmt) {
			/* HD-PVR: byte 0 flags whether a key is pending */
			if (got_data && (keybuf[0] == 0x80)) {
				ret = 0;
				break;
			} else if (got_data && (keybuf[0] == 0x00)) {
				ret = -ENODATA;
				break;
			}
		} else if ((rx->b[0] & 0x80) == 0) {
			/* classic format: top bit of b[0] = key pending */
			ret = got_data ? 0 : -ENODATA;
			break;
		}

		/* look what we have: 13-bit code from b[0]/b[1] */
		code = (((__u16)rx->b[0] & 0x7f) << 6) | (rx->b[1] >> 2);

		codes[0] = (code >> 8) & 0xff;
		codes[1] = code & 0xff;

		/* return it */
		lirc_buffer_write(rbuf, codes);
		++got_data;
		ret = 0;
	} while (!lirc_buffer_full(rbuf));

	mutex_unlock(&rx->client_lock);
	if (tx != NULL)
		put_ir_tx(tx, false);
	put_ir_rx(rx, false);
	return ret;
}
/*
 * Main function of the polling thread -- from lirc_dev.
 * We don't fit the LIRC model at all anymore. This is horrible, but
 * basically we have a single RX/TX device with a nasty failure mode
 * that needs to be accounted for across the pair. lirc lets us provide
 * fops, but prevents us from using the internal polling, etc. if we do
 * so. Hence the replication. Might be neater to extend the LIRC model
 * to account for this but I'd think it's a very special case of seriously
 * messed up hardware.
 *
 * Loops until kthread_stop(): sleeps while the device is closed, and
 * otherwise polls the RX chip every ~260 ms, waking readers whenever
 * add_to_buf() queued new codes.  Always returns 0.
 */
static int lirc_thread(void *arg)
{
	struct IR *ir = arg;
	struct lirc_buffer *rbuf = ir->l.rbuf;

	dprintk("poll thread started\n");

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* if device not opened, we can sleep half a second */
		if (atomic_read(&ir->open_count) == 0) {
			schedule_timeout(HZ/2);
			continue;
		}

		/*
		 * This is ~113*2 + 24 + jitter (2*repeat gap + code length).
		 * We use this interval as the chip resets every time you poll
		 * it (bad!). This is therefore just sufficient to catch all
		 * of the button presses. It makes the remote much more
		 * responsive. You can see the difference by running irw and
		 * holding down a button. With 100ms, the old polling
		 * interval, you'll notice breaks in the repeat sequence
		 * corresponding to lost keypresses.
		 */
		schedule_timeout((260 * HZ) / 1000);
		if (kthread_should_stop())
			break;
		if (!add_to_buf(ir))
			wake_up_interruptible(&rbuf->wait_poll);
	}

	dprintk("poll thread ended\n");
	return 0;
}
/* lirc_dev open() hook; no per-open setup is needed for this driver */
static int set_use_inc(void *data)
{
	return 0;
}
/* lirc_dev close() hook; nothing to tear down */
static void set_use_dec(void *data)
{
	return;
}
/* safe read of a uint32 (always network byte order) */
/*
 * Read a big-endian 32-bit value from *data, bounds-checked against
 * endp, storing it in *val and advancing *data by 4.  Returns 1 on
 * success, 0 when fewer than 4 bytes remain (nothing is consumed).
 *
 * The bytes are cast to unsigned int before shifting: left-shifting a
 * promoted (signed) int by 24 is undefined behavior when bit 7 of the
 * byte is set (C11 6.5.7).
 */
static int read_uint32(unsigned char **data,
		       unsigned char *endp, unsigned int *val)
{
	if (*data + 4 > endp)
		return 0;
	*val = ((unsigned int)(*data)[0] << 24) |
	       ((unsigned int)(*data)[1] << 16) |
	       ((unsigned int)(*data)[2] << 8) |
	       (unsigned int)(*data)[3];
	*data += 4;
	return 1;
}
/* safe read of a uint8 */
/*
 * Read one byte from *data, bounds-checked against endp, storing it in
 * *val and advancing *data.  Returns 1 on success, 0 when no bytes
 * remain (nothing is consumed).
 */
static int read_uint8(unsigned char **data,
		      unsigned char *endp, unsigned char *val)
{
	unsigned char *p = *data;

	if (endp - p < 1)
		return 0;
	*val = *p;
	*data = p + 1;
	return 1;
}
/* safe skipping of N bytes */
/*
 * Advance *data by 'distance' bytes if that stays within endp.
 * Returns 1 on success, 0 otherwise (nothing is consumed on failure).
 */
static int skip(unsigned char **data,
		unsigned char *endp, unsigned int distance)
{
	unsigned char *next = *data + distance;

	if (next > endp)
		return 0;
	*data = next;
	return 1;
}
/* decompress key data into the given buffer */
/*
 * Look up 'codeset' in the sorted codeset table (binary search using
 * the base/lim idiom), then unpack the TX block for 'key'.  The first
 * key of a codeset is stored literally (minus the global fixed bytes);
 * every other key is stored as byte diffs against the first.
 *
 * buf must hold TX_BLOCK_SIZE bytes.  Returns 0 on success, -EPROTO
 * when the codeset or key is not present, -EFAULT when the firmware
 * image is inconsistent.
 * NOTE(review): reads the global tx_data without taking tx_data_lock;
 * presumably safe because callers only run after fw_load() succeeded -
 * confirm against the unload paths.
 */
static int get_key_data(unsigned char *buf,
			unsigned int codeset, unsigned int key)
{
	unsigned char *data, *endp, *diffs, *key_block;
	unsigned char keys, ndiffs, id;
	unsigned int base, lim, pos, i;

	/* Binary search for the codeset */
	for (base = 0, lim = tx_data->num_code_sets; lim; lim >>= 1) {
		pos = base + (lim >> 1);
		data = tx_data->code_sets[pos];

		if (!read_uint32(&data, tx_data->endp, &i))
			goto corrupt;

		if (i == codeset)
			break;
		else if (codeset > i) {
			base = pos + 1;
			--lim;
		}
	}
	/* Not found? */
	if (!lim)
		return -EPROTO;

	/* Set end of data block */
	endp = pos < tx_data->num_code_sets - 1 ?
		tx_data->code_sets[pos + 1] : tx_data->endp;

	/* Read the block header */
	if (!read_uint8(&data, endp, &keys) ||
	    !read_uint8(&data, endp, &ndiffs) ||
	    ndiffs > TX_BLOCK_SIZE || keys == 0)
		goto corrupt;

	/* Save diffs & skip */
	diffs = data;
	if (!skip(&data, endp, ndiffs))
		goto corrupt;

	/* Read the id of the first key */
	if (!read_uint8(&data, endp, &id))
		goto corrupt;

	/* Unpack the first key's data (fixed template fills the gaps) */
	for (i = 0; i < TX_BLOCK_SIZE; ++i) {
		if (tx_data->fixed[i] == -1) {
			if (!read_uint8(&data, endp, &buf[i]))
				goto corrupt;
		} else {
			buf[i] = (unsigned char)tx_data->fixed[i];
		}
	}

	/* Early out key found/not found */
	if (key == id)
		return 0;
	if (keys == 1)
		return -EPROTO;

	/* Sanity check: all remaining key records must fit in the block */
	key_block = data;
	if (!skip(&data, endp, (keys - 1) * (ndiffs + 1)))
		goto corrupt;

	/* Binary search for the key */
	for (base = 0, lim = keys - 1; lim; lim >>= 1) {
		/* Seek to block */
		unsigned char *key_data;

		pos = base + (lim >> 1);
		key_data = key_block + (ndiffs + 1) * pos;

		if (*key_data == key) {
			/* skip key id */
			++key_data;

			/* found, so unpack the diffs */
			for (i = 0; i < ndiffs; ++i) {
				unsigned char val;

				if (!read_uint8(&key_data, endp, &val) ||
				    diffs[i] >= TX_BLOCK_SIZE)
					goto corrupt;
				buf[diffs[i]] = val;
			}
			return 0;
		} else if (key > *key_data) {
			base = pos + 1;
			--lim;
		}
	}
	/* Key not found */
	return -EPROTO;

corrupt:
	zilog_error("firmware is corrupt\n");
	return -EFAULT;
}
/* send a block of data to the IR TX device */
/*
 * The TX_BLOCK_SIZE-byte block is downloaded in chunks of at most 4
 * payload bytes, each prefixed with a 1-based offset byte, matching the
 * Z8 chip's write protocol.  Caller holds ir->ir_lock.
 * Returns 0 on success or a negative errno / -EFAULT on short writes.
 *
 * Fix vs. original: the debug printout was missing its trailing
 * newline, corrupting the kernel log; the inner copy loop is memcpy().
 */
static int send_data_block(struct IR_tx *tx, unsigned char *data_block)
{
	int i, ret;
	unsigned char buf[5];

	for (i = 0; i < TX_BLOCK_SIZE;) {
		int tosend = TX_BLOCK_SIZE - i;

		if (tosend > 4)
			tosend = 4;
		/* 1-based offset of this chunk within the block */
		buf[0] = (unsigned char)(i + 1);
		memcpy(buf + 1, data_block + i, tosend);
		dprintk("%02x %02x %02x %02x %02x\n",
			buf[0], buf[1], buf[2], buf[3], buf[4]);
		ret = i2c_master_send(tx->c, buf, tosend + 1);
		if (ret != tosend + 1) {
			zilog_error("i2c_master_send failed with %d\n", ret);
			return ret < 0 ? ret : -EFAULT;
		}
		i += tosend;
	}
	return 0;
}
/* send boot data to the IR TX device */
/*
 * Download the firmware boot block, hit the "go" command, wait for the
 * chip to come back (it NAKs while settling), then read and log the
 * 4-byte firmware version response.  Caller holds ir->ir_lock.
 *
 * Returns 0 on success or after a non-fatal version-read hiccup, or a
 * negative errno / -EFAULT when the download itself failed.
 */
static int send_boot_data(struct IR_tx *tx)
{
	int ret, i;
	unsigned char buf[4];

	/* send the boot block */
	ret = send_data_block(tx, tx_data->boot_data);
	if (ret != 0)
		return ret;

	/* Hit the go button to activate the new boot data */
	buf[0] = 0x00;
	buf[1] = 0x20;
	ret = i2c_master_send(tx->c, buf, 2);
	if (ret != 2) {
		zilog_error("i2c_master_send failed with %d\n", ret);
		return ret < 0 ? ret : -EFAULT;
	}

	/*
	 * Wait for zilog to settle after hitting go post boot block upload.
	 * Without this delay, the HD-PVR and HVR-1950 both return an -EIO
	 * upon attempting to get firmware revision, and tx probe thus fails.
	 */
	for (i = 0; i < 10; i++) {
		ret = i2c_master_send(tx->c, buf, 1);
		if (ret == 1)
			break;
		udelay(100);
	}

	if (ret != 1) {
		zilog_error("i2c_master_send failed with %d\n", ret);
		return ret < 0 ? ret : -EFAULT;
	}

	/* Here comes the firmware version... (hopefully) */
	ret = i2c_master_recv(tx->c, buf, 4);
	if (ret != 4) {
		/* not fatal: the boot block itself was accepted */
		zilog_error("i2c_master_recv failed with %d\n", ret);
		return 0;
	}
	if ((buf[0] != 0x80) && (buf[0] != 0xa0)) {
		zilog_error("unexpected IR TX init response: %02x\n", buf[0]);
		return 0;
	}
	zilog_notify("Zilog/Hauppauge IR blaster firmware version "
		     "%d.%d.%d loaded\n", buf[1], buf[2], buf[3]);

	return 0;
}
/* unload "firmware", lock held */
/*
 * Free the cached IR blaster codeset image, if any, and reset the
 * global pointer.  Caller must hold tx_data_lock.
 *
 * vfree(NULL) is a no-op, so the member pointers need no NULL guards.
 */
static void fw_unload_locked(void)
{
	if (tx_data) {
		vfree(tx_data->code_sets);
		vfree(tx_data->datap);
		vfree(tx_data);
		tx_data = NULL;
		dprintk("successfully unloaded IR blaster firmware\n");
	}
}
/* unload "firmware" for the IR TX device */
/* Locked wrapper around fw_unload_locked(). */
static void fw_unload(void)
{
	mutex_lock(&tx_data_lock);
	fw_unload_locked();
	mutex_unlock(&tx_data_lock);
}
/* load "firmware" for the IR TX device */
/*
 * Request and parse the "haup-ir-blaster.bin" codeset file into the
 * global tx_data (a no-op if already loaded).  Validates the version
 * byte, the global fixed-byte template, and the bounds of every
 * codeset record.  Returns 0 on success, -ENOMEM on allocation
 * failure, or -EFAULT / a negative errno on an unavailable,
 * unsupported or corrupt file.
 *
 * Fixes vs. original: (1) on datap allocation failure, tx_data was
 * vfree()d but left non-NULL, so a later retry would dereference freed
 * memory; (2) the version error message printed "expected1)" (missing
 * space across string-literal concatenation) and had no newline.
 */
static int fw_load(struct IR_tx *tx)
{
	int ret;
	unsigned int i;
	unsigned char *data, version, num_global_fixed;
	const struct firmware *fw_entry;

	/* Already loaded? */
	mutex_lock(&tx_data_lock);
	if (tx_data) {
		ret = 0;
		goto out;
	}

	/* Request codeset data file */
	ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev);
	if (ret != 0) {
		zilog_error("firmware haup-ir-blaster.bin not available "
			    "(%d)\n", ret);
		ret = ret < 0 ? ret : -EFAULT;
		goto out;
	}
	dprintk("firmware of size %zu loaded\n", fw_entry->size);

	/* Parse the file */
	tx_data = vmalloc(sizeof(*tx_data));
	if (tx_data == NULL) {
		zilog_error("out of memory\n");
		release_firmware(fw_entry);
		ret = -ENOMEM;
		goto out;
	}
	tx_data->code_sets = NULL;

	/* Copy the data so hotplug doesn't get confused and timeout */
	tx_data->datap = vmalloc(fw_entry->size);
	if (tx_data->datap == NULL) {
		zilog_error("out of memory\n");
		release_firmware(fw_entry);
		vfree(tx_data);
		/* don't leave the global dangling at freed memory */
		tx_data = NULL;
		ret = -ENOMEM;
		goto out;
	}
	memcpy(tx_data->datap, fw_entry->data, fw_entry->size);
	tx_data->endp = tx_data->datap + fw_entry->size;
	release_firmware(fw_entry); fw_entry = NULL;

	/* Check version */
	data = tx_data->datap;
	if (!read_uint8(&data, tx_data->endp, &version))
		goto corrupt;
	if (version != 1) {
		zilog_error("unsupported code set file version (%u, "
			    "expected 1) -- please upgrade to a newer "
			    "driver\n", version);
		fw_unload_locked();
		ret = -EFAULT;
		goto out;
	}

	/* Save boot block for later */
	tx_data->boot_data = data;
	if (!skip(&data, tx_data->endp, TX_BLOCK_SIZE))
		goto corrupt;

	if (!read_uint32(&data, tx_data->endp,
			 &tx_data->num_code_sets))
		goto corrupt;

	dprintk("%u IR blaster codesets loaded\n", tx_data->num_code_sets);

	tx_data->code_sets = vmalloc(
		tx_data->num_code_sets * sizeof(char *));
	if (tx_data->code_sets == NULL) {
		fw_unload_locked();
		ret = -ENOMEM;
		goto out;
	}

	/* -1 marks a block byte not covered by the global fixed template */
	for (i = 0; i < TX_BLOCK_SIZE; ++i)
		tx_data->fixed[i] = -1;

	/* Read global fixed data template */
	if (!read_uint8(&data, tx_data->endp, &num_global_fixed) ||
	    num_global_fixed > TX_BLOCK_SIZE)
		goto corrupt;
	for (i = 0; i < num_global_fixed; ++i) {
		unsigned char pos, val;

		if (!read_uint8(&data, tx_data->endp, &pos) ||
		    !read_uint8(&data, tx_data->endp, &val) ||
		    pos >= TX_BLOCK_SIZE)
			goto corrupt;
		tx_data->fixed[pos] = (int)val;
	}

	/* Filch out the position of each code set */
	for (i = 0; i < tx_data->num_code_sets; ++i) {
		unsigned int id;
		unsigned char keys;
		unsigned char ndiffs;

		/* Save the codeset position */
		tx_data->code_sets[i] = data;

		/* Read header */
		if (!read_uint32(&data, tx_data->endp, &id) ||
		    !read_uint8(&data, tx_data->endp, &keys) ||
		    !read_uint8(&data, tx_data->endp, &ndiffs) ||
		    ndiffs > TX_BLOCK_SIZE || keys == 0)
			goto corrupt;

		/* skip diff positions */
		if (!skip(&data, tx_data->endp, ndiffs))
			goto corrupt;

		/*
		 * After the diffs we have the first key id + data -
		 * global fixed
		 */
		if (!skip(&data, tx_data->endp,
			  1 + TX_BLOCK_SIZE - num_global_fixed))
			goto corrupt;

		/* Then we have keys-1 blocks of key id+diffs */
		if (!skip(&data, tx_data->endp,
			  (ndiffs + 1) * (keys - 1)))
			goto corrupt;
	}
	ret = 0;
	goto out;

corrupt:
	zilog_error("firmware is corrupt\n");
	fw_unload_locked();
	ret = -EFAULT;

out:
	mutex_unlock(&tx_data_lock);
	return ret;
}
/* copied from lirc_dev */
/*
 * read() - hand buffered IR key codes to userspace in whole
 * chunk_size units, blocking (unless O_NONBLOCK) until at least one
 * chunk is available.  Returns bytes written, or a negative errno.
 *
 * Fix vs. original: copy_to_user() returns the number of bytes NOT
 * copied; that positive count was stored in 'ret' and could be
 * returned to the caller as a bogus positive result (and 'written'
 * was advanced even for the failed copy).  A copy shortfall now
 * yields -EFAULT.
 */
static ssize_t read(struct file *filep, char *outbuf, size_t n, loff_t *ppos)
{
	struct IR *ir = filep->private_data;
	struct IR_rx *rx;
	struct lirc_buffer *rbuf = ir->l.rbuf;
	int ret = 0, written = 0, retries = 0;
	unsigned int m;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("read called\n");
	/* reads must be a whole number of code chunks */
	if (n % rbuf->chunk_size) {
		dprintk("read result = -EINVAL\n");
		return -EINVAL;
	}

	rx = get_ir_rx(ir);
	if (rx == NULL)
		return -ENXIO;

	/*
	 * we add ourselves to the task queue before buffer check
	 * to avoid losing scan code (in case when queue is awaken somewhere
	 * between while condition checking and scheduling)
	 */
	add_wait_queue(&rbuf->wait_poll, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	/*
	 * while we didn't provide 'length' bytes, device is opened in blocking
	 * mode and 'copy_to_user' is happy, wait for data.
	 */
	while (written < n && ret == 0) {
		if (lirc_buffer_empty(rbuf)) {
			/*
			 * According to the read(2) man page, 'written' can be
			 * returned as less than 'n', instead of blocking
			 * again, returning -EWOULDBLOCK, or returning
			 * -ERESTARTSYS
			 */
			if (written)
				break;
			if (filep->f_flags & O_NONBLOCK) {
				ret = -EWOULDBLOCK;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
		} else {
			unsigned char buf[rbuf->chunk_size];

			m = lirc_buffer_read(rbuf, buf);
			if (m == rbuf->chunk_size) {
				/* any shortfall is a userspace fault */
				if (copy_to_user((void *)outbuf + written,
						 buf, rbuf->chunk_size))
					ret = -EFAULT;
				else
					written += rbuf->chunk_size;
			} else {
				retries++;
			}
			if (retries >= 5) {
				zilog_error("Buffer read failed!\n");
				ret = -EIO;
			}
		}
	}

	remove_wait_queue(&rbuf->wait_poll, &wait);
	put_ir_rx(rx, false);
	set_current_state(TASK_RUNNING);

	dprintk("read result = %d (%s)\n", ret, ret ? "Error" : "OK");

	return ret ? ret : written;
}
/* send a keypress to the IR TX device */
/*
 * Transmit one codeset/key pair: decompress the key data from the
 * loaded "firmware", download it to the Z8, then walk the chip through
 * its transmit/ready handshake.  Caller holds ir->ir_lock and
 * tx->client_lock.  Returns 0 on success, -EPROTO for an unknown
 * code/key, or a negative errno / -EFAULT on chip errors.
 *
 * Fix vs. original: the failure path of the final i2c_master_recv()
 * logged the stale 'ret' value instead of the recv result 'i'.
 */
static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key)
{
	unsigned char data_block[TX_BLOCK_SIZE];
	unsigned char buf[2];
	int i, ret;

	/* Get data for the codeset/key */
	ret = get_key_data(data_block, code, key);
	if (ret == -EPROTO) {
		zilog_error("failed to get data for code %u, key %u -- check "
			    "lircd.conf entries\n", code, key);
		return ret;
	} else if (ret != 0)
		return ret;

	/* Send the data block */
	ret = send_data_block(tx, data_block);
	if (ret != 0)
		return ret;

	/* Send data block length? */
	buf[0] = 0x00;
	buf[1] = 0x40;
	ret = i2c_master_send(tx->c, buf, 2);
	if (ret != 2) {
		zilog_error("i2c_master_send failed with %d\n", ret);
		return ret < 0 ? ret : -EFAULT;
	}

	/* Give the z8 a moment to process data block */
	for (i = 0; i < 10; i++) {
		ret = i2c_master_send(tx->c, buf, 1);
		if (ret == 1)
			break;
		udelay(100);
	}

	if (ret != 1) {
		zilog_error("i2c_master_send failed with %d\n", ret);
		return ret < 0 ? ret : -EFAULT;
	}

	/* Send finished download? */
	ret = i2c_master_recv(tx->c, buf, 1);
	if (ret != 1) {
		zilog_error("i2c_master_recv failed with %d\n", ret);
		return ret < 0 ? ret : -EFAULT;
	}
	if (buf[0] != 0xA0) {
		zilog_error("unexpected IR TX response #1: %02x\n",
			buf[0]);
		return -EFAULT;
	}

	/* Send prepare command? */
	buf[0] = 0x00;
	buf[1] = 0x80;
	ret = i2c_master_send(tx->c, buf, 2);
	if (ret != 2) {
		zilog_error("i2c_master_send failed with %d\n", ret);
		return ret < 0 ? ret : -EFAULT;
	}

	/*
	 * The sleep bits aren't necessary on the HD PVR, and in fact, the
	 * last i2c_master_recv always fails with a -5, so for now, we're
	 * going to skip this whole mess and say we're done on the HD PVR
	 */
	if (!tx->post_tx_ready_poll) {
		dprintk("sent code %u, key %u\n", code, key);
		return 0;
	}

	/*
	 * This bit NAKs until the device is ready, so we retry it
	 * sleeping a bit each time. This seems to be what the windows
	 * driver does, approximately.
	 * Try for up to 1s.
	 */
	for (i = 0; i < 20; ++i) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout((50 * HZ + 999) / 1000);
		ret = i2c_master_send(tx->c, buf, 1);
		if (ret == 1)
			break;
		dprintk("NAK expected: i2c_master_send "
			"failed with %d (try %d)\n", ret, i+1);
	}
	if (ret != 1) {
		zilog_error("IR TX chip never got ready: last i2c_master_send "
			    "failed with %d\n", ret);
		return ret < 0 ? ret : -EFAULT;
	}

	/* Seems to be an 'ok' response */
	i = i2c_master_recv(tx->c, buf, 1);
	if (i != 1) {
		/* report the recv result, not the stale send result */
		zilog_error("i2c_master_recv failed with %d\n", i);
		return -EFAULT;
	}
	if (buf[0] != 0x80) {
		zilog_error("unexpected IR TX response #2: %02x\n", buf[0]);
		return -EFAULT;
	}

	/* Oh good, it worked */
	dprintk("sent code %u, key %u\n", code, key);
	return 0;
}
/*
 * Write a code to the device. We take in a 32-bit number (an int) and then
 * decode this to a codeset/key index. The key data is then decompressed and
 * sent to the device. We have a spin lock as per i2c documentation to prevent
 * multiple concurrent sends which would probably cause the device to explode.
 *
 * Each int's high 16 bits select the codeset, the low 16 bits the key.
 * The boot block is (re)sent first whenever tx->need_boot is set, and a
 * failed send is retried with a chip reset up to 3 times.  Returns the
 * byte count n on success or a negative errno.
 */
static ssize_t write(struct file *filep, const char *buf, size_t n,
		     loff_t *ppos)
{
	struct IR *ir = filep->private_data;
	struct IR_tx *tx;
	size_t i;
	int failures = 0;

	/* Validate user parameters */
	if (n % sizeof(int))
		return -EINVAL;

	/* Get a struct IR_tx reference */
	tx = get_ir_tx(ir);
	if (tx == NULL)
		return -ENXIO;

	/* Ensure our tx->c i2c_client remains valid for the duration */
	mutex_lock(&tx->client_lock);
	if (tx->c == NULL) {
		mutex_unlock(&tx->client_lock);
		put_ir_tx(tx, false);
		return -ENXIO;
	}

	/* Lock i2c bus for the duration */
	mutex_lock(&ir->ir_lock);

	/* Send each keypress */
	for (i = 0; i < n;) {
		int ret = 0;
		int command;

		if (copy_from_user(&command, buf + i, sizeof(command))) {
			mutex_unlock(&ir->ir_lock);
			mutex_unlock(&tx->client_lock);
			put_ir_tx(tx, false);
			return -EFAULT;
		}

		/* Send boot data first if required */
		if (tx->need_boot == 1) {
			/* Make sure we have the 'firmware' loaded, first */
			ret = fw_load(tx);
			if (ret != 0) {
				mutex_unlock(&ir->ir_lock);
				mutex_unlock(&tx->client_lock);
				put_ir_tx(tx, false);
				if (ret != -ENOMEM)
					ret = -EIO;
				return ret;
			}
			/* Prep the chip for transmitting codes */
			ret = send_boot_data(tx);
			if (ret == 0)
				tx->need_boot = 0;
		}

		/* Send the code: high 16 bits codeset, low 16 bits key */
		if (ret == 0) {
			ret = send_code(tx, (unsigned)command >> 16,
					(unsigned)command & 0xFFFF);
			if (ret == -EPROTO) {
				mutex_unlock(&ir->ir_lock);
				mutex_unlock(&tx->client_lock);
				put_ir_tx(tx, false);
				return ret;
			}
		}

		/*
		 * Hmm, a failure. If we've had a few then give up, otherwise
		 * try a reset
		 */
		if (ret != 0) {
			/* Looks like the chip crashed, reset it */
			zilog_error("sending to the IR transmitter chip "
				    "failed, trying reset\n");

			if (failures >= 3) {
				zilog_error("unable to send to the IR chip "
					    "after 3 resets, giving up\n");
				mutex_unlock(&ir->ir_lock);
				mutex_unlock(&tx->client_lock);
				put_ir_tx(tx, false);
				return ret;
			}
			/* ~100 ms pause before forcing a fresh boot */
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout((100 * HZ + 999) / 1000);
			tx->need_boot = 1;
			++failures;
		} else
			i += sizeof(int);
	}

	/* Release i2c bus */
	mutex_unlock(&ir->ir_lock);
	mutex_unlock(&tx->client_lock);

	/* Give back our struct IR_tx reference */
	put_ir_tx(tx, false);

	/* All looks good */
	return n;
}
/* copied from lirc_dev */
/*
 * poll() - report whether RX data can be read without blocking.
 * Returns POLLERR when no receiver is attached, otherwise
 * POLLIN|POLLRDNORM when the lirc buffer has data, else 0.
 *
 * Fix vs. original: the struct IR_rx reference taken here was never
 * dropped, leaking one rx kref per poll() call.
 */
static unsigned int poll(struct file *filep, poll_table *wait)
{
	struct IR *ir = filep->private_data;
	struct IR_rx *rx;
	struct lirc_buffer *rbuf = ir->l.rbuf;
	unsigned int ret;

	dprintk("poll called\n");

	rx = get_ir_rx(ir);
	if (rx == NULL) {
		/*
		 * Revisit this, if our poll function ever reports writeable
		 * status for Tx
		 */
		dprintk("poll result = POLLERR\n");
		return POLLERR;
	}

	/*
	 * Add our lirc_buffer's wait_queue to the poll_table. A wake up on
	 * that buffer's wait queue indicates we may have a new poll status.
	 */
	poll_wait(filep, &rbuf->wait_poll, wait);

	/* Indicate what ops could happen immediately without blocking */
	ret = lirc_buffer_empty(rbuf) ? 0 : (POLLIN|POLLRDNORM);

	/* Give back the struct IR_rx reference taken above */
	put_ir_rx(rx, false);

	dprintk("poll result = %s\n", ret ? "POLLIN|POLLRDNORM" : "none");
	return ret;
}
/*
 * Handle the small set of LIRC ioctls we support.  Anything that is not
 * listed, or that asks for a capability this unit lacks, is rejected.
 */
static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct IR *ir = filep->private_data;
	unsigned long features = ir->l.features;
	unsigned long mode;
	int rc;

	switch (cmd) {
	case LIRC_GET_LENGTH:
		/* Codes are always reported as 13 bits wide */
		rc = put_user((unsigned long)13, (unsigned long *)arg);
		break;
	case LIRC_GET_FEATURES:
		rc = put_user(features, (unsigned long *) arg);
		break;
	case LIRC_GET_REC_MODE:
		if (!(features & LIRC_CAN_REC_MASK))
			return -ENOSYS;
		rc = put_user(LIRC_REC2MODE(features & LIRC_CAN_REC_MASK),
			      (unsigned long *)arg);
		break;
	case LIRC_SET_REC_MODE:
		if (!(features & LIRC_CAN_REC_MASK))
			return -ENOSYS;
		rc = get_user(mode, (unsigned long *)arg);
		/* Only modes we advertise may be selected */
		if (rc == 0 && !(LIRC_MODE2REC(mode) & features))
			rc = -EINVAL;
		break;
	case LIRC_GET_SEND_MODE:
		if (!(features & LIRC_CAN_SEND_MASK))
			return -ENOSYS;
		rc = put_user(LIRC_MODE_PULSE, (unsigned long *) arg);
		break;
	case LIRC_SET_SEND_MODE:
		if (!(features & LIRC_CAN_SEND_MASK))
			return -ENOSYS;
		rc = get_user(mode, (unsigned long *) arg);
		/* Pulse is the only Tx mode this hardware does */
		if (rc == 0 && mode != LIRC_MODE_PULSE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	return rc;
}
/*
 * Look up the struct IR registered under the given lirc minor number and
 * return it with a fresh reference held, or NULL if no such unit exists.
 */
static struct IR *get_ir_device_by_minor(unsigned int minor)
{
	struct IR *cur;
	struct IR *found = NULL;

	mutex_lock(&ir_devices_lock);
	/* Iterating an empty list is a no-op, so no emptiness pre-check */
	list_for_each_entry(cur, &ir_devices_list, list) {
		if (cur->l.minor == minor) {
			found = get_ir_device(cur, true);
			break;
		}
	}
	mutex_unlock(&ir_devices_lock);
	return found;
}
/*
 * Open the IR device. Get hold of our IR structure and
 * stash it in private_data for the file
 */
static int open(struct inode *node, struct file *filep)
{
	unsigned int minor = MINOR(node->i_rdev);
	/* The lookup returns the unit with a reference already taken;
	 * that reference is dropped again in close(). */
	struct IR *ir = get_ir_device_by_minor(minor);

	if (!ir)
		return -ENODEV;

	atomic_inc(&ir->open_count);
	filep->private_data = ir;
	nonseekable_open(node, filep);
	return 0;
}
/* Close the IR device */
static int close(struct inode *node, struct file *filep)
{
	struct IR *ir = filep->private_data;

	if (!ir) {
		zilog_error("close: no private_data attached to the file!\n");
		return -ENODEV;
	}

	atomic_dec(&ir->open_count);
	/* Release the reference taken for this file in open() */
	put_ir_device(ir, false);
	return 0;
}
/* Forward declarations for the i2c_driver table below */
static int ir_remove(struct i2c_client *client);
static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id);

/* Flags packed into i2c_device_id.driver_data */
#define ID_FLAG_TX	0x01	/* client is the IR transmitter half */
#define ID_FLAG_HDPVR	0x02	/* client lives on an HD-PVR */

/* i2c device names we bind to; flags tell ir_probe() which half this is */
static const struct i2c_device_id ir_transceiver_id[] = {
	{ "ir_tx_z8f0811_haup", ID_FLAG_TX },
	{ "ir_rx_z8f0811_haup", 0 },
	{ "ir_tx_z8f0811_hdpvr", ID_FLAG_HDPVR | ID_FLAG_TX },
	{ "ir_rx_z8f0811_hdpvr", ID_FLAG_HDPVR },
	{ }
};

static struct i2c_driver driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "Zilog/Hauppauge i2c IR",
	},
	.probe		= ir_probe,
	.remove		= ir_remove,
	.id_table	= ir_transceiver_id,
};

/* File operations handed to lirc_dev; we bypass lirc_dev's own fops */
static const struct file_operations lirc_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= read,
	.write		= write,
	.poll		= poll,
	.unlocked_ioctl	= ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ioctl,
#endif
	.open		= open,
	.release	= close
};

/* Template copied into each struct IR's embedded lirc_driver in ir_probe() */
static struct lirc_driver lirc_template = {
	.name		= "lirc_zilog",
	.minor		= -1,
	.code_length	= 13,
	.buffer_size	= BUFLEN / 2,
	.sample_rate	= 0, /* tell lirc_dev to not start its own kthread */
	.chunk_size	= 2,
	.set_use_inc	= set_use_inc,
	.set_use_dec	= set_use_dec,
	.fops		= &lirc_fops,
	.owner		= THIS_MODULE,
};
/*
 * i2c client teardown: detach the i2c_client pointer from whichever half
 * (Tx or Rx) this client backs, then drop the reference the client held.
 * The half is identified by the device-name prefix.
 */
static int ir_remove(struct i2c_client *client)
{
	if (!strncmp("ir_tx_z8", client->name, 8)) {
		struct IR_tx *tx = i2c_get_clientdata(client);

		if (tx) {
			/* Blank out tx->c under the lock so in-flight users
			 * see a consistent NULL */
			mutex_lock(&tx->client_lock);
			tx->c = NULL;
			mutex_unlock(&tx->client_lock);
			put_ir_tx(tx, false);
		}
	} else if (!strncmp("ir_rx_z8", client->name, 8)) {
		struct IR_rx *rx = i2c_get_clientdata(client);

		if (rx) {
			mutex_lock(&rx->client_lock);
			rx->c = NULL;
			mutex_unlock(&rx->client_lock);
			put_ir_rx(rx, false);
		}
	}
	return 0;
}
/* ir_devices_lock must be held */
static struct IR *get_ir_device_by_adapter(struct i2c_adapter *adapter)
{
struct IR *ir;
if (list_empty(&ir_devices_list))
return NULL;
list_for_each_entry(ir, &ir_devices_list, list)
if (ir->adapter == adapter) {
get_ir_device(ir, true);
return ir;
}
return NULL;
}
/*
 * Probe one half (Rx or Tx) of the Z8 IR unit.  A single struct IR is
 * shared by the Rx and Tx i2c clients on the same adapter; whichever
 * half probes first allocates it.  Registration with lirc_dev is
 * deferred until both halves are present (or until Tx alone suffices
 * when the tx_only module option is set).
 */
static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
	struct IR *ir;
	struct IR_tx *tx;
	struct IR_rx *rx;
	struct i2c_adapter *adap = client->adapter;
	int ret;
	bool tx_probe = false;

	dprintk("%s: %s on i2c-%d (%s), client addr=0x%02x\n",
		__func__, id->name, adap->nr, adap->name, client->addr);

	/*
	 * The IR receiver is at i2c address 0x71.
	 * The IR transmitter is at i2c address 0x70.
	 */
	if (id->driver_data & ID_FLAG_TX)
		tx_probe = true;
	else if (tx_only) /* module option */
		return -ENXIO;

	zilog_info("probing IR %s on %s (i2c-%d)\n",
		   tx_probe ? "Tx" : "Rx", adap->name, adap->nr);

	mutex_lock(&ir_devices_lock);

	/* Use a single struct IR instance for both the Rx and Tx functions */
	ir = get_ir_device_by_adapter(adap);
	if (ir == NULL) {
		/* First half to probe: allocate and set up the shared unit */
		ir = kzalloc(sizeof(struct IR), GFP_KERNEL);
		if (ir == NULL) {
			ret = -ENOMEM;
			goto out_no_ir;
		}
		kref_init(&ir->ref);

		/* store for use in ir_probe() again, and open() later on */
		INIT_LIST_HEAD(&ir->list);
		list_add_tail(&ir->list, &ir_devices_list);

		ir->adapter = adap;
		mutex_init(&ir->ir_lock);
		atomic_set(&ir->open_count, 0);
		spin_lock_init(&ir->tx_ref_lock);
		spin_lock_init(&ir->rx_ref_lock);

		/* set lirc_dev stuff */
		memcpy(&ir->l, &lirc_template, sizeof(struct lirc_driver));
		/*
		 * FIXME this is a pointer reference to us, but no refcount.
		 *
		 * This OK for now, since lirc_dev currently won't touch this
		 * buffer as we provide our own lirc_fops.
		 *
		 * Currently our own lirc_fops rely on this ir->l.rbuf pointer
		 */
		ir->l.rbuf = &ir->rbuf;
		ir->l.dev  = &adap->dev;
		ret = lirc_buffer_init(ir->l.rbuf,
				       ir->l.chunk_size, ir->l.buffer_size);
		if (ret)
			goto out_put_ir;	/* rx/tx not yet set; label skips them */
	}

	if (tx_probe) {
		/* Get the IR_rx instance for later, if already allocated */
		rx = get_ir_rx(ir);

		/* Set up a struct IR_tx instance */
		tx = kzalloc(sizeof(struct IR_tx), GFP_KERNEL);
		if (tx == NULL) {
			ret = -ENOMEM;
			goto out_put_xx;
		}
		kref_init(&tx->ref);
		ir->tx = tx;

		ir->l.features |= LIRC_CAN_SEND_PULSE;
		mutex_init(&tx->client_lock);
		tx->c = client;
		tx->need_boot = 1;
		/* HD-PVR firmware does not need the post-Tx ready poll */
		tx->post_tx_ready_poll =
			       (id->driver_data & ID_FLAG_HDPVR) ? false : true;

		/* An ir ref goes to the struct IR_tx instance */
		tx->ir = get_ir_device(ir, true);

		/* A tx ref goes to the i2c_client */
		i2c_set_clientdata(client, get_ir_tx(ir));

		/*
		 * Load the 'firmware'. We do this before registering with
		 * lirc_dev, so the first firmware load attempt does not happen
		 * after a open() or write() call on the device.
		 *
		 * Failure here is not deemed catastrophic, so the receiver will
		 * still be usable. Firmware load will be retried in write(),
		 * if it is needed.
		 */
		fw_load(tx);

		/* Proceed only if the Rx client is also ready or not needed */
		if (rx == NULL && !tx_only) {
			zilog_info("probe of IR Tx on %s (i2c-%d) done. Waiting"
				   " on IR Rx.\n", adap->name, adap->nr);
			goto out_ok;
		}
	} else {
		/* Get the IR_tx instance for later, if already allocated */
		tx = get_ir_tx(ir);

		/* Set up a struct IR_rx instance */
		rx = kzalloc(sizeof(struct IR_rx), GFP_KERNEL);
		if (rx == NULL) {
			ret = -ENOMEM;
			goto out_put_xx;
		}
		kref_init(&rx->ref);
		ir->rx = rx;

		ir->l.features |= LIRC_CAN_REC_LIRCCODE;
		mutex_init(&rx->client_lock);
		rx->c = client;
		rx->hdpvr_data_fmt =
			       (id->driver_data & ID_FLAG_HDPVR) ? true : false;

		/* An ir ref goes to the struct IR_rx instance */
		rx->ir = get_ir_device(ir, true);

		/* An rx ref goes to the i2c_client */
		i2c_set_clientdata(client, get_ir_rx(ir));

		/*
		 * Start the polling thread.
		 * It will only perform an empty loop around schedule_timeout()
		 * until we register with lirc_dev and the first user open()
		 */
		/* An ir ref goes to the new rx polling kthread */
		rx->task = kthread_run(lirc_thread, get_ir_device(ir, true),
				       "zilog-rx-i2c-%d", adap->nr);
		if (IS_ERR(rx->task)) {
			ret = PTR_ERR(rx->task);
			zilog_error("%s: could not start IR Rx polling thread"
				    "\n", __func__);
			/* Failed kthread, so put back the ir ref */
			put_ir_device(ir, true);
			/* Failure exit, so put back rx ref from i2c_client */
			i2c_set_clientdata(client, NULL);
			put_ir_rx(rx, true);
			ir->l.features &= ~LIRC_CAN_REC_LIRCCODE;
			goto out_put_xx;
		}

		/* Proceed only if the Tx client is also ready */
		if (tx == NULL) {
			zilog_info("probe of IR Rx on %s (i2c-%d) done. Waiting"
				   " on IR Tx.\n", adap->name, adap->nr);
			goto out_ok;
		}
	}

	/* register with lirc */
	ir->l.minor = minor; /* module option: user requested minor number */
	ir->l.minor = lirc_register_driver(&ir->l);
	if (ir->l.minor < 0 || ir->l.minor >= MAX_IRCTL_DEVICES) {
		zilog_error("%s: \"minor\" must be between 0 and %d (%d)!\n",
			    __func__, MAX_IRCTL_DEVICES-1, ir->l.minor);
		ret = -EBADRQC;
		goto out_put_xx;
	}
	zilog_info("IR unit on %s (i2c-%d) registered as lirc%d and ready\n",
		   adap->name, adap->nr, ir->l.minor);

out_ok:
	/* Drop the local probe-time references; long-lived refs remain */
	if (rx != NULL)
		put_ir_rx(rx, true);
	if (tx != NULL)
		put_ir_tx(tx, true);
	put_ir_device(ir, true);
	zilog_info("probe of IR %s on %s (i2c-%d) done\n",
		   tx_probe ? "Tx" : "Rx", adap->name, adap->nr);
	mutex_unlock(&ir_devices_lock);
	return 0;

out_put_xx:
	/* Error path: both rx and tx are assigned before any jump here */
	if (rx != NULL)
		put_ir_rx(rx, true);
	if (tx != NULL)
		put_ir_tx(tx, true);
out_put_ir:
	put_ir_device(ir, true);
out_no_ir:
	zilog_error("%s: probing IR %s on %s (i2c-%d) failed with %d\n",
		    __func__, tx_probe ? "Tx" : "Rx", adap->name, adap->nr,
		   ret);
	mutex_unlock(&ir_devices_lock);
	return ret;
}
/* Module init: register our i2c driver; devices attach via ir_probe() */
static int __init zilog_init(void)
{
	int rc;

	zilog_notify("Zilog/Hauppauge IR driver initializing\n");

	mutex_init(&tx_data_lock);
	/* Make sure the firmware loader is around before we need it */
	request_module("firmware_class");

	rc = i2c_add_driver(&driver);
	if (rc != 0)
		zilog_error("initialization failed\n");
	else
		zilog_notify("initialization complete\n");
	return rc;
}
/* Module exit: unregister the i2c driver and free any cached firmware */
static void __exit zilog_exit(void)
{
	i2c_del_driver(&driver);
	/* if loaded */
	fw_unload();
	zilog_notify("Zilog/Hauppauge IR driver unloaded\n");
}
module_init(zilog_init);
module_exit(zilog_exit);

/* Module metadata and parameters */
MODULE_DESCRIPTION("Zilog/Hauppauge infrared transmitter driver (i2c stack)");
MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, "
	      "Ulrich Mueller, Stefan Jahn, Jerome Brock, Mark Weaver, "
	      "Andy Walls");
MODULE_LICENSE("GPL");
/* for compat with old name, which isn't all that accurate anymore */
MODULE_ALIAS("lirc_pvr150");

module_param(minor, int, 0444);
MODULE_PARM_DESC(minor, "Preferred minor device number");
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Enable debugging messages");
module_param(tx_only, bool, 0644);
MODULE_PARM_DESC(tx_only, "Only handle the IR transmit function");
| gpl-2.0 |
TeamWin/android_kernel_motorola_msm8226 | arch/arm/mach-prima2/prima2.c | 4830 | 1063 | /*
* Defines machines for CSR SiRFprimaII
*
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
*
* Licensed under GPLv2 or later.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/sizes.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include "common.h"
/* DT match table: descend into "simple-bus" nodes when populating devices */
static struct of_device_id sirfsoc_of_bus_ids[] __initdata = {
	{ .compatible = "simple-bus", },
	{},
};

/* Create platform devices for all children of matching bus nodes */
void __init sirfsoc_mach_init(void)
{
	of_platform_bus_probe(NULL, sirfsoc_of_bus_ids, NULL);
}
/* Device-tree compatibles this machine description claims */
static const char *prima2cb_dt_match[] __initdata = {
	"sirf,prima2-cb",
	NULL
};

/* Machine description for the SiRFprimaII evaluation board */
MACHINE_START(PRIMA2_EVB, "prima2cb")
	/* Maintainer: Barry Song <baohua.song@csr.com> */
	.atag_offset	= 0x100,
	.init_early	= sirfsoc_of_clk_init,
	.map_io		= sirfsoc_map_lluart,
	.init_irq	= sirfsoc_of_irq_init,
	.timer		= &sirfsoc_timer,
	.dma_zone_size	= SZ_256M,
	.init_machine	= sirfsoc_mach_init,
	.dt_compat	= prima2cb_dt_match,
	.restart	= sirfsoc_restart,
MACHINE_END
| gpl-2.0 |
armani-dev/android_kernel_xiaomi_armani_OLD | sound/oss/pas2_card.c | 5086 | 9558 | /*
* sound/oss/pas2_card.c
*
* Detection routine for the Pro Audio Spectrum cards.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include "sound_config.h"
#include "pas2.h"
#include "sb.h"
/* DMA channel number -> PAS16 DMA select register encoding (channels 0-7) */
static unsigned char dma_bits[] = {
	4, 1, 2, 3, 0, 5, 6, 7
};

/* IRQ line -> PAS16 IRQ select register encoding; 0 marks an invalid IRQ
 * (config_pas_hw() rejects entries that map to 0).  IRQs 0-15. */
static unsigned char irq_bits[] = {
	0, 0, 1, 2, 3, 4, 5, 6, 0, 1, 7, 8, 9, 0, 10, 11
};

/*
 * SB-emulation IRQ encoding, indexed by the user-supplied sb_irq (0-15).
 * config_pas_hw() indexes this before any range check, and previously the
 * table had only 15 entries, so sb_irq=15 read one byte past the end.
 * Pad to a full 16 entries (0 == "invalid", which triggers the warning
 * path in config_pas_hw()).
 */
static unsigned char sb_irq_bits[] = {
	0x00, 0x00, 0x08, 0x10, 0x00, 0x18, 0x00, 0x20,
	0x00, 0x08, 0x28, 0x30, 0x38, 0x00, 0x00, 0x00
};

/* SB-emulation DMA encoding, indexed by sb_dma (0-7); 0 marks invalid */
static unsigned char sb_dma_bits[] = {
	0x00, 0x40, 0x80, 0xC0, 0, 0, 0, 0
};
/*
 * The Address Translation code is used to convert I/O register addresses to
 * be relative to the given base -register
 */

int pas_translate_code = 0;		/* offset added to all register I/O */
static int pas_intr_mask;		/* shadow of the interrupt-mask register */
static int pas_irq;			/* IRQ line actually configured */
static int pas_sb_base;			/* SB-emulation base I/O address */
DEFINE_SPINLOCK(pas_lock);

/* Compile-time/board options, overridable as module parameters below */
#ifndef CONFIG_PAS_JOYSTICK
static bool joystick;
#else
static bool joystick = 1;
#endif
#ifdef SYMPHONY_PAS
static bool symphony = 1;
#else
static bool symphony;
#endif
#ifdef BROKEN_BUS_CLOCK
static bool broken_bus_clock = 1;
#else
static bool broken_bus_clock;
#endif

static struct address_info cfg;		/* main PAS16 resources */
static struct address_info cfg2;	/* SB-emulation resources */

/* Detected board revision (index into pas_model_names, 0 = none) */
char pas_model = 0;
static char *pas_model_names[] = {
	"",
	"Pro AudioSpectrum+",
	"CDPC",
	"Pro AudioSpectrum 16",
	"Pro AudioSpectrum 16D"
};
/*
 * pas_read() and pas_write() are equivalents of inb and outb
 * These routines perform the I/O address translation required
 * to support other than the default base address
 */

extern void mix_write(unsigned char data, int ioaddr);

/* Read one byte from a PAS register at its default-base address */
unsigned char pas_read(int ioaddr)
{
	return inb(ioaddr + pas_translate_code);
}

/* Write one byte to a PAS register at its default-base address */
void pas_write(unsigned char data, int ioaddr)
{
	outb((data), ioaddr + pas_translate_code);
}
/******************* Begin of the Interrupt Handler ********************/

/*
 * PAS16 interrupt handler: acknowledge the status register, then hand
 * PCM (0x08) and MIDI (0x10) events to their respective handlers.
 */
static irqreturn_t pasintr(int irq, void *dev_id)
{
	int status = pas_read(0x0B89);

	/* Writing the status back clears the pending interrupt bits */
	pas_write(status, 0x0B89);

	if (status & 0x08)
		pas_pcm_interrupt(status, 1);
	if (status & 0x10)
		pas_midi_interrupt();

	return IRQ_HANDLED;
}
/* Enable the interrupt sources in 'mask' (shadowed in pas_intr_mask) */
int pas_set_intr(int mask)
{
	if (mask) {
		pas_intr_mask |= mask;
		pas_write(pas_intr_mask, 0x0B8B);
	}
	return 0;
}
/* Disable the interrupt sources in 'mask' (shadowed in pas_intr_mask) */
int pas_remove_intr(int mask)
{
	if (mask) {
		pas_intr_mask &= ~mask;
		pas_write(pas_intr_mask, 0x0B8B);
	}
	return 0;
}
/******************* End of the Interrupt handler **********************/

/******************* Begin of the Initialization Code ******************/

/*
 * Program the detected PAS16 with the requested IRQ/DMA resources and
 * (optionally) Sound Blaster emulation.  Returns 1 on success, 0 if any
 * resource was invalid or could not be allocated; on failure the offending
 * field in hw_config is set to -1.  Register addresses are "default base"
 * values translated by pas_write()/pas_read().
 */
static int __init config_pas_hw(struct address_info *hw_config)
{
	char ok = 1;
	unsigned int_ptrs;	/* scsi/sound interrupt pointers */

	pas_irq = hw_config->irq;

	/* Initial register setup; magic values per Media Vision docs
	 * (register meanings not spelled out in the original driver) */
	pas_write(0x00, 0x0B8B);
	pas_write(0x36, 0x138B);
	pas_write(0x36, 0x1388);
	pas_write(0, 0x1388);
	pas_write(0x74, 0x138B);
	pas_write(0x74, 0x1389);
	pas_write(0, 0x1389);

	pas_write(0x80 | 0x40 | 0x20 | 1, 0x0B8A);
	pas_write(0x80 | 0x20 | 0x10 | 0x08 | 0x01, 0xF8A);
	pas_write(0x01 | 0x02 | 0x04 | 0x10 /*
					     * |
					     * 0x80
					     */ , 0xB88);

	pas_write(0x80 | (joystick ? 0x40 : 0), 0xF388);

	/* Validate, encode and claim the IRQ line */
	if (pas_irq < 0 || pas_irq > 15)
	{
		printk(KERN_ERR "PAS16: Invalid IRQ %d", pas_irq);
		hw_config->irq=-1;
		ok = 0;
	}
	else
	{
		int_ptrs = pas_read(0xF38A);
		int_ptrs = (int_ptrs & 0xf0) | irq_bits[pas_irq];
		pas_write(int_ptrs, 0xF38A);
		if (!irq_bits[pas_irq])
		{
			/* 0 in the table marks an IRQ the card cannot use */
			printk(KERN_ERR "PAS16: Invalid IRQ %d", pas_irq);
			hw_config->irq=-1;
			ok = 0;
		}
		else
		{
			if (request_irq(pas_irq, pasintr, 0, "PAS16",hw_config) < 0) {
				printk(KERN_ERR "PAS16: Cannot allocate IRQ %d\n",pas_irq);
				hw_config->irq=-1;
				ok = 0;
			}
		}
	}

	/* Validate, encode and claim the DMA channel */
	if (hw_config->dma < 0 || hw_config->dma > 7)
	{
		printk(KERN_ERR "PAS16: Invalid DMA selection %d", hw_config->dma);
		hw_config->dma=-1;
		ok = 0;
	}
	else
	{
		pas_write(dma_bits[hw_config->dma], 0xF389);
		if (!dma_bits[hw_config->dma])
		{
			/* 0 in the table marks a DMA channel the card cannot use */
			printk(KERN_ERR "PAS16: Invalid DMA selection %d", hw_config->dma);
			hw_config->dma=-1;
			ok = 0;
		}
		else
		{
			if (sound_alloc_dma(hw_config->dma, "PAS16"))
			{
				printk(KERN_ERR "pas2_card.c: Can't allocate DMA channel\n");
				hw_config->dma=-1;
				ok = 0;
			}
		}
	}

	/*
	 * This fixes the timing problems of the PAS due to the Symphony chipset
	 * as per Media Vision.  Only define this if your PAS doesn't work correctly.
	 */
	if(symphony)
	{
		outb((0x05), 0xa8);
		outb((0x60), 0xa9);
	}

	if(broken_bus_clock)
		pas_write(0x01 | 0x10 | 0x20 | 0x04, 0x8388);
	else
		/*
		 * pas_write(0x01, 0x8388);
		 */
		pas_write(0x01 | 0x10 | 0x20, 0x8388);
	pas_write(0x18, 0x838A);	/* ??? */
	pas_write(0x20 | 0x01, 0x0B8A);		/* Mute off, filter = 17.897 kHz */
	pas_write(8, 0xBF8A);

	mix_write(0x80 | 5, 0x078B);
	mix_write(5, 0x078B);

	/* Optionally enable Sound Blaster compatibility hardware */
	{
		struct address_info *sb_config;

		sb_config = &cfg2;
		if (sb_config->io_base)
		{
			unsigned char irq_dma;

			/*
			 * Turn on Sound Blaster compatibility
			 * bit 1 = SB emulation
			 * bit 0 = MPU401 emulation (CDPC only :-( )
			 */
			pas_write(0x02, 0xF788);

			/*
			 * "Emulation address"
			 */
			pas_write((sb_config->io_base >> 4) & 0x0f, 0xF789);
			pas_sb_base = sb_config->io_base;

			if (!sb_dma_bits[sb_config->dma])
				printk(KERN_ERR "PAS16 Warning: Invalid SB DMA %d\n\n", sb_config->dma);

			if (!sb_irq_bits[sb_config->irq])
				printk(KERN_ERR "PAS16 Warning: Invalid SB IRQ %d\n\n", sb_config->irq);

			irq_dma = sb_dma_bits[sb_config->dma] |
				sb_irq_bits[sb_config->irq];

			pas_write(irq_dma, 0xFB8A);
		}
		else
			pas_write(0x00, 0xF788);
	}

	if (!ok)
		printk(KERN_WARNING "PAS16: Driver not enabled\n");

	return ok;
}
/*
 * Probe for a PAS16-series card at hw_config->io_base.  Returns the model
 * byte (non-zero) if a PAS16 responds, 0 otherwise.  Also primes
 * pas_translate_code so pas_read()/pas_write() address the right base.
 */
static int __init detect_pas_hw(struct address_info *hw_config)
{
	unsigned char board_id, foo;

	/*
	 * WARNING: Setting an option like W:1 or so that disables warm boot reset
	 * of the card will screw up this detect code something fierce.  Adding code
	 * to handle this means possibly interfering with other cards on the bus if
	 * you have something on base port 0x388.  SO be forewarned.
	 */

	outb((0xBC), 0x9A01);			/* Activate first board */
	outb((hw_config->io_base >> 2), 0x9A01);	/* Set base address */
	pas_translate_code = hw_config->io_base - 0x388;
	pas_write(1, 0xBF88);		/* Select one wait states */

	board_id = pas_read(0x0B8B);

	if (board_id == 0xff)		/* nothing responded at this address */
		return 0;

	/*
	 * We probably have a PAS-series board, now check for a PAS16-series board
	 * by trying to change the board revision bits.  PAS16-series hardware won't
	 * let you do this - the bits are read-only.
	 */

	foo = board_id ^ 0xe0;

	pas_write(foo, 0x0B8B);
	foo = pas_read(0x0B8B);
	pas_write(board_id, 0x0B8B);	/* restore the original value */

	if (board_id != foo)		/* bits were writable: not a PAS16 */
		return 0;

	pas_model = pas_read(0xFF88);

	return pas_model;
}
/*
 * Second-stage bring-up after a successful probe: print the detected model,
 * program the hardware, and register the PCM, MIDI and mixer devices.
 */
static void __init attach_pas_card(struct address_info *hw_config)
{
	pas_irq = hw_config->irq;

	if (detect_pas_hw(hw_config))
	{

		if ((pas_model = pas_read(0xFF88)))
		{
			char            temp[100];

			/* Model names are short, so temp[100] cannot overflow */
			sprintf(temp,
			    "%s rev %d", pas_model_names[(int) pas_model],
				    pas_read(0x2789));
			conf_printf(temp, hw_config);
		}
		if (config_pas_hw(hw_config))
		{
			pas_pcm_init(hw_config);
			pas_midi_init();
			pas_init_mixer();
		}
	}
}
/* Thin probe wrapper: non-zero iff a PAS16 was detected at the given base */
static inline int __init probe_pas(struct address_info *hw_config)
{
	return detect_pas_hw(hw_config);
}
/* Release IRQ/DMA and unregister the mixer, MIDI and audio devices */
static void __exit unload_pas(struct address_info *hw_config)
{
	extern int pas_audiodev;
	extern int pas2_mididev;

	if (hw_config->dma>0)
		sound_free_dma(hw_config->dma);
	if (hw_config->irq>0)
		free_irq(hw_config->irq, hw_config);

	/* -1 means the corresponding device was never registered */
	if(pas_audiodev!=-1)
		sound_unload_mixerdev(audio_devs[pas_audiodev]->mixer_dev);
	if(pas2_mididev!=-1)
	        sound_unload_mididev(pas2_mididev);
	if(pas_audiodev!=-1)
		sound_unload_audiodev(pas_audiodev);
}
/* Resource parameters; -1 / 0 means "not supplied" and init_pas2()
 * rejects configurations without io, irq and dma. */
static int __initdata io	= -1;
static int __initdata irq	= -1;
static int __initdata dma	= -1;
static int __initdata dma16	= -1;	/* Set this for modules that need it */
static int __initdata sb_io	= 0;
static int __initdata sb_irq	= -1;
static int __initdata sb_dma	= -1;
static int __initdata sb_dma16	= -1;

module_param(io, int, 0);
module_param(irq, int, 0);
module_param(dma, int, 0);
module_param(dma16, int, 0);

module_param(sb_io, int, 0);
module_param(sb_irq, int, 0);
module_param(sb_dma, int, 0);
module_param(sb_dma16, int, 0);

module_param(joystick, bool, 0);
module_param(symphony, bool, 0);
module_param(broken_bus_clock, bool, 0);

MODULE_LICENSE("GPL");
/*
 * Module entry point: copy the user-supplied resources into the config
 * records, probe for the card and attach it if found.
 */
static int __init init_pas2(void)
{
	printk(KERN_INFO "Pro Audio Spectrum driver Copyright (C) by Hannu Savolainen 1993-1996\n");

	/* Main card resources */
	cfg.io_base	= io;
	cfg.irq		= irq;
	cfg.dma		= dma;
	cfg.dma2	= dma16;

	/* Sound Blaster emulation resources */
	cfg2.io_base	= sb_io;
	cfg2.irq	= sb_irq;
	cfg2.dma	= sb_dma;
	cfg2.dma2	= sb_dma16;

	/* Refuse to auto-probe: explicit resources are required */
	if (cfg.io_base == -1 || cfg.dma == -1 || cfg.irq == -1) {
		printk(KERN_INFO "I/O, IRQ, DMA and type are mandatory\n");
		return -EINVAL;
	}

	if (!probe_pas(&cfg))
		return -ENODEV;

	attach_pas_card(&cfg);
	return 0;
}
/* Module exit: undo everything attach_pas_card()/config_pas_hw() set up */
static void __exit cleanup_pas2(void)
{
	unload_pas(&cfg);
}

module_init(init_pas2);
module_exit(cleanup_pas2);
#ifndef MODULE
/*
 * Parse a "pas2=io,irq,dma,dma2,sb_io,sb_irq,sb_dma,sb_dma2" kernel
 * command line option.  get_options() stores the number of values it
 * actually parsed in ints[0]; entries beyond that are uninitialized, so
 * only consume what the user supplied (the old code read all eight
 * slots unconditionally, picking up stack garbage for omitted values).
 */
static int __init setup_pas2(char *str)
{
	/* io, irq, dma, dma2, sb_io, sb_irq, sb_dma, sb_dma2 */
	int ints[9];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	switch (ints[0]) {
	default:	/* more than 8 values: extras are ignored */
	case 8:
		sb_dma16 = ints[8];
		/* fall through */
	case 7:
		sb_dma = ints[7];
		/* fall through */
	case 6:
		sb_irq = ints[6];
		/* fall through */
	case 5:
		sb_io = ints[5];
		/* fall through */
	case 4:
		dma16 = ints[4];
		/* fall through */
	case 3:
		dma = ints[3];
		/* fall through */
	case 2:
		irq = ints[2];
		/* fall through */
	case 1:
		io = ints[1];
		/* fall through */
	case 0:
		break;	/* unsupplied values keep their defaults */
	}

	return 1;
}

__setup("pas2=", setup_pas2);
#endif
| gpl-2.0 |
KylinUI/android_kernel_htc_m7 | drivers/cpufreq/exynos4210-cpufreq.c | 5086 | 7482 | /*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS4210 - CPU frequency scaling support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <mach/regs-clock.h>
#include <mach/cpufreq.h>
/* Number of DVFS operating points (levels L0..L4) */
#define CPUFREQ_LEVEL_END	L5

static int max_support_idx = L0;	/* fastest supported level */
static int min_support_idx = (CPUFREQ_LEVEL_END - 1);	/* slowest level */

/* Clocks resolved in exynos4210_cpufreq_init() */
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;

struct cpufreq_clkdiv {
	unsigned int	index;
	unsigned int	clkdiv;		/* precomputed CLKDIV_CPU value */
};

/* Core voltage (uV) per level, same order as exynos4210_freq_table */
static unsigned int exynos4210_volt_table[CPUFREQ_LEVEL_END] = {
	1250000, 1150000, 1050000, 975000, 950000,
};

/* Precomputed divider register values, filled in at init time */
static struct cpufreq_clkdiv exynos4210_clkdiv_table[CPUFREQ_LEVEL_END];

/* cpufreq operating points in kHz */
static struct cpufreq_frequency_table exynos4210_freq_table[] = {
	{L0, 1200*1000},
	{L1, 1000*1000},
	{L2, 800*1000},
	{L3, 500*1000},
	{L4, 200*1000},
	{0, CPUFREQ_TABLE_END},
};
/* Per-level divider settings for the CPU0 clock domain */
static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
	/*
	 * Clock divider value for following
	 * { DIVCORE, DIVCOREM0, DIVCOREM1, DIVPERIPH,
	 *		DIVATB, DIVPCLK_DBG, DIVAPLL }
	 */

	/* ARM L0: 1200MHz */
	{ 0, 3, 7, 3, 4, 1, 7 },

	/* ARM L1: 1000MHz */
	{ 0, 3, 7, 3, 4, 1, 7 },

	/* ARM L2: 800MHz */
	{ 0, 3, 7, 3, 3, 1, 7 },

	/* ARM L3: 500MHz */
	{ 0, 3, 7, 3, 3, 1, 7 },

	/* ARM L4: 200MHz */
	{ 0, 1, 3, 1, 3, 1, 0 },
};

/* Per-level divider settings for the CPU1 (COPY/HPM) clock domain */
static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
	/*
	 * Clock divider value for following
	 * { DIVCOPY, DIVHPM }
	 */

	/* ARM L0: 1200MHz */
	{ 5, 0 },

	/* ARM L1: 1000MHz */
	{ 4, 0 },

	/* ARM L2: 800MHz */
	{ 3, 0 },

	/* ARM L3: 500MHz */
	{ 3, 0 },

	/* ARM L4: 200MHz */
	{ 3, 0 },
};

/* APLL M/P/S settings per level, packed as (M << 16) | (P << 8) | S */
static unsigned int exynos4210_apll_pms_table[CPUFREQ_LEVEL_END] = {
	/* APLL FOUT L0: 1200MHz */
	((150 << 16) | (3 << 8) | 1),

	/* APLL FOUT L1: 1000MHz */
	((250 << 16) | (6 << 8) | 1),

	/* APLL FOUT L2: 800MHz */
	((200 << 16) | (6 << 8) | 1),

	/* APLL FOUT L3: 500MHz */
	((250 << 16) | (6 << 8) | 2),

	/* APLL FOUT L4: 200MHz */
	((200 << 16) | (6 << 8) | 3),
};
/*
 * Program the CPU0 and CPU1 clock dividers for the given level and busy-wait
 * until the divider status registers report the change has taken effect.
 */
static void exynos4210_set_clkdiv(unsigned int div_index)
{
	unsigned int tmp;

	/* Change Divider - CPU0 */

	tmp = exynos4210_clkdiv_table[div_index].clkdiv;

	__raw_writel(tmp, EXYNOS4_CLKDIV_CPU);

	/* Poll until all CPU0 divider-busy status bits clear */
	do {
		tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU);
	} while (tmp & 0x1111111);

	/* Change Divider - CPU1 */

	tmp = __raw_readl(EXYNOS4_CLKDIV_CPU1);

	tmp &= ~((0x7 << 4) | 0x7);

	tmp |= ((clkdiv_cpu1[div_index][0] << 4) |
		(clkdiv_cpu1[div_index][1] << 0));

	__raw_writel(tmp, EXYNOS4_CLKDIV_CPU1);

	/* Poll until the two CPU1 divider-busy status bits clear */
	do {
		tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU1);
	} while (tmp & 0x11);
}
/*
 * Reprogram the APLL to the M/P/S values of the given level.  The core is
 * parked on the MPLL while the APLL relocks, so it keeps running throughout.
 */
static void exynos4210_set_apll(unsigned int index)
{
	unsigned int tmp;

	/* 1. MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
	clk_set_parent(moutcore, mout_mpll);

	/* Wait for the mux status to confirm MPLL (0x2) is selected */
	do {
		tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU)
			>> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
		tmp &= 0x7;
	} while (tmp != 0x2);

	/* 2. Set APLL Lock time */
	__raw_writel(EXYNOS4_APLL_LOCKTIME, EXYNOS4_APLL_LOCK);

	/* 3. Change PLL PMS values */
	tmp = __raw_readl(EXYNOS4_APLL_CON0);
	tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
	tmp |= exynos4210_apll_pms_table[index];
	__raw_writel(tmp, EXYNOS4_APLL_CON0);

	/* 4. wait_lock_time */
	do {
		tmp = __raw_readl(EXYNOS4_APLL_CON0);
	} while (!(tmp & (0x1 << EXYNOS4_APLLCON0_LOCKED_SHIFT)));

	/* 5. MUX_CORE_SEL = APLL */
	clk_set_parent(moutcore, mout_apll);

	/* Wait for the mux status to confirm APLL is selected again */
	do {
		tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU);
		tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
	} while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
}
/*
 * True when moving between the two levels requires a full APLL relock,
 * i.e. when their P or M dividers differ (everything above the S field).
 * If only S differs, set_frequency() can tweak it in place.
 */
bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
{
	return (exynos4210_apll_pms_table[old_index] >> 8) !=
	       (exynos4210_apll_pms_table[new_index] >> 8);
}
/*
 * Transition between two levels.  The divider/PLL ordering depends on the
 * direction: when speeding up (old_index > new_index) dividers are widened
 * before the PLL is raised; when slowing down the PLL is lowered first.
 * If only the S divider differs between levels, it is patched in place
 * instead of relocking the whole APLL.
 */
static void exynos4210_set_frequency(unsigned int old_index,
				     unsigned int new_index)
{
	unsigned int tmp;

	if (old_index > new_index) {
		if (!exynos4210_pms_change(old_index, new_index)) {
			/* 1. Change the system clock divider values */
			exynos4210_set_clkdiv(new_index);

			/* 2. Change just s value in apll m,p,s value */
			tmp = __raw_readl(EXYNOS4_APLL_CON0);
			tmp &= ~(0x7 << 0);
			tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
			__raw_writel(tmp, EXYNOS4_APLL_CON0);
		} else {
			/* Clock Configuration Procedure */
			/* 1. Change the system clock divider values */
			exynos4210_set_clkdiv(new_index);
			/* 2. Change the apll m,p,s value */
			exynos4210_set_apll(new_index);
		}
	} else if (old_index < new_index) {
		if (!exynos4210_pms_change(old_index, new_index)) {
			/* 1. Change just s value in apll m,p,s value */
			tmp = __raw_readl(EXYNOS4_APLL_CON0);
			tmp &= ~(0x7 << 0);
			tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
			__raw_writel(tmp, EXYNOS4_APLL_CON0);

			/* 2. Change the system clock divider values */
			exynos4210_set_clkdiv(new_index);
		} else {
			/* Clock Configuration Procedure */
			/* 1. Change the apll m,p,s value */
			exynos4210_set_apll(new_index);
			/* 2. Change the system clock divider values */
			exynos4210_set_clkdiv(new_index);
		}
	}
	/* old_index == new_index: nothing to do */
}
/*
 * Fill in the SoC-specific DVFS info for the common Exynos cpufreq core:
 * resolve the clocks, precompute the CLKDIV_CPU register value for every
 * level, and publish the voltage/frequency tables and callbacks.
 * Returns 0 on success or a negative errno; on failure all clocks obtained
 * so far are released again.
 */
int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
{
	int i;
	unsigned int tmp;
	unsigned long rate;

	cpu_clk = clk_get(NULL, "armclk");
	if (IS_ERR(cpu_clk))
		return PTR_ERR(cpu_clk);

	moutcore = clk_get(NULL, "moutcore");
	if (IS_ERR(moutcore))
		goto err_moutcore;

	mout_mpll = clk_get(NULL, "mout_mpll");
	if (IS_ERR(mout_mpll))
		goto err_mout_mpll;

	rate = clk_get_rate(mout_mpll) / 1000;	/* MPLL rate in kHz */

	mout_apll = clk_get(NULL, "mout_apll");
	if (IS_ERR(mout_apll))
		goto err_mout_apll;

	/* Precompute the full CLKDIV_CPU word for each level, starting from
	 * the current register contents */
	tmp = __raw_readl(EXYNOS4_CLKDIV_CPU);

	for (i = L0; i <  CPUFREQ_LEVEL_END; i++) {
		tmp &= ~(EXYNOS4_CLKDIV_CPU0_CORE_MASK |
			EXYNOS4_CLKDIV_CPU0_COREM0_MASK |
			EXYNOS4_CLKDIV_CPU0_COREM1_MASK |
			EXYNOS4_CLKDIV_CPU0_PERIPH_MASK |
			EXYNOS4_CLKDIV_CPU0_ATB_MASK |
			EXYNOS4_CLKDIV_CPU0_PCLKDBG_MASK |
			EXYNOS4_CLKDIV_CPU0_APLL_MASK);

		tmp |= ((clkdiv_cpu0[i][0] << EXYNOS4_CLKDIV_CPU0_CORE_SHIFT) |
			(clkdiv_cpu0[i][1] << EXYNOS4_CLKDIV_CPU0_COREM0_SHIFT) |
			(clkdiv_cpu0[i][2] << EXYNOS4_CLKDIV_CPU0_COREM1_SHIFT) |
			(clkdiv_cpu0[i][3] << EXYNOS4_CLKDIV_CPU0_PERIPH_SHIFT) |
			(clkdiv_cpu0[i][4] << EXYNOS4_CLKDIV_CPU0_ATB_SHIFT) |
			(clkdiv_cpu0[i][5] << EXYNOS4_CLKDIV_CPU0_PCLKDBG_SHIFT) |
			(clkdiv_cpu0[i][6] << EXYNOS4_CLKDIV_CPU0_APLL_SHIFT));

		exynos4210_clkdiv_table[i].clkdiv = tmp;
	}

	info->mpll_freq_khz = rate;
	info->pm_lock_idx = L2;		/* level pinned during suspend/resume */
	info->pll_safe_idx = L2;	/* safe level while relocking the APLL */
	info->max_support_idx = max_support_idx;
	info->min_support_idx = min_support_idx;
	info->cpu_clk = cpu_clk;
	info->volt_table = exynos4210_volt_table;
	info->freq_table = exynos4210_freq_table;
	info->set_freq = exynos4210_set_frequency;
	info->need_apll_change = exynos4210_pms_change;

	return 0;

/* Error unwinding: release only the clocks successfully obtained */
err_mout_apll:
	if (!IS_ERR(mout_mpll))
		clk_put(mout_mpll);
err_mout_mpll:
	if (!IS_ERR(moutcore))
		clk_put(moutcore);
err_moutcore:
	if (!IS_ERR(cpu_clk))
		clk_put(cpu_clk);

	pr_debug("%s: failed initialization\n", __func__);
	return -EINVAL;
}
EXPORT_SYMBOL(exynos4210_cpufreq_init);
| gpl-2.0 |
HighwindONE/Kernel_GoldStar | drivers/gpu/drm/gma500/gem.c | 5342 | 8430 | /*
* psb GEM interface
*
* Copyright (c) 2011, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Authors: Alan Cox
*
* TODO:
* - we need to work out if the MMU is relevant (eg for
* accelerated operations on a GEM object)
*/
#include <drm/drmP.h>
#include <drm/drm.h>
#include "gma_drm.h"
#include "psb_drv.h"
/* GEM object init hook: unused by this driver, so always rejects */
int psb_gem_init_object(struct drm_gem_object *obj)
{
	return -EINVAL;
}
/* Final-release hook: tear down the GEM state, then free the GTT range
 * the object is embedded in. */
void psb_gem_free_object(struct drm_gem_object *obj)
{
	struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
	drm_gem_object_release_wrap(obj);
	/* This must occur last as it frees up the memory of the GEM object */
	psb_gtt_free_range(obj->dev, gtt);
}
/* Aperture-size ioctl: not implemented for this hardware */
int psb_gem_get_aperture(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	return -EINVAL;
}
/**
 *	psb_gem_dumb_map_gtt	-	buffer mapping for dumb interface
 *	@file: our drm client file
 *	@dev: drm device
 *	@handle: GEM handle to the object (from dumb_create)
 *	@offset: returned fake-offset for the caller to pass to mmap()
 *
 *	Do the necessary setup to allow the mapping of the frame buffer
 *	into user memory. We don't have to do much here at the moment.
 *
 *	Returns 0 on success, -ENODEV without GEM support, -ENOENT for an
 *	unknown handle, or the error from the mmap-offset allocation.
 */
int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
			 uint32_t handle, uint64_t *offset)
{
	int ret = 0;
	struct drm_gem_object *obj;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	/* What validation is needed here ? */

	/* Make it mmapable */
	if (!obj->map_list.map) {
		ret = gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}
	/* GEM should really work out the hash offsets for us */
	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
out:
	/* Drop the lookup reference taken above */
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
/**
 *	psb_gem_create		-	create a mappable object
 *	@file: the DRM file of the client
 *	@dev: our device
 *	@size: the size requested
 *	@handlep: returned handle (opaque number)
 *
 *	Create a GEM object, fill in the boilerplate and attach a handle to
 *	it so that userspace can speak about it. This does the core work
 *	for the various methods that do/will create GEM objects for things
 *
 *	Returns 0 and stores the new handle via @handlep, or a negative
 *	errno (-ENOSPC if no GTT range is available, -ENOMEM on GEM init
 *	failure, or the handle-creation error).
 */
static int psb_gem_create(struct drm_file *file,
	struct drm_device *dev, uint64_t size, uint32_t *handlep)
{
	struct gtt_range *r;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	/* Allocate our object - for now a direct gtt range which is not
	   stolen memory backed */
	r = psb_gtt_alloc_range(dev, size, "gem", 0);
	if (r == NULL) {
		dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
		return -ENOSPC;
	}
	/* Initialize the extra goodies GEM needs to do all the hard work */
	if (drm_gem_object_init(dev, &r->gem, size) != 0) {
		psb_gtt_free_range(dev, r);
		/* GEM doesn't give an error code so use -ENOMEM */
		dev_err(dev->dev, "GEM init failed for %lld\n", size);
		return -ENOMEM;
	}
	/* Give the object a handle so we can carry it more easily */
	ret = drm_gem_handle_create(file, &r->gem, &handle);
	if (ret) {
		dev_err(dev->dev, "GEM handle failed for %p, %lld\n",
			&r->gem, size);
		drm_gem_object_release(&r->gem);
		psb_gtt_free_range(dev, r);
		return ret;
	}
	/* We have the initial and handle reference but need only one now */
	drm_gem_object_unreference(&r->gem);
	*handlep = handle;
	return 0;
}
/**
* psb_gem_dumb_create - create a dumb buffer
* @drm_file: our client file
* @dev: our device
* @args: the requested arguments copied from userspace
*
* Allocate a buffer suitable for use for a frame buffer of the
* form described by user space. Give userspace a handle by which
* to reference it.
*/
int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
args->size = args->pitch * args->height;
return psb_gem_create(file, dev, args->size, &args->handle);
}
/**
 * psb_gem_dumb_destroy - destroy a dumb buffer
 * @file: client file
 * @dev: our DRM device
 * @handle: the object handle
 *
 * Destroy a handle that was created via psb_gem_dumb_create, at least
 * we hope it was created that way. i915 seems to assume the caller
 * does the checking but that might be worth review ! FIXME
 *
 * Returns the result of dropping the handle's reference.
 */
int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
			 uint32_t handle)
{
	/* No special work needed, drop the reference and see what falls out */
	return drm_gem_handle_delete(file, handle);
}
/**
 * psb_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * This code eventually needs to handle faulting objects in and out
 * of the GTT and repacking it when we run out of space. We can put
 * that off for now and for our simple uses
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj;
	struct gtt_range *r;
	int ret;
	unsigned long pfn;
	pgoff_t page_offset;
	struct drm_device *dev;
	struct drm_psb_private *dev_priv;

	obj = vma->vm_private_data;	/* GEM object */
	dev = obj->dev;
	dev_priv = dev->dev_private;

	r = container_of(obj, struct gtt_range, gem);	/* Get the gtt range */

	/* Make sure we don't parallel update on a fault, nor move or remove
	   something from beneath our feet */
	mutex_lock(&dev->struct_mutex);

	/* For now the mmap pins the object and it stays pinned. As things
	   stand that will do us no harm */
	if (r->mmapping == 0) {
		ret = psb_gtt_pin(r);
		if (ret < 0) {
			dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
			goto fail;
		}
		r->mmapping = 1;
	}

	/* Page relative to the VMA start - we must calculate this ourselves
	   because vmf->pgoff is the fake GEM offset */
	page_offset = ((unsigned long) vmf->virtual_address - vma->vm_start)
				>> PAGE_SHIFT;

	/* CPU view of the page, don't go via the GART for CPU writes */
	if (r->stolen)
		pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
	else
		pfn = page_to_pfn(r->pages[page_offset]);
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

fail:
	mutex_unlock(&dev->struct_mutex);
	/* Translate the kernel error code into a VM fault code. */
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/*
 * psb_gem_create_stolen - create a GEM object backed by stolen memory
 * @file: our DRM client file
 * @dev: our DRM device
 * @size: size requested in bytes
 * @handle: returned handle for the new object
 *
 * Try to carve the object out of stolen memory. Returns 0 on success,
 * -ENOMEM if no stolen space is available, or the error from GEM
 * object/handle creation; the caller falls back to a normal GTT
 * allocation on failure.
 *
 * Fixes mangled '&gtt->gem' tokens (HTML-entity corruption) and
 * propagates real error codes instead of a hard-coded -ENOMEM.
 */
static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
				 int size, u32 *handle)
{
	struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
	int ret;

	if (gtt == NULL)
		return -ENOMEM;

	ret = drm_gem_private_object_init(dev, &gtt->gem, size);
	if (ret)
		goto free_gtt;
	ret = drm_gem_handle_create(file, &gtt->gem, handle);
	if (ret)
		goto free_gtt;
	return 0;
free_gtt:
	psb_gtt_free_range(dev, gtt);
	return ret;
}
/*
 * GEM interfaces for our specific client
 */

/*
 * psb_gem_create_ioctl - create a GEM object for userspace
 *
 * If the client asked for a stolen-memory backed object try that
 * first; on failure clear the stolen flag in the request and fall
 * back to an ordinary GTT-backed allocation.
 */
int psb_gem_create_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_psb_gem_create *args = data;
	int ret;
	if (args->flags & GMA_GEM_CREATE_STOLEN) {
		ret = psb_gem_create_stolen(file, dev, args->size,
					    &args->handle);
		if (ret == 0)
			return 0;
		/* Fall through */
		args->flags &= ~GMA_GEM_CREATE_STOLEN;
	}
	return psb_gem_create(file, dev, args->size, &args->handle);
}
/*
 * psb_gem_mmap_ioctl - translate a GEM handle into the fake mmap
 * offset userspace must pass to mmap(); delegates to the driver's
 * dumb_map_offset hook (psb_gem_dumb_map_gtt).
 */
int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file)
{
	struct drm_psb_gem_mmap *args = data;
	return dev->driver->dumb_map_offset(file, dev,
					    args->handle, &args->offset);
}
| gpl-2.0 |
Ken-Liu/OpenScrKernel_For_XC210 | drivers/mca/mca-bus.c | 9694 | 4717 | /* -*- mode: c; c-basic-offset: 8 -*- */
/*
* MCA bus support functions for sysfs.
*
* (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com>
*
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mca.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
/* Very few machines have more than one MCA bus. However, there are
* those that do (Voyager 35xx/5xxx), so we do it this way for future
* expansion. None that I know have more than 2 */
static struct mca_bus *mca_root_busses[MAX_MCA_BUSSES];
#define MCA_DEVINFO(i,s) { .pos = i, .name = s }
struct mca_device_info {
short pos_id; /* the 2 byte pos id for this card */
char name[50];
};
/*
 * mca_bus_match - driver-core match callback for the MCA bus.
 *
 * A driver matches a device when the device's POS id appears in the
 * driver's id_table, or when it equals the driver's integrated id.
 * On a match the index of the matching id is cached in the device.
 * Returns 1 on match, 0 otherwise.
 */
static int mca_bus_match (struct device *dev, struct device_driver *drv)
{
	struct mca_device *mca_dev = to_mca_device (dev);
	struct mca_driver *mca_drv = to_mca_driver (drv);
	const unsigned short *mca_ids = mca_drv->id_table;
	int i = 0;

	if (mca_ids) {
		while (mca_ids[i]) {
			if (mca_ids[i] == mca_dev->pos_id) {
				mca_dev->index = i;
				return 1;
			}
			i++;
		}
	}

	/* If the integrated id is present, treat it as though it were an
	 * additional id in the id_table (it can't be because by definition,
	 * integrated id's overflow a short */
	if (mca_drv->integrated_id &&
	    mca_dev->pos_id == mca_drv->integrated_id) {
		mca_dev->index = i;
		return 1;
	}

	return 0;
}
/* The MCA bus type: devices and drivers are paired by mca_bus_match(). */
struct bus_type mca_bus_type = {
	.name = "MCA",
	.match = mca_bus_match,
};
EXPORT_SYMBOL (mca_bus_type);
/*
 * mca_show_pos_id - sysfs "id" attribute: the device's 2-byte POS id
 * as four hex digits, or "none" for dummy POS ids.
 */
static ssize_t mca_show_pos_id(struct device *dev, struct device_attribute *attr, char *buf)
{
	/* four digits, \n and trailing \0 */
	struct mca_device *mca_dev = to_mca_device(dev);

	if (mca_dev->pos_id >= MCA_DUMMY_POS_START)
		return sprintf(buf, "none\n");

	return sprintf(buf, "%04x\n", mca_dev->pos_id);
}
/*
 * mca_show_pos - sysfs "pos" attribute: the eight raw POS register
 * bytes as space-separated two-digit hex values.
 */
static ssize_t mca_show_pos(struct device *dev, struct device_attribute *attr, char *buf)
{
	/* enough for 8 two byte hex chars plus space and new line */
	struct mca_device *mca_dev = to_mca_device(dev);
	int pos_byte;
	int written = 0;

	for (pos_byte = 0; pos_byte < 8; pos_byte++)
		written += sprintf(buf + written, "%02x ",
				   mca_dev->pos[pos_byte]);

	/* change last trailing space to new line */
	buf[written - 1] = '\n';
	return written;
}
static DEVICE_ATTR(id, S_IRUGO, mca_show_pos_id, NULL);
static DEVICE_ATTR(pos, S_IRUGO, mca_show_pos, NULL);
/*
 * mca_register_device - register one MCA device with the driver core
 * and create its sysfs attributes.
 * @bus: index of the bus the device sits on
 * @mca_dev: the device to register
 *
 * Returns 1 on success and 0 on failure (note: NOT the usual
 * 0/-errno convention).
 */
int __init mca_register_device(int bus, struct mca_device *mca_dev)
{
	struct mca_bus *mca_bus = mca_root_busses[bus];
	int rc;

	mca_dev->dev.parent = &mca_bus->dev;
	mca_dev->dev.bus = &mca_bus_type;
	dev_set_name(&mca_dev->dev, "%02d:%02X", bus, mca_dev->slot);
	/* Seed the device's own DMA mask from the bus default. */
	mca_dev->dma_mask = mca_bus->default_dma_mask;
	mca_dev->dev.dma_mask = &mca_dev->dma_mask;
	mca_dev->dev.coherent_dma_mask = mca_dev->dma_mask;

	rc = device_register(&mca_dev->dev);
	if (rc)
		goto err_out;

	rc = device_create_file(&mca_dev->dev, &dev_attr_id);
	if (rc) goto err_out_devreg;
	rc = device_create_file(&mca_dev->dev, &dev_attr_pos);
	if (rc) goto err_out_id;

	return 1;

	/* Unwind in reverse order of the steps above. */
err_out_id:
	device_remove_file(&mca_dev->dev, &dev_attr_id);
err_out_devreg:
	device_unregister(&mca_dev->dev);
err_out:
	return 0;
}
/* */
/*
 * mca_attach_bus - allocate and register the core device object for
 * one MCA bus.
 * @bus: number of the bus to attach
 *
 * Returns the new mca_bus (also recorded in mca_root_busses), or NULL
 * on a duplicate bus, allocation failure, or registration failure.
 */
struct mca_bus * __devinit mca_attach_bus(int bus)
{
	struct mca_bus *mca_bus;

	if (unlikely(mca_root_busses[bus] != NULL)) {
		/* This should never happen, but just in case */
		printk(KERN_EMERG "MCA tried to add already existing bus %d\n",
		       bus);
		dump_stack();
		return NULL;
	}

	mca_bus = kzalloc(sizeof(struct mca_bus), GFP_KERNEL);
	if (!mca_bus)
		return NULL;

	dev_set_name(&mca_bus->dev, "mca%d", bus);
	sprintf(mca_bus->name,"Host %s MCA Bridge", bus ? "Secondary" : "Primary");
	if (device_register(&mca_bus->dev)) {
		kfree(mca_bus);
		return NULL;
	}

	mca_root_busses[bus] = mca_bus;

	return mca_bus;
}
/* Register the MCA bus type with the driver core; called once at boot. */
int __init mca_system_init (void)
{
	return bus_register(&mca_bus_type);
}
| gpl-2.0 |
awifi-dev/android_kernel_lge_awifi_test | arch/sparc/mm/extable.c | 13534 | 2252 | /*
* linux/arch/sparc/mm/extable.c
*/
#include <linux/module.h>
#include <asm/uaccess.h>
/*
 * sort_extable - intentionally a no-op on this architecture.
 *
 * NOTE(review): the sparc exception table mixes one-entry and
 * two-entry (range) records (see search_extable below); a generic
 * sort would split the two halves of a range apart -- presumably why
 * no sorting is done here. Confirm before changing.
 */
void sort_extable(struct exception_table_entry *start,
		  struct exception_table_entry *finish)
{
}
/* Caller knows they are in a range if ret->fixup == 0 */
const struct exception_table_entry *
search_extable(const struct exception_table_entry *start,
	       const struct exception_table_entry *last,
	       unsigned long value)
{
	const struct exception_table_entry *walk;

	/* Single insn entries are encoded as:
	 * word 1: insn address
	 * word 2: fixup code address
	 *
	 * Range entries are encoded as:
	 * word 1: first insn address
	 * word 2: 0
	 * word 3: last insn address + 4 bytes
	 * word 4: fixup code address
	 *
	 * Deleted entries are encoded as:
	 * word 1: unused
	 * word 2: -1
	 *
	 * See asm/uaccess.h for more details.
	 */

	/* 1. Try to find an exact match. */
	for (walk = start; walk <= last; walk++) {
		if (walk->fixup == 0) {
			/* A range entry, skip both parts. */
			walk++;
			continue;
		}

		/* A deleted entry; see trim_init_extable */
		if (walk->fixup == -1)
			continue;

		if (walk->insn == value)
			return walk;
	}

	/* 2. Try to find a range match.
	 * A range spans two consecutive entries: walk[0].insn is the first
	 * insn and walk[1].insn is one past the last covered insn. */
	for (walk = start; walk <= (last - 1); walk++) {
		if (walk->fixup)
			continue;

		if (walk[0].insn <= value && walk[1].insn > value)
			return walk;

		walk++;
	}

	return NULL;
}
#ifdef CONFIG_MODULES
/* We could memmove them around; easier to mark the trimmed ones. */
void trim_init_extable(struct module *m)
{
	unsigned int i;
	bool range;

	for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
		/* fixup == 0 marks the first half of a two-entry range */
		range = m->extable[i].fixup == 0;

		if (within_module_init(m->extable[i].insn, m)) {
			/* fixup == -1 marks a deleted entry; skipped by
			 * search_extable */
			m->extable[i].fixup = -1;
			if (range)
				m->extable[i+1].fixup = -1;
		}
		/* NOTE(review): for a range entry, i is advanced both here
		 * (i++) and by the loop increment (+= 2), i.e. 3 entries in
		 * total for a 2-entry record -- this looks like one entry
		 * too many; confirm against upstream before relying on it. */
		if (range)
			i++;
	}
}
#endif /* CONFIG_MODULES */
/* Special extable search, which handles ranges. Returns fixup */
unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
{
	const struct exception_table_entry *entry;

	entry = search_exception_tables(addr);
	if (!entry)
		return 0;

	/* Inside range? Fix g2 and return correct fixup */
	if (!entry->fixup) {
		/* g2 gets the insn offset (in 4-byte words) into the range;
		 * the fixup lives in the second entry of the pair. */
		*g2 = (addr - entry->insn) / 4;
		return (entry + 1)->fixup;
	}

	return entry->fixup;
}
| gpl-2.0 |
lsigithub/axxia_yocto_linux_4.1 | drivers/mfd/lpc_ich.c | 223 | 30401 | /*
* lpc_ich.c - LPC interface for Intel ICH
*
* LPC bridge function of the Intel ICH contains many other
* functional units, such as Interrupt controllers, Timers,
* Power Management, System Management, GPIO, RTC, and LPC
* Configuration Registers.
*
* This driver is derived from lpc_sch.
* Copyright (c) 2011 Extreme Engineering Solution, Inc.
* Author: Aaron Sierra <asierra@xes-inc.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* This driver supports the following I/O Controller hubs:
* (See the intel documentation on http://developer.intel.com.)
* document number 290655-003, 290677-014: 82801AA (ICH), 82801AB (ICHO)
* document number 290687-002, 298242-027: 82801BA (ICH2)
* document number 290733-003, 290739-013: 82801CA (ICH3-S)
* document number 290716-001, 290718-007: 82801CAM (ICH3-M)
* document number 290744-001, 290745-025: 82801DB (ICH4)
* document number 252337-001, 252663-008: 82801DBM (ICH4-M)
* document number 273599-001, 273645-002: 82801E (C-ICH)
* document number 252516-001, 252517-028: 82801EB (ICH5), 82801ER (ICH5R)
* document number 300641-004, 300884-013: 6300ESB
* document number 301473-002, 301474-026: 82801F (ICH6)
* document number 313082-001, 313075-006: 631xESB, 632xESB
* document number 307013-003, 307014-024: 82801G (ICH7)
* document number 322896-001, 322897-001: NM10
* document number 313056-003, 313057-017: 82801H (ICH8)
* document number 316972-004, 316973-012: 82801I (ICH9)
* document number 319973-002, 319974-002: 82801J (ICH10)
* document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
* document number 320066-003, 320257-008: EP80597 (IICH)
* document number 324645-001, 324646-001: Cougar Point (CPT)
* document number TBD : Patsburg (PBG)
* document number TBD : DH89xxCC
* document number TBD : Panther Point
* document number TBD : Lynx Point
* document number TBD : Lynx Point-LP
* document number TBD : Wellsburg
* document number TBD : Avoton SoC
* document number TBD : Coleto Creek
* document number TBD : Wildcat Point-LP
* document number TBD : 9 Series
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/mfd/core.h>
#include <linux/mfd/lpc_ich.h>
#define ACPIBASE 0x40
#define ACPIBASE_GPE_OFF 0x28
#define ACPIBASE_GPE_END 0x2f
#define ACPIBASE_SMI_OFF 0x30
#define ACPIBASE_SMI_END 0x33
#define ACPIBASE_PMC_OFF 0x08
#define ACPIBASE_PMC_END 0x0c
#define ACPIBASE_TCO_OFF 0x60
#define ACPIBASE_TCO_END 0x7f
#define ACPICTRL_PMCBASE 0x44
#define ACPIBASE_GCS_OFF 0x3410
#define ACPIBASE_GCS_END 0x3414
#define GPIOBASE_ICH0 0x58
#define GPIOCTRL_ICH0 0x5C
#define GPIOBASE_ICH6 0x48
#define GPIOCTRL_ICH6 0x4C
#define RCBABASE 0xf0
#define wdt_io_res(i) wdt_res(0, i)
#define wdt_mem_res(i) wdt_res(ICH_RES_MEM_OFF, i)
#define wdt_res(b, i) (&wdt_ich_res[(b) + (i)])
/* Per-device state: the matched chipset plus the PCI config-space
 * register offsets used to locate/enable each resource, and the saved
 * pre-probe register values (-1 means nothing to restore). */
struct lpc_ich_priv {
	int chipset;

	int abase;		/* ACPI base */
	int actrl_pbase;	/* ACPI control or PMC base */
	int gbase;		/* GPIO base */
	int gctrl;		/* GPIO control */

	int abase_save;		/* Cached ACPI base value */
	int actrl_pbase_save;	/* Cached ACPI control or PMC base value */
	int gctrl_save;		/* Cached GPIO control value */
};
static struct resource wdt_ich_res[] = {
/* ACPI - TCO */
{
.flags = IORESOURCE_IO,
},
/* ACPI - SMI */
{
.flags = IORESOURCE_IO,
},
/* GCS or PMC */
{
.flags = IORESOURCE_MEM,
},
};
static struct resource gpio_ich_res[] = {
/* GPIO */
{
.flags = IORESOURCE_IO,
},
/* ACPI - GPE0 */
{
.flags = IORESOURCE_IO,
},
};
enum lpc_cells {
LPC_WDT = 0,
LPC_GPIO,
};
static struct mfd_cell lpc_ich_cells[] = {
[LPC_WDT] = {
.name = "iTCO_wdt",
.num_resources = ARRAY_SIZE(wdt_ich_res),
.resources = wdt_ich_res,
.ignore_resource_conflicts = true,
},
[LPC_GPIO] = {
.name = "gpio_ich",
.num_resources = ARRAY_SIZE(gpio_ich_res),
.resources = gpio_ich_res,
.ignore_resource_conflicts = true,
},
};
/* chipset related info */
enum lpc_chipsets {
LPC_ICH = 0, /* ICH */
LPC_ICH0, /* ICH0 */
LPC_ICH2, /* ICH2 */
LPC_ICH2M, /* ICH2-M */
LPC_ICH3, /* ICH3-S */
LPC_ICH3M, /* ICH3-M */
LPC_ICH4, /* ICH4 */
LPC_ICH4M, /* ICH4-M */
LPC_CICH, /* C-ICH */
LPC_ICH5, /* ICH5 & ICH5R */
LPC_6300ESB, /* 6300ESB */
LPC_ICH6, /* ICH6 & ICH6R */
LPC_ICH6M, /* ICH6-M */
LPC_ICH6W, /* ICH6W & ICH6RW */
LPC_631XESB, /* 631xESB/632xESB */
LPC_ICH7, /* ICH7 & ICH7R */
LPC_ICH7DH, /* ICH7DH */
LPC_ICH7M, /* ICH7-M & ICH7-U */
LPC_ICH7MDH, /* ICH7-M DH */
LPC_NM10, /* NM10 */
LPC_ICH8, /* ICH8 & ICH8R */
LPC_ICH8DH, /* ICH8DH */
LPC_ICH8DO, /* ICH8DO */
LPC_ICH8M, /* ICH8M */
LPC_ICH8ME, /* ICH8M-E */
LPC_ICH9, /* ICH9 */
LPC_ICH9R, /* ICH9R */
LPC_ICH9DH, /* ICH9DH */
LPC_ICH9DO, /* ICH9DO */
LPC_ICH9M, /* ICH9M */
LPC_ICH9ME, /* ICH9M-E */
LPC_ICH10, /* ICH10 */
LPC_ICH10R, /* ICH10R */
LPC_ICH10D, /* ICH10D */
LPC_ICH10DO, /* ICH10DO */
LPC_PCH, /* PCH Desktop Full Featured */
LPC_PCHM, /* PCH Mobile Full Featured */
LPC_P55, /* P55 */
LPC_PM55, /* PM55 */
LPC_H55, /* H55 */
LPC_QM57, /* QM57 */
LPC_H57, /* H57 */
LPC_HM55, /* HM55 */
LPC_Q57, /* Q57 */
LPC_HM57, /* HM57 */
LPC_PCHMSFF, /* PCH Mobile SFF Full Featured */
LPC_QS57, /* QS57 */
LPC_3400, /* 3400 */
LPC_3420, /* 3420 */
LPC_3450, /* 3450 */
LPC_EP80579, /* EP80579 */
LPC_CPT, /* Cougar Point */
LPC_CPTD, /* Cougar Point Desktop */
LPC_CPTM, /* Cougar Point Mobile */
LPC_PBG, /* Patsburg */
LPC_DH89XXCC, /* DH89xxCC */
LPC_PPT, /* Panther Point */
LPC_LPT, /* Lynx Point */
LPC_LPT_LP, /* Lynx Point-LP */
LPC_WBG, /* Wellsburg */
LPC_AVN, /* Avoton SoC */
LPC_BAYTRAIL, /* Bay Trail SoC */
LPC_COLETO, /* Coleto Creek */
LPC_WPT_LP, /* Wildcat Point-LP */
LPC_BRASWELL, /* Braswell SoC */
LPC_9S, /* 9 Series */
};
static struct lpc_ich_info lpc_chipset_info[] = {
[LPC_ICH] = {
.name = "ICH",
.iTCO_version = 1,
},
[LPC_ICH0] = {
.name = "ICH0",
.iTCO_version = 1,
},
[LPC_ICH2] = {
.name = "ICH2",
.iTCO_version = 1,
},
[LPC_ICH2M] = {
.name = "ICH2-M",
.iTCO_version = 1,
},
[LPC_ICH3] = {
.name = "ICH3-S",
.iTCO_version = 1,
},
[LPC_ICH3M] = {
.name = "ICH3-M",
.iTCO_version = 1,
},
[LPC_ICH4] = {
.name = "ICH4",
.iTCO_version = 1,
},
[LPC_ICH4M] = {
.name = "ICH4-M",
.iTCO_version = 1,
},
[LPC_CICH] = {
.name = "C-ICH",
.iTCO_version = 1,
},
[LPC_ICH5] = {
.name = "ICH5 or ICH5R",
.iTCO_version = 1,
},
[LPC_6300ESB] = {
.name = "6300ESB",
.iTCO_version = 1,
},
[LPC_ICH6] = {
.name = "ICH6 or ICH6R",
.iTCO_version = 2,
.gpio_version = ICH_V6_GPIO,
},
[LPC_ICH6M] = {
.name = "ICH6-M",
.iTCO_version = 2,
.gpio_version = ICH_V6_GPIO,
},
[LPC_ICH6W] = {
.name = "ICH6W or ICH6RW",
.iTCO_version = 2,
.gpio_version = ICH_V6_GPIO,
},
[LPC_631XESB] = {
.name = "631xESB/632xESB",
.iTCO_version = 2,
.gpio_version = ICH_V6_GPIO,
},
[LPC_ICH7] = {
.name = "ICH7 or ICH7R",
.iTCO_version = 2,
.gpio_version = ICH_V7_GPIO,
},
[LPC_ICH7DH] = {
.name = "ICH7DH",
.iTCO_version = 2,
.gpio_version = ICH_V7_GPIO,
},
[LPC_ICH7M] = {
.name = "ICH7-M or ICH7-U",
.iTCO_version = 2,
.gpio_version = ICH_V7_GPIO,
},
[LPC_ICH7MDH] = {
.name = "ICH7-M DH",
.iTCO_version = 2,
.gpio_version = ICH_V7_GPIO,
},
[LPC_NM10] = {
.name = "NM10",
.iTCO_version = 2,
.gpio_version = ICH_V7_GPIO,
},
[LPC_ICH8] = {
.name = "ICH8 or ICH8R",
.iTCO_version = 2,
.gpio_version = ICH_V7_GPIO,
},
[LPC_ICH8DH] = {
.name = "ICH8DH",
.iTCO_version = 2,
.gpio_version = ICH_V7_GPIO,
},
[LPC_ICH8DO] = {
.name = "ICH8DO",
.iTCO_version = 2,
.gpio_version = ICH_V7_GPIO,
},
[LPC_ICH8M] = {
.name = "ICH8M",
.iTCO_version = 2,
.gpio_version = ICH_V7_GPIO,
},
[LPC_ICH8ME] = {
.name = "ICH8M-E",
.iTCO_version = 2,
.gpio_version = ICH_V7_GPIO,
},
[LPC_ICH9] = {
.name = "ICH9",
.iTCO_version = 2,
.gpio_version = ICH_V9_GPIO,
},
[LPC_ICH9R] = {
.name = "ICH9R",
.iTCO_version = 2,
.gpio_version = ICH_V9_GPIO,
},
[LPC_ICH9DH] = {
.name = "ICH9DH",
.iTCO_version = 2,
.gpio_version = ICH_V9_GPIO,
},
[LPC_ICH9DO] = {
.name = "ICH9DO",
.iTCO_version = 2,
.gpio_version = ICH_V9_GPIO,
},
[LPC_ICH9M] = {
.name = "ICH9M",
.iTCO_version = 2,
.gpio_version = ICH_V9_GPIO,
},
[LPC_ICH9ME] = {
.name = "ICH9M-E",
.iTCO_version = 2,
.gpio_version = ICH_V9_GPIO,
},
[LPC_ICH10] = {
.name = "ICH10",
.iTCO_version = 2,
.gpio_version = ICH_V10CONS_GPIO,
},
[LPC_ICH10R] = {
.name = "ICH10R",
.iTCO_version = 2,
.gpio_version = ICH_V10CONS_GPIO,
},
[LPC_ICH10D] = {
.name = "ICH10D",
.iTCO_version = 2,
.gpio_version = ICH_V10CORP_GPIO,
},
[LPC_ICH10DO] = {
.name = "ICH10DO",
.iTCO_version = 2,
.gpio_version = ICH_V10CORP_GPIO,
},
[LPC_PCH] = {
.name = "PCH Desktop Full Featured",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_PCHM] = {
.name = "PCH Mobile Full Featured",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_P55] = {
.name = "P55",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_PM55] = {
.name = "PM55",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_H55] = {
.name = "H55",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_QM57] = {
.name = "QM57",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_H57] = {
.name = "H57",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_HM55] = {
.name = "HM55",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_Q57] = {
.name = "Q57",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_HM57] = {
.name = "HM57",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_PCHMSFF] = {
.name = "PCH Mobile SFF Full Featured",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_QS57] = {
.name = "QS57",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_3400] = {
.name = "3400",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_3420] = {
.name = "3420",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_3450] = {
.name = "3450",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_EP80579] = {
.name = "EP80579",
.iTCO_version = 2,
},
[LPC_CPT] = {
.name = "Cougar Point",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_CPTD] = {
.name = "Cougar Point Desktop",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_CPTM] = {
.name = "Cougar Point Mobile",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_PBG] = {
.name = "Patsburg",
.iTCO_version = 2,
},
[LPC_DH89XXCC] = {
.name = "DH89xxCC",
.iTCO_version = 2,
},
[LPC_PPT] = {
.name = "Panther Point",
.iTCO_version = 2,
.gpio_version = ICH_V5_GPIO,
},
[LPC_LPT] = {
.name = "Lynx Point",
.iTCO_version = 2,
},
[LPC_LPT_LP] = {
.name = "Lynx Point_LP",
.iTCO_version = 2,
},
[LPC_WBG] = {
.name = "Wellsburg",
.iTCO_version = 2,
},
[LPC_AVN] = {
.name = "Avoton SoC",
.iTCO_version = 3,
.gpio_version = AVOTON_GPIO,
},
[LPC_BAYTRAIL] = {
.name = "Bay Trail SoC",
.iTCO_version = 3,
},
[LPC_COLETO] = {
.name = "Coleto Creek",
.iTCO_version = 2,
},
[LPC_WPT_LP] = {
.name = "Wildcat Point_LP",
.iTCO_version = 2,
},
[LPC_BRASWELL] = {
.name = "Braswell SoC",
.iTCO_version = 3,
},
[LPC_9S] = {
.name = "9 Series",
.iTCO_version = 2,
},
};
/*
* This data only exists for exporting the supported PCI ids
* via MODULE_DEVICE_TABLE. We do not actually register a
* pci_driver, because the I/O Controller Hub has also other
* functions that probably will be registered by other drivers.
*/
static const struct pci_device_id lpc_ich_ids[] = {
{ PCI_VDEVICE(INTEL, 0x0f1c), LPC_BAYTRAIL},
{ PCI_VDEVICE(INTEL, 0x1c41), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c42), LPC_CPTD},
{ PCI_VDEVICE(INTEL, 0x1c43), LPC_CPTM},
{ PCI_VDEVICE(INTEL, 0x1c44), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c45), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c46), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c47), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c48), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c49), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c4a), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c4b), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c4c), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c4d), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c4e), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c4f), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c50), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c51), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c52), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c53), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c54), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c55), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c56), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c57), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c58), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c59), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c5a), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c5b), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c5c), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c5d), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c5e), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1c5f), LPC_CPT},
{ PCI_VDEVICE(INTEL, 0x1d40), LPC_PBG},
{ PCI_VDEVICE(INTEL, 0x1d41), LPC_PBG},
{ PCI_VDEVICE(INTEL, 0x1e40), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e41), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e42), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e43), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e44), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e45), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e46), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e47), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e48), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e49), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e4a), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e4b), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e4c), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e4d), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e4e), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e4f), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e50), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e51), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e52), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e53), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e54), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e55), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e56), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e57), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e58), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e59), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e5a), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e5b), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e5c), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e5d), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e5e), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1e5f), LPC_PPT},
{ PCI_VDEVICE(INTEL, 0x1f38), LPC_AVN},
{ PCI_VDEVICE(INTEL, 0x1f39), LPC_AVN},
{ PCI_VDEVICE(INTEL, 0x1f3a), LPC_AVN},
{ PCI_VDEVICE(INTEL, 0x1f3b), LPC_AVN},
{ PCI_VDEVICE(INTEL, 0x229c), LPC_BRASWELL},
{ PCI_VDEVICE(INTEL, 0x2310), LPC_DH89XXCC},
{ PCI_VDEVICE(INTEL, 0x2390), LPC_COLETO},
{ PCI_VDEVICE(INTEL, 0x2410), LPC_ICH},
{ PCI_VDEVICE(INTEL, 0x2420), LPC_ICH0},
{ PCI_VDEVICE(INTEL, 0x2440), LPC_ICH2},
{ PCI_VDEVICE(INTEL, 0x244c), LPC_ICH2M},
{ PCI_VDEVICE(INTEL, 0x2450), LPC_CICH},
{ PCI_VDEVICE(INTEL, 0x2480), LPC_ICH3},
{ PCI_VDEVICE(INTEL, 0x248c), LPC_ICH3M},
{ PCI_VDEVICE(INTEL, 0x24c0), LPC_ICH4},
{ PCI_VDEVICE(INTEL, 0x24cc), LPC_ICH4M},
{ PCI_VDEVICE(INTEL, 0x24d0), LPC_ICH5},
{ PCI_VDEVICE(INTEL, 0x25a1), LPC_6300ESB},
{ PCI_VDEVICE(INTEL, 0x2640), LPC_ICH6},
{ PCI_VDEVICE(INTEL, 0x2641), LPC_ICH6M},
{ PCI_VDEVICE(INTEL, 0x2642), LPC_ICH6W},
{ PCI_VDEVICE(INTEL, 0x2670), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x2671), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x2672), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x2673), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x2674), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x2675), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x2676), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x2677), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x2678), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x2679), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x267a), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x267b), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x267c), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x267d), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x267e), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x267f), LPC_631XESB},
{ PCI_VDEVICE(INTEL, 0x27b0), LPC_ICH7DH},
{ PCI_VDEVICE(INTEL, 0x27b8), LPC_ICH7},
{ PCI_VDEVICE(INTEL, 0x27b9), LPC_ICH7M},
{ PCI_VDEVICE(INTEL, 0x27bc), LPC_NM10},
{ PCI_VDEVICE(INTEL, 0x27bd), LPC_ICH7MDH},
{ PCI_VDEVICE(INTEL, 0x2810), LPC_ICH8},
{ PCI_VDEVICE(INTEL, 0x2811), LPC_ICH8ME},
{ PCI_VDEVICE(INTEL, 0x2812), LPC_ICH8DH},
{ PCI_VDEVICE(INTEL, 0x2814), LPC_ICH8DO},
{ PCI_VDEVICE(INTEL, 0x2815), LPC_ICH8M},
{ PCI_VDEVICE(INTEL, 0x2912), LPC_ICH9DH},
{ PCI_VDEVICE(INTEL, 0x2914), LPC_ICH9DO},
{ PCI_VDEVICE(INTEL, 0x2916), LPC_ICH9R},
{ PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
{ PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
{ PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
{ PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
{ PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
{ PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
{ PCI_VDEVICE(INTEL, 0x3a1a), LPC_ICH10D},
{ PCI_VDEVICE(INTEL, 0x3b00), LPC_PCH},
{ PCI_VDEVICE(INTEL, 0x3b01), LPC_PCHM},
{ PCI_VDEVICE(INTEL, 0x3b02), LPC_P55},
{ PCI_VDEVICE(INTEL, 0x3b03), LPC_PM55},
{ PCI_VDEVICE(INTEL, 0x3b06), LPC_H55},
{ PCI_VDEVICE(INTEL, 0x3b07), LPC_QM57},
{ PCI_VDEVICE(INTEL, 0x3b08), LPC_H57},
{ PCI_VDEVICE(INTEL, 0x3b09), LPC_HM55},
{ PCI_VDEVICE(INTEL, 0x3b0a), LPC_Q57},
{ PCI_VDEVICE(INTEL, 0x3b0b), LPC_HM57},
{ PCI_VDEVICE(INTEL, 0x3b0d), LPC_PCHMSFF},
{ PCI_VDEVICE(INTEL, 0x3b0f), LPC_QS57},
{ PCI_VDEVICE(INTEL, 0x3b12), LPC_3400},
{ PCI_VDEVICE(INTEL, 0x3b14), LPC_3420},
{ PCI_VDEVICE(INTEL, 0x3b16), LPC_3450},
{ PCI_VDEVICE(INTEL, 0x5031), LPC_EP80579},
{ PCI_VDEVICE(INTEL, 0x8c40), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c41), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c42), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c43), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c44), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c45), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c46), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c47), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c48), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c49), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c4a), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c4b), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c4c), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c4d), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c4e), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c4f), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c50), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c51), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c52), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c53), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c54), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c55), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c56), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c57), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c58), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c59), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c5a), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c5b), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c5c), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c5d), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c5e), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8c5f), LPC_LPT},
{ PCI_VDEVICE(INTEL, 0x8cc1), LPC_9S},
{ PCI_VDEVICE(INTEL, 0x8cc2), LPC_9S},
{ PCI_VDEVICE(INTEL, 0x8cc3), LPC_9S},
{ PCI_VDEVICE(INTEL, 0x8cc4), LPC_9S},
{ PCI_VDEVICE(INTEL, 0x8cc6), LPC_9S},
{ PCI_VDEVICE(INTEL, 0x8d40), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d41), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d42), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d43), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d44), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d45), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d46), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d47), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d48), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d49), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d4a), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d4b), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d4c), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d4d), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d4e), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d4f), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d50), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d51), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d52), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d53), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d54), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d55), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d56), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d57), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d58), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d59), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d5a), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d5b), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d5c), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d5d), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d5e), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x8d5f), LPC_WBG},
{ PCI_VDEVICE(INTEL, 0x9c40), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9c41), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9c42), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9c43), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9c44), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9c45), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9c46), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9c47), LPC_LPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc1), LPC_WPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc2), LPC_WPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc3), LPC_WPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc5), LPC_WPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc6), LPC_WPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc7), LPC_WPT_LP},
{ PCI_VDEVICE(INTEL, 0x9cc9), LPC_WPT_LP},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
/*
 * lpc_ich_restore_config_space - undo the config-space enables done at probe
 * @dev: LPC bridge PCI device
 *
 * Each *_save field in the private data holds the pre-probe value of the
 * corresponding config register, or -1 if that register was never modified.
 * Write the saved value back and mark the slot as restored.
 */
static void lpc_ich_restore_config_space(struct pci_dev *dev)
{
        struct lpc_ich_priv *priv = pci_get_drvdata(dev);

        if (priv->abase_save >= 0) {
                pci_write_config_byte(dev, priv->abase, priv->abase_save);
                priv->abase_save = -1;
        }
        if (priv->actrl_pbase_save >= 0) {
                pci_write_config_byte(dev, priv->actrl_pbase,
                        priv->actrl_pbase_save);
                priv->actrl_pbase_save = -1;
        }
        if (priv->gctrl_save >= 0) {
                pci_write_config_byte(dev, priv->gctrl, priv->gctrl_save);
                priv->gctrl_save = -1;
        }
}
/*
 * lpc_ich_enable_acpi_space - enable decoding of the ACPI I/O range
 * @dev: LPC bridge PCI device
 *
 * Saves the original register value in priv so that
 * lpc_ich_restore_config_space() can undo the change.
 *
 * Fix: the address-of operator in the pci_read_config_byte() calls had been
 * mangled into the "registered" sign ("(R)_save"); it must be &reg_save.
 */
static void lpc_ich_enable_acpi_space(struct pci_dev *dev)
{
        struct lpc_ich_priv *priv = pci_get_drvdata(dev);
        u8 reg_save;

        switch (lpc_chipset_info[priv->chipset].iTCO_version) {
        case 3:
                /*
                 * Some chipsets (eg Avoton) enable the ACPI space in the
                 * ACPI BASE register.
                 */
                pci_read_config_byte(dev, priv->abase, &reg_save);
                pci_write_config_byte(dev, priv->abase, reg_save | 0x2);
                priv->abase_save = reg_save;
                break;
        default:
                /*
                 * Most chipsets enable the ACPI space in the ACPI control
                 * register.
                 */
                pci_read_config_byte(dev, priv->actrl_pbase, &reg_save);
                pci_write_config_byte(dev, priv->actrl_pbase, reg_save | 0x80);
                priv->actrl_pbase_save = reg_save;
                break;
        }
}
/*
 * lpc_ich_enable_gpio_space - enable decoding of the GPIO I/O range
 * @dev: LPC bridge PCI device
 *
 * Fix: the read call's second argument had been mangled ("(R)_save");
 * it must be &reg_save. The original value is saved for later restore.
 */
static void lpc_ich_enable_gpio_space(struct pci_dev *dev)
{
        struct lpc_ich_priv *priv = pci_get_drvdata(dev);
        u8 reg_save;

        pci_read_config_byte(dev, priv->gctrl, &reg_save);
        pci_write_config_byte(dev, priv->gctrl, reg_save | 0x10);
        priv->gctrl_save = reg_save;
}
/*
 * lpc_ich_enable_pmc_space - enable decoding of the PMC range (iTCO v3)
 * @dev: LPC bridge PCI device
 *
 * Fix: the read call's second argument had been mangled ("(R)_save");
 * it must be &reg_save. The original value is saved for later restore.
 */
static void lpc_ich_enable_pmc_space(struct pci_dev *dev)
{
        struct lpc_ich_priv *priv = pci_get_drvdata(dev);
        u8 reg_save;

        pci_read_config_byte(dev, priv->actrl_pbase, &reg_save);
        pci_write_config_byte(dev, priv->actrl_pbase, reg_save | 0x2);
        priv->actrl_pbase_save = reg_save;
}
/*
 * Point an MFD cell at the shared per-chipset lpc_ich_info so the
 * sub-driver (GPIO or watchdog) can see chipset-specific parameters.
 */
static void lpc_ich_finalize_cell(struct pci_dev *dev, struct mfd_cell *cell)
{
        struct lpc_ich_priv *priv = pci_get_drvdata(dev);

        cell->platform_data = &lpc_chipset_info[priv->chipset];
        cell->pdata_size = sizeof(struct lpc_ich_info);
}
/*
 * We don't check for resource conflict globally. There are 2 or 3 independent
 * GPIO groups and it's enough to have access to one of these to instantiate
 * the device.
 *
 * Returns a bitmask of conflict-free groups (bit n = group n+1), or the
 * acpi_check_region() error for group 1 when no group at all is usable.
 */
static int lpc_ich_check_conflict_gpio(struct resource *res)
{
        int status;
        u8 found = 0;

        if (resource_size(res) >= 0x50 &&
            !acpi_check_region(res->start + 0x40, 0x10, "LPC ICH GPIO3"))
                found |= 1 << 2;
        if (!acpi_check_region(res->start + 0x30, 0x10, "LPC ICH GPIO2"))
                found |= 1 << 1;
        status = acpi_check_region(res->start + 0x00, 0x30, "LPC ICH GPIO1");
        if (!status)
                found |= 1 << 0;

        return found ? found : status;
}
/*
 * lpc_ich_init_gpio - probe-time setup of the GPIO MFD cell
 * @dev: LPC bridge PCI device
 *
 * Reads the ACPI (GPE0) and GPIO I/O base addresses from config space,
 * checks them against ACPI for conflicts, enables decoding of the usable
 * ranges and registers the GPIO sub-device.
 *
 * Returns 0 on success or a negative errno.
 */
static int lpc_ich_init_gpio(struct pci_dev *dev)
{
        struct lpc_ich_priv *priv = pci_get_drvdata(dev);
        u32 base_addr_cfg;
        u32 base_addr;
        int ret;
        bool acpi_conflict = false;
        struct resource *res;

        /* Setup power management base register */
        pci_read_config_dword(dev, priv->abase, &base_addr_cfg);
        base_addr = base_addr_cfg & 0x0000ff80;
        if (!base_addr) {
                dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
                /* Drop the GPE0 resource so the cell registers without it. */
                lpc_ich_cells[LPC_GPIO].num_resources--;
                goto gpe0_done;
        }

        res = &gpio_ich_res[ICH_RES_GPE0];
        res->start = base_addr + ACPIBASE_GPE_OFF;
        res->end = base_addr + ACPIBASE_GPE_END;
        ret = acpi_check_resource_conflict(res);
        if (ret) {
                /*
                 * This isn't fatal for the GPIO, but we have to make sure that
                 * the platform_device subsystem doesn't see this resource
                 * or it will register an invalid region.
                 */
                lpc_ich_cells[LPC_GPIO].num_resources--;
                acpi_conflict = true;
        } else {
                lpc_ich_enable_acpi_space(dev);
        }

gpe0_done:
        /* Setup GPIO base register */
        pci_read_config_dword(dev, priv->gbase, &base_addr_cfg);
        base_addr = base_addr_cfg & 0x0000ff80;
        if (!base_addr) {
                dev_notice(&dev->dev, "I/O space for GPIO uninitialized\n");
                ret = -ENODEV;
                goto gpio_done;
        }

        /* Older devices provide fewer GPIO and have a smaller resource size. */
        res = &gpio_ich_res[ICH_RES_GPIO];
        res->start = base_addr;
        switch (lpc_chipset_info[priv->chipset].gpio_version) {
        case ICH_V5_GPIO:
        case ICH_V10CORP_GPIO:
                res->end = res->start + 128 - 1;
                break;
        default:
                res->end = res->start + 64 - 1;
                break;
        }

        ret = lpc_ich_check_conflict_gpio(res);
        if (ret < 0) {
                /* this isn't necessarily fatal for the GPIO */
                acpi_conflict = true;
                goto gpio_done;
        }
        /* Positive return is the bitmask of conflict-free GPIO groups. */
        lpc_chipset_info[priv->chipset].use_gpio = ret;
        lpc_ich_enable_gpio_space(dev);

        lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_GPIO]);
        ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
                              1, NULL, 0, NULL);

gpio_done:
        if (acpi_conflict)
                pr_warn("Resource conflict(s) found affecting %s\n",
                                lpc_ich_cells[LPC_GPIO].name);
        return ret;
}
/*
 * lpc_ich_init_wdt - probe-time setup of the iTCO watchdog MFD cell
 * @dev: LPC bridge PCI device
 *
 * Locates the TCO and SMI I/O ranges from the ACPI base, plus the
 * version-specific memory-mapped register (GCS for iTCO v2, PMC for v3),
 * then registers the watchdog sub-device.
 *
 * Returns 0 on success or a negative errno.
 */
static int lpc_ich_init_wdt(struct pci_dev *dev)
{
        struct lpc_ich_priv *priv = pci_get_drvdata(dev);
        u32 base_addr_cfg;
        u32 base_addr;
        int ret;
        struct resource *res;

        /* Setup power management base register */
        pci_read_config_dword(dev, priv->abase, &base_addr_cfg);
        base_addr = base_addr_cfg & 0x0000ff80;
        if (!base_addr) {
                dev_notice(&dev->dev, "I/O space for ACPI uninitialized\n");
                ret = -ENODEV;
                goto wdt_done;
        }

        res = wdt_io_res(ICH_RES_IO_TCO);
        res->start = base_addr + ACPIBASE_TCO_OFF;
        res->end = base_addr + ACPIBASE_TCO_END;

        res = wdt_io_res(ICH_RES_IO_SMI);
        res->start = base_addr + ACPIBASE_SMI_OFF;
        res->end = base_addr + ACPIBASE_SMI_END;

        lpc_ich_enable_acpi_space(dev);

        /*
         * iTCO v2:
         * Get the Memory-Mapped GCS register. To get access to it
         * we have to read RCBA from PCI Config space 0xf0 and use
         * it as base. GCS = RCBA + ICH6_GCS(0x3410).
         *
         * iTCO v3:
         * Get the Power Management Configuration register. To get access
         * to it we have to read the PMC BASE from config space and address
         * the register at offset 0x8.
         */
        if (lpc_chipset_info[priv->chipset].iTCO_version == 1) {
                /* Don't register iomem for TCO ver 1 */
                lpc_ich_cells[LPC_WDT].num_resources--;
        } else if (lpc_chipset_info[priv->chipset].iTCO_version == 2) {
                pci_read_config_dword(dev, RCBABASE, &base_addr_cfg);
                base_addr = base_addr_cfg & 0xffffc000;
                /* Bit 0 of RCBA is the enable bit. */
                if (!(base_addr_cfg & 1)) {
                        dev_notice(&dev->dev, "RCBA is disabled by "
                                        "hardware/BIOS, device disabled\n");
                        ret = -ENODEV;
                        goto wdt_done;
                }
                res = wdt_mem_res(ICH_RES_MEM_GCS_PMC);
                res->start = base_addr + ACPIBASE_GCS_OFF;
                res->end = base_addr + ACPIBASE_GCS_END;
        } else if (lpc_chipset_info[priv->chipset].iTCO_version == 3) {
                lpc_ich_enable_pmc_space(dev);
                pci_read_config_dword(dev, ACPICTRL_PMCBASE, &base_addr_cfg);
                base_addr = base_addr_cfg & 0xfffffe00;
                res = wdt_mem_res(ICH_RES_MEM_GCS_PMC);
                res->start = base_addr + ACPIBASE_PMC_OFF;
                res->end = base_addr + ACPIBASE_PMC_END;
        }

        lpc_ich_finalize_cell(dev, &lpc_ich_cells[LPC_WDT]);
        ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
                              1, NULL, 0, NULL);

wdt_done:
        return ret;
}
/*
 * lpc_ich_probe - PCI probe: set up private state and register MFD cells
 * @dev: the matched LPC bridge device
 * @id: match entry; driver_data carries the chipset enum
 *
 * Registers the watchdog and/or GPIO cells depending on what the chipset
 * table advertises. Succeeds if at least one cell registered; otherwise
 * rolls back any config-space changes and returns -ENODEV.
 */
static int lpc_ich_probe(struct pci_dev *dev,
                                const struct pci_device_id *id)
{
        struct lpc_ich_priv *priv;
        int ret;
        bool cell_added = false;

        priv = devm_kzalloc(&dev->dev,
                            sizeof(struct lpc_ich_priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->chipset = id->driver_data;

        /* -1 means "never touched", see lpc_ich_restore_config_space(). */
        priv->actrl_pbase_save = -1;
        priv->abase_save = -1;

        priv->abase = ACPIBASE;
        priv->actrl_pbase = ACPICTRL_PMCBASE;

        priv->gctrl_save = -1;
        /* GPIO register offsets moved between ICH5 and ICH6. */
        if (priv->chipset <= LPC_ICH5) {
                priv->gbase = GPIOBASE_ICH0;
                priv->gctrl = GPIOCTRL_ICH0;
        } else {
                priv->gbase = GPIOBASE_ICH6;
                priv->gctrl = GPIOCTRL_ICH6;
        }

        pci_set_drvdata(dev, priv);

        if (lpc_chipset_info[priv->chipset].iTCO_version) {
                ret = lpc_ich_init_wdt(dev);
                if (!ret)
                        cell_added = true;
        }

        if (lpc_chipset_info[priv->chipset].gpio_version) {
                ret = lpc_ich_init_gpio(dev);
                if (!ret)
                        cell_added = true;
        }

        /*
         * We only care if at least one or none of the cells registered
         * successfully.
         */
        if (!cell_added) {
                dev_warn(&dev->dev, "No MFD cells added\n");
                lpc_ich_restore_config_space(dev);
                return -ENODEV;
        }

        return 0;
}
/*
 * PCI remove: tear down the MFD children first, then undo any config-space
 * enables made during probe.
 */
static void lpc_ich_remove(struct pci_dev *dev)
{
        mfd_remove_devices(&dev->dev);
        lpc_ich_restore_config_space(dev);
}
/* PCI driver glue; module_pci_driver() generates module init/exit. */
static struct pci_driver lpc_ich_driver = {
        .name           = "lpc_ich",
        .id_table       = lpc_ich_ids,
        .probe          = lpc_ich_probe,
        .remove         = lpc_ich_remove,
};

module_pci_driver(lpc_ich_driver);

MODULE_AUTHOR("Aaron Sierra <asierra@xes-inc.com>");
MODULE_DESCRIPTION("LPC interface for Intel ICH");
MODULE_LICENSE("GPL");
| gpl-2.0 |
aosm/gcc | gcc/testsuite/gcc.c-torture/execute/20020226-1.c | 223 | 2372 | /* This tests the rotate patterns that some machines support. */
#include <limits.h>
/* Fall back to the common byte width if <limits.h> didn't define it. */
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif

/* Rotate right / rotate left by b bits within the full width of a. */
#define ROR(a,b) (((a) >> (b)) | ((a) << ((sizeof (a) * CHAR_BIT) - (b))))
#define ROL(a,b) (((a) << (b)) | ((a) >> ((sizeof (a) * CHAR_BIT) - (b))))

/* One test value per operand width. */
#define CHAR_VALUE ((unsigned char)0x1234U)
#define SHORT_VALUE ((unsigned short)0x1234U)
#define INT_VALUE 0x1234U
#define LONG_VALUE 0x12345678LU
#define LL_VALUE 0x12345678abcdef0LLU

#define SHIFT1 4
#define SHIFT2 ((sizeof (long long) * CHAR_BIT) - SHIFT1)

/*
 * Globals mirror the macro constants so the compiler cannot fold the
 * variable-count rotates at compile time.
 */
unsigned char uc = CHAR_VALUE;
unsigned short us = SHORT_VALUE;
unsigned int ui = INT_VALUE;
unsigned long ul = LONG_VALUE;
unsigned long long ull = LL_VALUE;
int shift1 = SHIFT1;
int shift2 = SHIFT2;
/* Declared here because the test does not include <stdlib.h>. */
extern void abort (void);
extern void exit (int);

/*
 * Verify that variable-count rotates (shift1/shift2) compute the same
 * result as constant-count rotates (SHIFT1/SHIFT2) for every operand
 * width, in both directions.
 *
 * Fixes: implicit-int `main ()` (invalid since C99) and implicit
 * declarations of abort()/exit() (also removed in C99).
 */
int
main (void)
{
  if (ROR (uc, shift1) != ROR (CHAR_VALUE, SHIFT1))
    abort ();
  if (ROR (uc, SHIFT1) != ROR (CHAR_VALUE, SHIFT1))
    abort ();
  if (ROR (us, shift1) != ROR (SHORT_VALUE, SHIFT1))
    abort ();
  if (ROR (us, SHIFT1) != ROR (SHORT_VALUE, SHIFT1))
    abort ();
  if (ROR (ui, shift1) != ROR (INT_VALUE, SHIFT1))
    abort ();
  if (ROR (ui, SHIFT1) != ROR (INT_VALUE, SHIFT1))
    abort ();
  if (ROR (ul, shift1) != ROR (LONG_VALUE, SHIFT1))
    abort ();
  if (ROR (ul, SHIFT1) != ROR (LONG_VALUE, SHIFT1))
    abort ();
  if (ROR (ull, shift1) != ROR (LL_VALUE, SHIFT1))
    abort ();
  if (ROR (ull, SHIFT1) != ROR (LL_VALUE, SHIFT1))
    abort ();
  if (ROR (ull, shift2) != ROR (LL_VALUE, SHIFT2))
    abort ();
  if (ROR (ull, SHIFT2) != ROR (LL_VALUE, SHIFT2))
    abort ();
  if (ROL (uc, shift1) != ROL (CHAR_VALUE, SHIFT1))
    abort ();
  if (ROL (uc, SHIFT1) != ROL (CHAR_VALUE, SHIFT1))
    abort ();
  if (ROL (us, shift1) != ROL (SHORT_VALUE, SHIFT1))
    abort ();
  if (ROL (us, SHIFT1) != ROL (SHORT_VALUE, SHIFT1))
    abort ();
  if (ROL (ui, shift1) != ROL (INT_VALUE, SHIFT1))
    abort ();
  if (ROL (ui, SHIFT1) != ROL (INT_VALUE, SHIFT1))
    abort ();
  if (ROL (ul, shift1) != ROL (LONG_VALUE, SHIFT1))
    abort ();
  if (ROL (ul, SHIFT1) != ROL (LONG_VALUE, SHIFT1))
    abort ();
  if (ROL (ull, shift1) != ROL (LL_VALUE, SHIFT1))
    abort ();
  if (ROL (ull, SHIFT1) != ROL (LL_VALUE, SHIFT1))
    abort ();
  if (ROL (ull, shift2) != ROL (LL_VALUE, SHIFT2))
    abort ();
  if (ROL (ull, SHIFT2) != ROL (LL_VALUE, SHIFT2))
    abort ();
  exit (0);
}
| gpl-2.0 |
kaylorchen/Linux_for_mini2440 | net/decnet/dn_dev.c | 479 | 33778 | /*
* DECnet An implementation of the DECnet protocol suite for the LINUX
* operating system. DECnet is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* DECnet Device Layer
*
* Authors: Steve Whitehouse <SteveW@ACM.org>
* Eduardo Marcelo Serrat <emserrat@geocities.com>
*
* Changes:
* Steve Whitehouse : Devices now see incoming frames so they
* can mark on who it came from.
* Steve Whitehouse : Fixed bug in creating neighbours. Each neighbour
* can now have a device specific setup func.
* Steve Whitehouse : Added /proc/sys/net/decnet/conf/<dev>/
* Steve Whitehouse : Fixed bug which sometimes killed timer
* Steve Whitehouse : Multiple ifaddr support
* Steve Whitehouse : SIOCGIFCONF is now a compile time option
* Steve Whitehouse : /proc/sys/net/decnet/conf/<sys>/forwarding
* Steve Whitehouse : Removed timer1 - it's a user space issue now
* Patrick Caulfield : Fixed router hello message format
* Steve Whitehouse : Got rid of constant sizes for blksize for
* devices. All mtu based now.
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/sysctl.h>
#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/netlink.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>
#define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn))
static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00};
static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00};
static char dn_hiord[ETH_ALEN] = {0xAA,0x00,0x04,0x00,0x00,0x00};
static unsigned char dn_eco_version[3] = {0x02,0x00,0x00};
extern struct neigh_table dn_neigh_table;
/*
* decnet_address is kept in network order.
*/
__le16 decnet_address = 0;
static DEFINE_RWLOCK(dndev_lock);
static struct net_device *decnet_default_device;
static BLOCKING_NOTIFIER_HEAD(dnaddr_chain);
static struct dn_dev *dn_dev_create(struct net_device *dev, int *err);
static void dn_dev_delete(struct net_device *dev);
static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa);
static int dn_eth_up(struct net_device *);
static void dn_eth_down(struct net_device *);
static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa);
static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa);
/*
 * Per-link-type device parameters, matched against dev->type when a DECnet
 * device comes up. t2 is the timer tick interval, t3 the hello interval
 * (both in seconds). The loopback entry must remain last.
 */
static struct dn_dev_parms dn_dev_list[] =  {
{
        .type =         ARPHRD_ETHER, /* Ethernet */
        .mode =         DN_DEV_BCAST,
        .state =        DN_DEV_S_RU,
        .t2 =           1,
        .t3 =           10,
        .name =         "ethernet",
        .ctl_name =     NET_DECNET_CONF_ETHER,
        .up =           dn_eth_up,
        .down =         dn_eth_down,
        .timer3 =       dn_send_brd_hello,
},
{
        .type =         ARPHRD_IPGRE, /* DECnet tunneled over GRE in IP */
        .mode =         DN_DEV_BCAST,
        .state =        DN_DEV_S_RU,
        .t2 =           1,
        .t3 =           10,
        .name =         "ipgre",
        .ctl_name =     NET_DECNET_CONF_GRE,
        .timer3 =       dn_send_brd_hello,
},
#if 0
{
        .type =         ARPHRD_X25, /* Bog standard X.25 */
        .mode =         DN_DEV_UCAST,
        .state =        DN_DEV_S_DS,
        .t2 =           1,
        .t3 =           120,
        .name =         "x25",
        .ctl_name =     NET_DECNET_CONF_X25,
        .timer3 =       dn_send_ptp_hello,
},
#endif
#if 0
{
        .type =         ARPHRD_PPP, /* DECnet over PPP */
        .mode =         DN_DEV_BCAST,
        .state =        DN_DEV_S_RU,
        .t2 =           1,
        .t3 =           10,
        .name =         "ppp",
        .ctl_name =     NET_DECNET_CONF_PPP,
        .timer3 =       dn_send_brd_hello,
},
#endif
{
        .type =         ARPHRD_DDCMP, /* DECnet over DDCMP */
        .mode =         DN_DEV_UCAST,
        .state =        DN_DEV_S_DS,
        .t2 =           1,
        .t3 =           120,
        .name =         "ddcmp",
        .ctl_name =     NET_DECNET_CONF_DDCMP,
        .timer3 =       dn_send_ptp_hello,
},
{
        .type =         ARPHRD_LOOPBACK, /* Loopback interface - always last */
        .mode =         DN_DEV_BCAST,
        .state =        DN_DEV_S_RU,
        .t2 =           1,
        .t3 =           10,
        .name =         "loopback",
        .ctl_name =     NET_DECNET_CONF_LOOPBACK,
        .timer3 =       dn_send_brd_hello,
}
};
#define DN_DEV_LIST_SIZE ARRAY_SIZE(dn_dev_list)

/* Offset of a field within struct dn_dev_parms; patched up at register time. */
#define DN_DEV_PARMS_OFFSET(x) offsetof(struct dn_dev_parms, x)

#ifdef CONFIG_SYSCTL

/* Bounds for the per-device sysctl knobs defined below. */
static int min_t2[] = { 1 };
static int max_t2[] = { 60 }; /* No max specified, but this seems sensible */
static int min_t3[] = { 1 };
static int max_t3[] = { 8191 }; /* Must fit in 16 bits when multiplied by BCT3MULT or T3MULT */

static int min_priority[1];
static int max_priority[] = { 127 }; /* From DECnet spec */

static int dn_forwarding_proc(ctl_table *, int,
                        void __user *, size_t *, loff_t *);
static int dn_forwarding_sysctl(ctl_table *table,
                        void __user *oldval, size_t __user *oldlenp,
                        void __user *newval, size_t newlen);
/*
 * Template sysctl table for one device. The .data fields initially hold
 * DN_DEV_PARMS_OFFSET() offsets; dn_dev_sysctl_register() duplicates the
 * template and rewrites them into real pointers into the device's parms.
 */
static struct dn_dev_sysctl_table {
        struct ctl_table_header *sysctl_header;
        ctl_table dn_dev_vars[5];
} dn_dev_sysctl = {
        NULL,
        {
        {
                .ctl_name = NET_DECNET_CONF_DEV_FORWARDING,
                .procname = "forwarding",
                .data = (void *)DN_DEV_PARMS_OFFSET(forwarding),
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = dn_forwarding_proc,
                .strategy = dn_forwarding_sysctl,
        },
        {
                .ctl_name = NET_DECNET_CONF_DEV_PRIORITY,
                .procname = "priority",
                .data = (void *)DN_DEV_PARMS_OFFSET(priority),
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .strategy = sysctl_intvec,
                .extra1 = &min_priority,
                .extra2 = &max_priority
        },
        {
                .ctl_name = NET_DECNET_CONF_DEV_T2,
                .procname = "t2",
                .data = (void *)DN_DEV_PARMS_OFFSET(t2),
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .strategy = sysctl_intvec,
                .extra1 = &min_t2,
                .extra2 = &max_t2
        },
        {
                .ctl_name = NET_DECNET_CONF_DEV_T3,
                .procname = "t3",
                .data = (void *)DN_DEV_PARMS_OFFSET(t3),
                .maxlen = sizeof(int),
                .mode = 0644,
                .proc_handler = proc_dointvec_minmax,
                .strategy = sysctl_intvec,
                .extra1 = &min_t3,
                .extra2 = &max_t3
        },
        {0}
        },
};
/*
 * dn_dev_sysctl_register - expose a device's parms under
 *     /proc/sys/net/decnet/conf/<dev|class>/
 * @dev: the device, or NULL to register the per-class defaults
 * @parms: the parameter block the sysctl entries point into
 *
 * Duplicates the template table, converts the stored field offsets into
 * pointers into @parms, and registers the result. On success the table is
 * remembered in parms->sysctl for later unregistration; on failure the
 * copy is freed and the device simply has no sysctl entries.
 */
static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms)
{
        struct dn_dev_sysctl_table *t;
        int i;

#define DN_CTL_PATH_DEV 3
        struct ctl_path dn_ctl_path[] = {
                { .procname = "net", .ctl_name = CTL_NET, },
                { .procname = "decnet", .ctl_name = NET_DECNET, },
                { .procname = "conf", .ctl_name = NET_DECNET_CONF, },
                { /* to be set */ },
                { },
        };

        t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL);
        if (t == NULL)
                return;

        /* Rewrite offsets (see DN_DEV_PARMS_OFFSET) into real pointers. */
        for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) {
                long offset = (long)t->dn_dev_vars[i].data;
                t->dn_dev_vars[i].data = ((char *)parms) + offset;
        }

        if (dev) {
                dn_ctl_path[DN_CTL_PATH_DEV].procname = dev->name;
                dn_ctl_path[DN_CTL_PATH_DEV].ctl_name = dev->ifindex;
        } else {
                dn_ctl_path[DN_CTL_PATH_DEV].procname = parms->name;
                dn_ctl_path[DN_CTL_PATH_DEV].ctl_name = parms->ctl_name;
        }

        /* The forwarding handler needs the device; stashed in extra1. */
        t->dn_dev_vars[0].extra1 = (void *)dev;

        t->sysctl_header = register_sysctl_paths(dn_ctl_path, t->dn_dev_vars);
        if (t->sysctl_header == NULL)
                kfree(t);
        else
                parms->sysctl = t;
}
/* Tear down the sysctl entries created by dn_dev_sysctl_register(). */
static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
{
        struct dn_dev_sysctl_table *tbl = parms->sysctl;

        if (tbl == NULL)
                return;

        /* Clear the back-pointer first, then release the table. */
        parms->sysctl = NULL;
        unregister_sysctl_table(tbl->sysctl_header);
        kfree(tbl);
}
/*
 * /proc handler for the per-device "forwarding" sysctl.
 *
 * Clamps the written value to 0..2 and, because changing the forwarding
 * mode alters which multicast groups the device must join, bounces the
 * device through its down()/up() callbacks with the old value restored
 * during down() (see the in-line comment). Returns the proc_dointvec()
 * result, or -EINVAL without router support or when extra1 is unset.
 */
static int dn_forwarding_proc(ctl_table *table, int write,
                                void __user *buffer,
                                size_t *lenp, loff_t *ppos)
{
#ifdef CONFIG_DECNET_ROUTER
        struct net_device *dev = table->extra1;
        struct dn_dev *dn_db;
        int err;
        int tmp, old;

        if (table->extra1 == NULL)
                return -EINVAL;

        dn_db = dev->dn_ptr;
        old = dn_db->parms.forwarding;

        err = proc_dointvec(table, write, buffer, lenp, ppos);

        if ((err >= 0) && write) {
                if (dn_db->parms.forwarding < 0)
                        dn_db->parms.forwarding = 0;
                if (dn_db->parms.forwarding > 2)
                        dn_db->parms.forwarding = 2;
                /*
                 * What an ugly hack this is... its works, just. It
                 * would be nice if sysctl/proc were just that little
                 * bit more flexible so I don't have to write a special
                 * routine, or suffer hacks like this - SJW
                 */
                tmp = dn_db->parms.forwarding;
                dn_db->parms.forwarding = old;
                if (dn_db->parms.down)
                        dn_db->parms.down(dev);
                dn_db->parms.forwarding = tmp;
                if (dn_db->parms.up)
                        dn_db->parms.up(dev);
        }

        return err;
#else
        return -EINVAL;
#endif
}
/*
 * Binary-sysctl (sys_sysctl) strategy routine for "forwarding".
 *
 * Validates the new value (must be an int in 0..2) and applies it between
 * the device's down()/up() callbacks so multicast membership is updated.
 * Read-back of the old value is left to the generic code.
 */
static int dn_forwarding_sysctl(ctl_table *table,
                        void __user *oldval, size_t __user *oldlenp,
                        void __user *newval, size_t newlen)
{
#ifdef CONFIG_DECNET_ROUTER
        struct net_device *dev = table->extra1;
        struct dn_dev *dn_db;
        int value;

        if (table->extra1 == NULL)
                return -EINVAL;

        dn_db = dev->dn_ptr;

        if (newval && newlen) {
                if (newlen != sizeof(int))
                        return -EINVAL;

                if (get_user(value, (int __user *)newval))
                        return -EFAULT;
                if (value < 0)
                        return -EINVAL;
                if (value > 2)
                        return -EINVAL;

                if (dn_db->parms.down)
                        dn_db->parms.down(dev);
                dn_db->parms.forwarding = value;
                if (dn_db->parms.up)
                        dn_db->parms.up(dev);
        }

        return 0;
#else
        return -EINVAL;
#endif
}
#else /* CONFIG_SYSCTL */

/* Sysctl support compiled out: registration hooks become no-ops. */
static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
{
}

static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms)
{
}

#endif /* CONFIG_SYSCTL */
static inline __u16 mtu2blksize(struct net_device *dev)
{
u32 blksize = dev->mtu;
if (blksize > 0xffff)
blksize = 0xffff;
if (dev->type == ARPHRD_ETHER ||
dev->type == ARPHRD_PPP ||
dev->type == ARPHRD_IPGRE ||
dev->type == ARPHRD_LOOPBACK)
blksize -= 2;
return (__u16)blksize;
}
/* Allocate a zeroed interface-address record; NULL on allocation failure. */
static struct dn_ifaddr *dn_dev_alloc_ifa(void)
{
        return kzalloc(sizeof(struct dn_ifaddr), GFP_KERNEL);
}
/* Release an interface-address record allocated by dn_dev_alloc_ifa(). */
static __inline__ void dn_dev_free_ifa(struct dn_ifaddr *ifa)
{
        kfree(ifa);
}
/*
 * dn_dev_del_ifa - unlink (and optionally destroy) an interface address
 * @dn_db: the device's DECnet state
 * @ifap: pointer to the list link holding the address (caller located it)
 * @destroy: non-zero to free the address and, if it was the last one,
 *           tear down the whole DECnet device
 *
 * Drops the derived Ethernet multicast membership unless the address is
 * the device's own MAC-derived one, then fires netlink and notifier-chain
 * events. Must be called under RTNL.
 */
static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int destroy)
{
        struct dn_ifaddr *ifa1 = *ifap;
        unsigned char mac_addr[6];
        struct net_device *dev = dn_db->dev;

        ASSERT_RTNL();

        *ifap = ifa1->ifa_next;

        if (dn_db->dev->type == ARPHRD_ETHER) {
                if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) {
                        dn_dn2eth(mac_addr, ifa1->ifa_local);
                        dev_mc_delete(dev, mac_addr, ETH_ALEN, 0);
                }
        }

        dn_ifaddr_notify(RTM_DELADDR, ifa1);
        blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1);
        if (destroy) {
                dn_dev_free_ifa(ifa1);

                if (dn_db->ifa_list == NULL)
                        dn_dev_delete(dn_db->dev);
        }
}
/*
 * dn_dev_insert_ifa - add an interface address to a device
 * @dn_db: the device's DECnet state
 * @ifa: the address to insert (ownership passes on success)
 *
 * Rejects duplicates with -EEXIST. On Ethernet, joins the multicast group
 * derived from the address unless it matches the device's own MAC. Emits
 * netlink and notifier events. Must be called under RTNL.
 */
static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
{
        struct net_device *dev = dn_db->dev;
        struct dn_ifaddr *ifa1;
        unsigned char mac_addr[6];

        ASSERT_RTNL();

        /* Check for duplicates */
        for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
                if (ifa1->ifa_local == ifa->ifa_local)
                        return -EEXIST;
        }

        if (dev->type == ARPHRD_ETHER) {
                if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) {
                        dn_dn2eth(mac_addr, ifa->ifa_local);
                        dev_mc_add(dev, mac_addr, ETH_ALEN, 0);
                }
        }

        ifa->ifa_next = dn_db->ifa_list;
        dn_db->ifa_list = ifa;

        dn_ifaddr_notify(RTM_NEWADDR, ifa);
        blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);

        return 0;
}
/*
 * dn_dev_set_ifa - attach an address to a device, creating DECnet state
 * @dev: target network device
 * @ifa: address record; freed here on insertion failure
 *
 * Lazily creates the per-device dn_dev if this is the first address.
 * Returns 0 on success or a negative errno.
 */
static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa)
{
        struct dn_dev *dn_db = dev->dn_ptr;
        int rv;

        if (dn_db == NULL) {
                int err;
                dn_db = dn_dev_create(dev, &err);
                if (dn_db == NULL)
                        return err;
        }

        ifa->ifa_dev = dn_db;

        if (dev->flags & IFF_LOOPBACK)
                ifa->ifa_scope = RT_SCOPE_HOST;

        rv = dn_dev_insert_ifa(dn_db, ifa);
        if (rv)
                dn_dev_free_ifa(ifa);
        return rv;
}
/*
 * dn_dev_ioctl - handle SIOCGIFADDR / SIOCSIFADDR for DECnet
 * @cmd: the ioctl number
 * @arg: userspace pointer to a (DECnet-sized) struct ifreq
 *
 * SIOCGIFADDR returns the address whose label matches the interface name;
 * SIOCSIFADDR (CAP_NET_ADMIN only) replaces or creates it. All list
 * manipulation happens under RTNL. Returns 0 or a negative errno.
 */
int dn_dev_ioctl(unsigned int cmd, void __user *arg)
{
        char buffer[DN_IFREQ_SIZE];
        struct ifreq *ifr = (struct ifreq *)buffer;
        struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
        struct dn_dev *dn_db;
        struct net_device *dev;
        struct dn_ifaddr *ifa = NULL, **ifap = NULL;
        int ret = 0;

        if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
                return -EFAULT;
        /* Ensure the user-supplied name is NUL terminated. */
        ifr->ifr_name[IFNAMSIZ-1] = 0;

        dev_load(&init_net, ifr->ifr_name);

        switch(cmd) {
                case SIOCGIFADDR:
                        break;
                case SIOCSIFADDR:
                        if (!capable(CAP_NET_ADMIN))
                                return -EACCES;
                        if (sdn->sdn_family != AF_DECnet)
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
        }

        rtnl_lock();

        if ((dev = __dev_get_by_name(&init_net, ifr->ifr_name)) == NULL) {
                ret = -ENODEV;
                goto done;
        }

        /* Locate the address whose label matches the interface name. */
        if ((dn_db = dev->dn_ptr) != NULL) {
                for (ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next)
                        if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
                                break;
        }

        if (ifa == NULL && cmd != SIOCSIFADDR) {
                ret = -EADDRNOTAVAIL;
                goto done;
        }

        switch(cmd) {
                case SIOCGIFADDR:
                        *((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local;
                        goto rarok;

                case SIOCSIFADDR:
                        if (!ifa) {
                                if ((ifa = dn_dev_alloc_ifa()) == NULL) {
                                        ret = -ENOBUFS;
                                        break;
                                }
                                memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
                        } else {
                                /* No change requested: nothing to do. */
                                if (ifa->ifa_local == dn_saddr2dn(sdn))
                                        break;
                                dn_dev_del_ifa(dn_db, ifap, 0);
                        }

                        ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn);

                        ret = dn_dev_set_ifa(dev, ifa);
        }
done:
        rtnl_unlock();

        return ret;
rarok:
        /* Copy the result back to userspace, then fall into "done". */
        if (copy_to_user(arg, ifr, DN_IFREQ_SIZE))
                ret = -EFAULT;
        goto done;
}
/*
 * Return the default DECnet device with a reference held, or NULL if none
 * is set or the candidate has no DECnet state attached.
 */
struct net_device *dn_dev_get_default(void)
{
        struct net_device *candidate;

        read_lock(&dndev_lock);
        candidate = decnet_default_device;
        if (candidate && candidate->dn_ptr)
                dev_hold(candidate);
        else
                candidate = NULL;
        read_unlock(&dndev_lock);

        return candidate;
}
/*
 * dn_dev_set_default - install @dev as the default DECnet device
 * @dev: candidate device (must already have DECnet state)
 * @force: replace an existing default rather than failing with -EBUSY
 *
 * The reference on the displaced device is dropped outside the lock.
 */
int dn_dev_set_default(struct net_device *dev, int force)
{
        struct net_device *old = NULL;
        int rv = -EBUSY;
        if (!dev->dn_ptr)
                return -ENODEV;
        write_lock(&dndev_lock);
        if (force || decnet_default_device == NULL) {
                old = decnet_default_device;
                decnet_default_device = dev;
                rv = 0;
        }
        write_unlock(&dndev_lock);
        if (old)
                dev_put(old);
        return rv;
}
/*
 * If @dev is the current default device, clear the default and drop the
 * reference it held. Note the trick: @dev is reused as a flag — it is
 * NULLed when it was NOT the default, so dev_put() only runs when the
 * default was actually cleared.
 */
static void dn_dev_check_default(struct net_device *dev)
{
        write_lock(&dndev_lock);
        if (dev == decnet_default_device) {
                decnet_default_device = NULL;
        } else {
                dev = NULL;
        }
        write_unlock(&dndev_lock);
        if (dev)
                dev_put(dev);
}
/*
 * Look up the DECnet state for an interface index; NULL if the interface
 * does not exist or has no DECnet state.
 */
static struct dn_dev *dn_dev_by_index(int ifindex)
{
        struct dn_dev *result = NULL;
        struct net_device *netdev = dev_get_by_index(&init_net, ifindex);

        if (netdev) {
                result = netdev->dn_ptr;
                dev_put(netdev);
        }

        return result;
}
/* Netlink attribute policy for RTM_NEWADDR/RTM_DELADDR (DECnet uses
 * 16-bit node addresses, hence NLA_U16). */
static const struct nla_policy dn_ifa_policy[IFA_MAX+1] = {
        [IFA_ADDRESS]           = { .type = NLA_U16 },
        [IFA_LOCAL]             = { .type = NLA_U16 },
        [IFA_LABEL]             = { .type = NLA_STRING,
                                    .len = IFNAMSIZ - 1 },
};
/*
 * Netlink RTM_DELADDR handler: delete the first address on the named
 * interface matching the optional IFA_LOCAL and/or IFA_LABEL filters.
 * DECnet only exists in the initial namespace. Returns 0 or -errno.
 */
static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[IFA_MAX+1];
        struct dn_dev *dn_db;
        struct ifaddrmsg *ifm;
        struct dn_ifaddr *ifa, **ifap;
        int err = -EINVAL;

        if (net != &init_net)
                goto errout;

        err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
        if (err < 0)
                goto errout;

        err = -ENODEV;
        ifm = nlmsg_data(nlh);
        if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL)
                goto errout;

        err = -EADDRNOTAVAIL;
        for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) {
                if (tb[IFA_LOCAL] &&
                    nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
                        continue;

                if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
                        continue;

                dn_dev_del_ifa(dn_db, ifap, 1);
                return 0;
        }

errout:
        return err;
}
/*
 * Netlink RTM_NEWADDR handler: add an address to an interface, creating
 * its DECnet state on first use. IFA_LOCAL is mandatory; IFA_ADDRESS
 * defaults to it and IFA_LABEL defaults to the device name.
 * Returns 0 or -errno.
 */
static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tb[IFA_MAX+1];
        struct net_device *dev;
        struct dn_dev *dn_db;
        struct ifaddrmsg *ifm;
        struct dn_ifaddr *ifa;
        int err;

        if (net != &init_net)
                return -EINVAL;

        err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
        if (err < 0)
                return err;

        if (tb[IFA_LOCAL] == NULL)
                return -EINVAL;

        ifm = nlmsg_data(nlh);
        if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL)
                return -ENODEV;

        if ((dn_db = dev->dn_ptr) == NULL) {
                dn_db = dn_dev_create(dev, &err);
                if (!dn_db)
                        return err;
        }

        if ((ifa = dn_dev_alloc_ifa()) == NULL)
                return -ENOBUFS;

        if (tb[IFA_ADDRESS] == NULL)
                tb[IFA_ADDRESS] = tb[IFA_LOCAL];

        ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]);
        ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]);
        ifa->ifa_flags = ifm->ifa_flags;
        ifa->ifa_scope = ifm->ifa_scope;
        ifa->ifa_dev = dn_db;

        if (tb[IFA_LABEL])
                nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
        else
                memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);

        err = dn_dev_insert_ifa(dn_db, ifa);
        if (err)
                dn_dev_free_ifa(ifa);

        return err;
}
/* Worst-case netlink message size for one DECnet ifaddr notification. */
static inline size_t dn_ifaddr_nlmsg_size(void)
{
        return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
               + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
               + nla_total_size(2) /* IFA_ADDRESS */
               + nla_total_size(2); /* IFA_LOCAL */
}
/*
 * Serialize one interface address into an RTM_* netlink message.
 * The NLA_PUT_* macros jump to nla_put_failure on a full skb, where the
 * partial message is cancelled. Returns the message length or -EMSGSIZE.
 */
static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
                             u32 pid, u32 seq, int event, unsigned int flags)
{
        struct ifaddrmsg *ifm;
        struct nlmsghdr *nlh;

        nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
        if (nlh == NULL)
                return -EMSGSIZE;

        ifm = nlmsg_data(nlh);
        ifm->ifa_family = AF_DECnet;
        ifm->ifa_prefixlen = 16;
        ifm->ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT;
        ifm->ifa_scope = ifa->ifa_scope;
        ifm->ifa_index = ifa->ifa_dev->dev->ifindex;

        if (ifa->ifa_address)
                NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address);
        if (ifa->ifa_local)
                NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local);
        if (ifa->ifa_label[0])
                NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);

        return nlmsg_end(skb, nlh);

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}
/*
 * Broadcast an address add/delete event to RTNLGRP_DECnet_IFADDR
 * listeners; on failure the error is recorded on the rtnetlink socket.
 */
static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa)
{
        struct sk_buff *skb;
        int err = -ENOBUFS;

        skb = alloc_skb(dn_ifaddr_nlmsg_size(), GFP_KERNEL);
        if (skb == NULL)
                goto errout;

        err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in dn_ifaddr_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }
        rtnl_notify(skb, &init_net, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL);
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_IFADDR, err);
}
/*
 * Netlink dump callback: emit every DECnet address on every device.
 * cb->args[0]/[1] carry the device index and address index already dumped,
 * so a continued dump resumes where the previous skb filled up.
 */
static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        int idx, dn_idx = 0, skip_ndevs, skip_naddr;
        struct net_device *dev;
        struct dn_dev *dn_db;
        struct dn_ifaddr *ifa;

        if (net != &init_net)
                return 0;

        skip_ndevs = cb->args[0];
        skip_naddr = cb->args[1];

        idx = 0;
        for_each_netdev(&init_net, dev) {
                if (idx < skip_ndevs)
                        goto cont;
                else if (idx > skip_ndevs) {
                        /* Only skip over addresses for first dev dumped
                         * in this iteration (idx == skip_ndevs) */
                        skip_naddr = 0;
                }

                if ((dn_db = dev->dn_ptr) == NULL)
                        goto cont;

                for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
                     ifa = ifa->ifa_next, dn_idx++) {
                        if (dn_idx < skip_naddr)
                                continue;

                        if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
                                              cb->nlh->nlmsg_seq, RTM_NEWADDR,
                                              NLM_F_MULTI) < 0)
                                goto done;
                }
cont:
                idx++;
        }
done:
        cb->args[0] = idx;
        cb->args[1] = dn_idx;

        return skb->len;
}
/*
 * Fetch the first DECnet address configured on @dev into *addr.
 * Returns 0 on success, -ENODEV if the device has no DECnet state or no
 * addresses.
 */
static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
{
        struct dn_dev *db = dev->dn_ptr;
        struct dn_ifaddr *first;

        if (db == NULL)
                return -ENODEV;

        first = db->ifa_list;
        if (first == NULL)
                return -ENODEV;

        *addr = first->ifa_local;
        return 0;
}
/*
 * Find a default address to bind to.
 *
 * This is one of those areas where the initial VMS concepts don't really
 * map onto the Linux concepts, and since we introduced multiple addresses
 * per interface we have to cope with slightly odd ways of finding out what
 * "our address" really is. Mostly it's not a problem; for this we just guess
 * a sensible default. Eventually the routing code will take care of all the
 * nasties for us I hope.
 */
int dn_dev_bind_default(__le16 *addr)
{
        struct net_device *dev;
        int rv;
        dev = dn_dev_get_default();
last_chance:
        /*
         * Try the default device first; if that fails (and it wasn't
         * already loopback) loop back — literally — via last_chance to
         * try the loopback device as the final fallback.
         */
        if (dev) {
                read_lock(&dev_base_lock);
                rv = dn_dev_get_first(dev, addr);
                read_unlock(&dev_base_lock);
                dev_put(dev);
                if (rv == 0 || dev == init_net.loopback_dev)
                        return rv;
        }
        dev = init_net.loopback_dev;
        dev_hold(dev);
        goto last_chance;
}
/*
 * Build and transmit an endnode hello for @ifa on @dev: fill in the
 * hello message fields (version, node id, block size, designated-router
 * neighbor if known), prepend the 16-bit length header, and hand the
 * frame to the routing layer addressed to the all-routers multicast.
 */
static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
{
        struct endnode_hello_message *msg;
        struct sk_buff *skb = NULL;
        __le16 *pktlen;
        struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;

        if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
                return;

        skb->dev = dev;

        msg = (struct endnode_hello_message *)skb_put(skb,sizeof(*msg));

        msg->msgflg  = 0x0D;
        memcpy(msg->tiver, dn_eco_version, 3);
        dn_dn2eth(msg->id, ifa->ifa_local);
        msg->iinfo   = DN_RT_INFO_ENDN;
        msg->blksize = cpu_to_le16(mtu2blksize(dev));
        msg->area    = 0x00;
        memset(msg->seed, 0, 8);
        /* Default neighbor is the hi-ord prefix; overridden below if a
         * designated router is known. */
        memcpy(msg->neighbor, dn_hiord, ETH_ALEN);

        if (dn_db->router) {
                struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
                dn_dn2eth(msg->neighbor, dn->addr);
        }

        msg->timer   = cpu_to_le16((unsigned short)dn_db->parms.t3);
        msg->mpd     = 0x00;
        msg->datalen = 0x02;
        memset(msg->data, 0xAA, 2);

        /* DECnet frames carry a little-endian length word up front. */
        pktlen = (__le16 *)skb_push(skb,2);
        *pktlen = cpu_to_le16(skb->len - 2);

        skb_reset_network_header(skb);

        dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id);
}
#define DRDELAY (5 * HZ)
static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa)
{
/* First check time since device went up */
if ((jiffies - dn_db->uptime) < DRDELAY)
return 0;
/* If there is no router, then yes... */
if (!dn_db->router)
return 1;
/* otherwise only if we have a higher priority or.. */
if (dn->priority < dn_db->parms.priority)
return 1;
/* if we have equal priority and a higher node number */
if (dn->priority != dn_db->parms.priority)
return 0;
if (le16_to_cpu(dn->addr) < le16_to_cpu(ifa->ifa_local))
return 1;
return 0;
}
/*
 * Build and transmit a router hello for @ifa on @dev.
 *
 * The packet is assembled by hand: a 26-byte fixed header followed by up
 * to 32 seven-byte neighbour entries (bounded by the link block size),
 * then trimmed to the actual neighbour count and prefixed with the
 * little-endian length word. If we qualify as designated router the hello
 * is additionally copied to the all-endnodes multicast.
 */
static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
{
        int n;
        struct dn_dev *dn_db = dev->dn_ptr;
        struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
        struct sk_buff *skb;
        size_t size;
        unsigned char *ptr;
        unsigned char *i1, *i2;
        __le16 *pktlen;
        char *src;

        if (mtu2blksize(dev) < (26 + 7))
                return;

        /* How many 7-byte neighbour entries fit, capped at 32. */
        n = mtu2blksize(dev) - 26;
        n /= 7;

        if (n > 32)
                n = 32;

        size = 2 + 26 + 7 * n;

        if ((skb = dn_alloc_skb(NULL, size, GFP_ATOMIC)) == NULL)
                return;

        skb->dev = dev;

        ptr = skb_put(skb, size);

        *ptr++ = DN_RT_PKT_CNTL | DN_RT_PKT_ERTH;
        *ptr++ = 2; /* ECO */
        *ptr++ = 0;
        *ptr++ = 0;
        dn_dn2eth(ptr, ifa->ifa_local);
        src = ptr;
        ptr += ETH_ALEN;
        *ptr++ = dn_db->parms.forwarding == 1 ?
                        DN_RT_INFO_L1RT : DN_RT_INFO_L2RT;
        *((__le16 *)ptr) = cpu_to_le16(mtu2blksize(dev));
        ptr += 2;
        *ptr++ = dn_db->parms.priority; /* Priority */

        *ptr++ = 0; /* Area: Reserved */
        *((__le16 *)ptr) = cpu_to_le16((unsigned short)dn_db->parms.t3);
        ptr += 2;
        *ptr++ = 0; /* MPD: Reserved */
        /* i1/i2 are back-patched length bytes once n is known. */
        i1 = ptr++;
        memset(ptr, 0, 7); /* Name: Reserved */
        ptr += 7;
        i2 = ptr++;

        n = dn_neigh_elist(dev, ptr, n);

        *i2 = 7 * n;
        *i1 = 8 + *i2;

        skb_trim(skb, (27 + *i2));

        pktlen = (__le16 *)skb_push(skb, 2);
        *pktlen = cpu_to_le16(skb->len - 2);

        skb_reset_network_header(skb);

        /* Designated router also hellos the endnodes. */
        if (dn_am_i_a_router(dn, dn_db, ifa)) {
                struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
                if (skb2) {
                        dn_rt_finish_output(skb2, dn_rt_all_end_mcast, src);
                }
        }

        dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
}
/*
 * Send the hello appropriate to this interface's role: a router hello
 * when forwarding is enabled, otherwise an endnode hello.
 */
static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
{
	struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;

	if (dn_db->parms.forwarding != 0)
		dn_send_router_hello(dev, ifa);
	else
		dn_send_endnode_hello(dev, ifa);
}
/*
 * Send a point-to-point hello on @dev: the HELO packet type, our
 * address, a test-data length byte and @tdlen bytes of the 0252
 * (binary 10101010) test pattern.
 */
static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa)
{
	int tdlen = 16;
	int size = dev->hard_header_len + 2 + 4 + tdlen;
	struct sk_buff *skb = dn_alloc_skb(NULL, size, GFP_ATOMIC);
	int i;
	unsigned char *ptr;
	char src[ETH_ALEN];

	if (skb == NULL)
		return ;

	skb->dev = dev;
	skb_push(skb, dev->hard_header_len);
	ptr = skb_put(skb, 2 + 4 + tdlen);

	*ptr++ = DN_RT_PKT_HELO;
	/* ifa_local is stored as-is: already a little-endian on-wire value */
	*((__le16 *)ptr) = ifa->ifa_local;
	ptr += 2;
	*ptr++ = tdlen;

	/* test pattern payload */
	for(i = 0; i < tdlen; i++)
		*ptr++ = 0252;

	dn_dn2eth(src, ifa->ifa_local);
	dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
}
/*
 * Ethernet bring-up hook: join the multicast group matching our role
 * (all-routers when forwarding, all-endnodes otherwise) and select the
 * long header format.  Always succeeds.
 */
static int dn_eth_up(struct net_device *dev)
{
	struct dn_dev *dn_db = dev->dn_ptr;
	char *mcast = dn_db->parms.forwarding ? dn_rt_all_rt_mcast
					      : dn_rt_all_end_mcast;

	dev_mc_add(dev, mcast, ETH_ALEN, 0);
	dn_db->use_long = 1;
	return 0;
}
/*
 * Ethernet tear-down hook: leave the multicast group joined by
 * dn_eth_up() for this device's role.
 */
static void dn_eth_down(struct net_device *dev)
{
	struct dn_dev *dn_db = dev->dn_ptr;
	char *mcast = dn_db->parms.forwarding ? dn_rt_all_rt_mcast
					      : dn_rt_all_end_mcast;

	dev_mc_delete(dev, mcast, ETH_ALEN, 0);
}
static void dn_dev_set_timer(struct net_device *dev);
/*
 * Per-device periodic timer, fired every t2 seconds.  When the t3
 * countdown expires it runs the parms.timer3 hook (hello transmit) for
 * every primary address on the device, then re-arms itself.
 */
static void dn_dev_timer_func(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	struct dn_dev *dn_db = dev->dn_ptr;
	struct dn_ifaddr *ifa;

	if (dn_db->t3 <= dn_db->parms.t2) {
		/* t3 expired: hello on every primary address */
		if (dn_db->parms.timer3) {
			for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
				if (!(ifa->ifa_flags & IFA_F_SECONDARY))
					dn_db->parms.timer3(dev, ifa);
			}
		}
		dn_db->t3 = dn_db->parms.t3;	/* restart the countdown */
	} else {
		dn_db->t3 -= dn_db->parms.t2;
	}

	dn_dev_set_timer(dev);
}
/*
 * (Re)arm the per-device timer to fire in t2 seconds.
 * NOTE(review): when t2 > t3 this permanently lowers parms.t2 — a
 * persistent side effect rather than a one-shot clamp; verify intended.
 */
static void dn_dev_set_timer(struct net_device *dev)
{
	struct dn_dev *dn_db = dev->dn_ptr;

	if (dn_db->parms.t2 > dn_db->parms.t3)
		dn_db->parms.t2 = dn_db->parms.t3;

	dn_db->timer.data = (unsigned long)dev;
	dn_db->timer.function = dn_dev_timer_func;
	dn_db->timer.expires = jiffies + (dn_db->parms.t2 * HZ);

	add_timer(&dn_db->timer);
}
/*
 * Allocate and initialise per-device DECnet state for @dev from the
 * dn_dev_list template matching its hardware type.  On success the
 * state is attached to dev->dn_ptr, sysctls are registered and the
 * periodic timer started.  On failure returns NULL with *err set to
 * -ENODEV (unsupported type) or -ENOBUFS (allocation/bring-up failed).
 */
static struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
{
	int i;
	struct dn_dev_parms *p = dn_dev_list;
	struct dn_dev *dn_db;

	/* Find the parameter template for this hardware type */
	for(i = 0; i < DN_DEV_LIST_SIZE; i++, p++) {
		if (p->type == dev->type)
			break;
	}

	*err = -ENODEV;
	if (i == DN_DEV_LIST_SIZE)
		return NULL;

	*err = -ENOBUFS;
	if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL)
		return NULL;

	memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));

	/* make the initialised structure visible before publishing it */
	smp_wmb();
	dev->dn_ptr = dn_db;
	dn_db->dev = dev;
	init_timer(&dn_db->timer);

	dn_db->uptime = jiffies;

	dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
	if (!dn_db->neigh_parms) {
		dev->dn_ptr = NULL;
		kfree(dn_db);
		return NULL;
	}

	/* device-class specific bring-up (e.g. multicast join for ethernet) */
	if (dn_db->parms.up) {
		if (dn_db->parms.up(dev) < 0) {
			neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms);
			dev->dn_ptr = NULL;
			kfree(dn_db);
			return NULL;
		}
	}

	dn_dev_sysctl_register(dev, &dn_db->parms);

	dn_dev_set_timer(dev);

	*err = 0;
	return dn_db;
}
/*
* This processes a device up event. We only start up
* the loopback device & ethernet devices with correct
* MAC addreses automatically. Others must be started
* specifically.
*
* FIXME: How should we configure the loopback address ? If we could dispense
* with using decnet_address here and for autobind, it will be one less thing
* for users to worry about setting up.
*/
/*
 * Device-up handler: create the dn_db if the device lacks one, then for
 * loopback or ethernet devices carrying the DECnet MAC prefix attach a
 * primary address derived from decnet_address or the device MAC.
 */
void dn_dev_up(struct net_device *dev)
{
	struct dn_ifaddr *ifa;
	__le16 addr = decnet_address;
	int maybe_default = 0;
	struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;

	/* only ethernet and loopback devices are autoconfigured */
	if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
		return;

	/*
	 * Need to ensure that loopback device has a dn_db attached to it
	 * to allow creation of neighbours against it, even though it might
	 * not have a local address of its own. Might as well do the same for
	 * all autoconfigured interfaces.
	 */
	if (dn_db == NULL) {
		int err;
		dn_db = dn_dev_create(dev, &err);
		if (dn_db == NULL)
			return;
	}

	if (dev->type == ARPHRD_ETHER) {
		/* only MACs starting with the DECnet prefix qualify */
		if (memcmp(dev->dev_addr, dn_hiord, 4) != 0)
			return;
		addr = dn_eth2dn(dev->dev_addr);
		maybe_default = 1;
	}

	if (addr == 0)
		return;

	if ((ifa = dn_dev_alloc_ifa()) == NULL)
		return;

	ifa->ifa_local = ifa->ifa_address = addr;
	ifa->ifa_flags = 0;
	ifa->ifa_scope = RT_SCOPE_UNIVERSE;
	strcpy(ifa->ifa_label, dev->name);

	dn_dev_set_ifa(dev, ifa);

	/*
	 * Automagically set the default device to the first automatically
	 * configured ethernet card in the system.
	 */
	if (maybe_default) {
		dev_hold(dev);
		if (dn_dev_set_default(dev, 0))
			dev_put(dev);
	}
}
/*
 * Tear down and free the DECnet state attached to @dev: stop the timer,
 * unregister sysctls, clear the default-device slot, run the class
 * down hook, and drop neighbour state and router/peer references.
 */
static void dn_dev_delete(struct net_device *dev)
{
	struct dn_dev *dn_db = dev->dn_ptr;

	if (dn_db == NULL)
		return;

	del_timer_sync(&dn_db->timer);
	dn_dev_sysctl_unregister(&dn_db->parms);
	dn_dev_check_default(dev);
	neigh_ifdown(&dn_neigh_table, dev);

	if (dn_db->parms.down)
		dn_db->parms.down(dev);

	dev->dn_ptr = NULL;

	neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms);
	/*
	 * NOTE(review): second neigh_ifdown looks redundant with the call
	 * above — presumably it catches neighbours created in between;
	 * verify before removing.
	 */
	neigh_ifdown(&dn_neigh_table, dev);

	if (dn_db->router)
		neigh_release(dn_db->router);
	if (dn_db->peer)
		neigh_release(dn_db->peer);

	kfree(dn_db);
}
/*
 * Take DECnet down on @dev: release every configured address, then
 * destroy the device's DECnet state.
 */
void dn_dev_down(struct net_device *dev)
{
	struct dn_dev *dn_db = dev->dn_ptr;
	struct dn_ifaddr *ifa;

	if (!dn_db)
		return;

	for (;;) {
		ifa = dn_db->ifa_list;
		if (ifa == NULL)
			break;
		dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0);
		dn_dev_free_ifa(ifa);
	}

	dn_dev_delete(dev);
}
/* Placeholder hook: DECnet initialisation packets are not processed. */
void dn_dev_init_pkt(struct sk_buff *skb)
{
}
/* Placeholder hook: DECnet verification packets are not processed. */
void dn_dev_veri_pkt(struct sk_buff *skb)
{
}
/* Placeholder hook: incoming hello packets are not processed here. */
void dn_dev_hello(struct sk_buff *skb)
{
}
/* Bring DECnet down on every device in the init namespace. */
void dn_dev_devices_off(void)
{
	struct net_device *netdev;

	rtnl_lock();
	for_each_netdev(&init_net, netdev)
		dn_dev_down(netdev);
	rtnl_unlock();
}
/* Bring DECnet up on every device that is already IFF_UP. */
void dn_dev_devices_on(void)
{
	struct net_device *netdev;

	rtnl_lock();
	for_each_netdev(&init_net, netdev) {
		if (netdev->flags & IFF_UP)
			dn_dev_up(netdev);
	}
	rtnl_unlock();
}
/* Subscribe @nb to DECnet address change notifications. */
int register_dnaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&dnaddr_chain, nb);
}
/* Unsubscribe @nb from DECnet address change notifications. */
int unregister_dnaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&dnaddr_chain, nb);
}
#ifdef CONFIG_PROC_FS
/* True when DECnet state is attached to @dev. */
static inline int is_dn_dev(struct net_device *dev)
{
	return !!dev->dn_ptr;
}
/*
 * seq_file start: take dev_base_lock, return SEQ_START_TOKEN for
 * position 0 (header row) or the *pos'th DECnet-enabled device.
 */
static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(&dev_base_lock)
{
	int i;
	struct net_device *dev;

	read_lock(&dev_base_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	/* positions 1..N map to the Nth device carrying DECnet state */
	i = 1;
	for_each_netdev(&init_net, dev) {
		if (!is_dn_dev(dev))
			continue;
		if (i++ == *pos)
			return dev;
	}

	return NULL;
}
/*
 * seq_file next: advance *pos and return the next DECnet-enabled
 * device, starting from the list head when leaving the header token.
 */
static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net_device *dev;

	++*pos;

	dev = (struct net_device *)v;
	if (v == SEQ_START_TOKEN)
		dev = net_device_entry(&init_net.dev_base_head);

	for_each_netdev_continue(&init_net, dev) {
		if (!is_dn_dev(dev))
			continue;
		return dev;
	}

	return NULL;
}
/* seq_file stop: release the lock taken in dn_dev_seq_start(). */
static void dn_dev_seq_stop(struct seq_file *seq, void *v)
	__releases(&dev_base_lock)
{
	read_unlock(&dev_base_lock);
}
/*
 * Map a device mode constant to its one-letter label for /proc output;
 * unknown modes map to "?".
 */
static char *dn_type2asc(char type)
{
	if (type == DN_DEV_BCAST)
		return "B";
	if (type == DN_DEV_UCAST)
		return "U";
	if (type == DN_DEV_MPOINT)
		return "M";
	return "?";
}
/*
 * seq_file show: print the header row for the start token, otherwise a
 * one-line summary of a device's DECnet parameters and its current
 * router/peer addresses (empty strings when not known).
 */
static int dn_dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Name Flags T1 Timer1 T3 Timer3 BlkSize Pri State DevType Router Peer\n");
	else {
		struct net_device *dev = v;
		char peer_buf[DN_ASCBUF_LEN];
		char router_buf[DN_ASCBUF_LEN];
		struct dn_dev *dn_db = dev->dn_ptr;

		/* T1/Timer1 are not tracked here, hence the constant zeros */
		seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu"
				" %04hu %03d %02x %-10s %-7s %-7s\n",
				dev->name ? dev->name : "???",
				dn_type2asc(dn_db->parms.mode),
				0, 0,
				dn_db->t3, dn_db->parms.t3,
				mtu2blksize(dev),
				dn_db->parms.priority,
				dn_db->parms.state, dn_db->parms.name,
				dn_db->router ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->router->primary_key), router_buf) : "",
				dn_db->peer ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->peer->primary_key), peer_buf) : "");
	}
	return 0;
}
/* seq_file iterator for /proc/net/decnet_dev */
static const struct seq_operations dn_dev_seq_ops = {
	.start	= dn_dev_seq_start,
	.next	= dn_dev_seq_next,
	.stop	= dn_dev_seq_stop,
	.show	= dn_dev_seq_show,
};
/* Open /proc/net/decnet_dev: a plain seq_file over the device list. */
static int dn_dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &dn_dev_seq_ops);
}
/* file_operations for /proc/net/decnet_dev (read-only seq_file) */
static const struct file_operations dn_dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dn_dev_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* CONFIG_PROC_FS */
/* Module parameter: DECnet address given as "area,node" (read-only in sysfs). */
static int addr[2];

module_param_array(addr, int, NULL, 0444);
MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node");
/*
 * Subsystem init: validate the module's "addr" parameter (area 0..63,
 * node 0..1023), derive decnet_address, bring up eligible devices and
 * register the rtnetlink handlers, /proc entry and sysctl tables.
 *
 * Fix: the two KERN_ERR messages were missing their "\n" terminators,
 * so the next kernel log line would be appended to them.
 */
void __init dn_dev_init(void)
{
	if (addr[0] > 63 || addr[0] < 0) {
		printk(KERN_ERR "DECnet: Area must be between 0 and 63\n");
		return;
	}

	if (addr[1] > 1023 || addr[1] < 0) {
		printk(KERN_ERR "DECnet: Node must be between 0 and 1023\n");
		return;
	}

	/* area in the top 6 bits, node in the low 10 */
	decnet_address = cpu_to_le16((addr[0] << 10) | addr[1]);

	dn_dev_devices_on();

	rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL);
	rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL);
	rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr);

	proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops);

#ifdef CONFIG_SYSCTL
	{
		int i;
		for(i = 0; i < DN_DEV_LIST_SIZE; i++)
			dn_dev_sysctl_register(NULL, &dn_dev_list[i]);
	}
#endif /* CONFIG_SYSCTL */
}
/*
 * Subsystem teardown: drop the sysctl tables, the /proc entry and all
 * per-device DECnet state.
 */
void __exit dn_dev_cleanup(void)
{
#ifdef CONFIG_SYSCTL
	{
		int tbl;

		for (tbl = 0; tbl < DN_DEV_LIST_SIZE; tbl++)
			dn_dev_sysctl_unregister(&dn_dev_list[tbl]);
	}
#endif /* CONFIG_SYSCTL */

	proc_net_remove(&init_net, "decnet_dev");

	dn_dev_devices_off();
}
| gpl-2.0 |
preludedrew/htc-kernel-msm7x30_fork | sound/oss/pss.c | 991 | 32902 | /*
* sound/oss/pss.c
*
* The low level driver for the Personal Sound System (ECHO ESC614).
*
*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*
*
* Thomas Sailer ioctl code reworked (vmalloc/vfree removed)
* Alan Cox modularisation, clean up.
*
* 98-02-21: Vladimir Michl <vladimir.michl@upol.cz>
* Added mixer device for Beethoven ADSP-16 (master volume,
* bass, treble, synth), only for speakers.
* Fixed bug in pss_write (exchange parameters)
* Fixed config port of SB
* Requested two regions for PSS (PSS mixer, PSS config)
* Modified pss_download_boot
* To probe_pss_mss added test for initialize AD1848
* 98-05-28: Vladimir Michl <vladimir.michl@upol.cz>
* Fixed computation of mixer volumes
* 04-05-1999: Anthony Barbachan <barbcode@xmen.cis.fordham.edu>
* Added code that allows the user to enable his cdrom and/or
* joystick through the module parameters pss_cdrom_port and
* pss_enable_joystick. pss_cdrom_port takes a port address as its
* argument. pss_enable_joystick takes either a 0 or a non-0 as its
* argument.
* 04-06-1999: Anthony Barbachan <barbcode@xmen.cis.fordham.edu>
* Separated some code into new functions for easier reuse.
* Cleaned up and streamlined new code. Added code to allow a user
* to only use this driver for enabling non-sound components
* through the new module parameter pss_no_sound (flag). Added
* code that would allow a user to decide whether the driver should
* reset the configured hardware settings for the PSS board through
* the module parameter pss_keep_settings (flag). This flag will
* allow a user to free up resources in use by this card if needbe,
* furthermore it allows him to use this driver to just enable the
* emulations and then be unloaded as it is no longer needed. Both
* new settings are only available to this driver if compiled as a
* module. The default settings of all new parameters are set to
* load the driver as it did in previous versions.
* 04-07-1999: Anthony Barbachan <barbcode@xmen.cis.fordham.edu>
* Added module parameter pss_firmware to allow the user to tell
* the driver where the firmware file is located. The default
* setting is the previous hardcoded setting "/etc/sound/pss_synth".
* 00-03-03: Christoph Hellwig <chhellwig@infradead.org>
* Adapted to module_init/module_exit
* 11-10-2000: Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
* Added __init to probe_pss(), attach_pss() and probe_pss_mpu()
* 02-Jan-2001: Chris Rankin
* Specify that this module owns the coprocessor
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include "sound_config.h"
#include "sound_firmware.h"
#include "ad1848.h"
#include "mpu401.h"
/*
* PSS registers.
*/
#define REG(x) (devc->base+x)
#define PSS_DATA 0
#define PSS_STATUS 2
#define PSS_CONTROL 2
#define PSS_ID 4
#define PSS_IRQACK 4
#define PSS_PIO 0x1a
/*
* Config registers
*/
#define CONF_PSS 0x10
#define CONF_WSS 0x12
#define CONF_SB 0x14
#define CONF_CDROM 0x16
#define CONF_MIDI 0x18
/*
* Status bits.
*/
#define PSS_FLAG3 0x0800
#define PSS_FLAG2 0x0400
#define PSS_FLAG1 0x1000
#define PSS_FLAG0 0x0800
#define PSS_WRITE_EMPTY 0x8000
#define PSS_READ_FULL 0x4000
/*
* WSS registers
*/
#define WSS_INDEX 4
#define WSS_DATA 5
/*
* WSS status bits
*/
#define WSS_INITIALIZING 0x80
#define WSS_AUTOCALIBRATION 0x20
#define NO_WSS_MIXER -1
#include "coproc.h"
#include "pss_boot.h"
/* Enables or disables the PSS mixer by default when built into the kernel */
#ifdef CONFIG_PSS_MIXER
static int pss_mixer = 1;
#else
static int pss_mixer;
#endif
/* Software copy of the PSS mixer settings (all levels are 0..100). */
typedef struct pss_mixerdata {
	unsigned int volume_l;	/* master volume, left channel */
	unsigned int volume_r;	/* master volume, right channel */
	unsigned int bass;
	unsigned int treble;
	unsigned int synth;	/* synthesizer volume */
} pss_mixerdata;
/* Per-card configuration/state; the driver supports a single card. */
typedef struct pss_confdata {
	int base;		/* I/O base address */
	int irq;
	int dma;
	int *osp;
	pss_mixerdata mixer;	/* cached mixer levels */
	int ad_mixer_dev;	/* attached AD1848 mixer index, or NO_WSS_MIXER */
} pss_confdata;
/* All driver state lives in this single instance. */
static pss_confdata pss_data;
static pss_confdata *devc = &pss_data;
static DEFINE_SPINLOCK(lock);	/* serialises DSP word-port transactions */

static int pss_initialized;		/* set once attach_pss() succeeded */
static int nonstandard_microcode;	/* user microcode overwrote the MIDI synth */
static int pss_cdrom_port = -1;	/* Parameter for the PSS cdrom port */
static int pss_enable_joystick;	/* Parameter for enabling the joystick */

static coproc_operations pss_coproc_operations;
/*
 * Write one 16-bit command word to the DSP data port, busy-waiting for
 * the write FIFO to drain.  Gives up after ~0.1s or a bounded number of
 * polls and logs a warning on timeout.
 */
static void pss_write(pss_confdata *devc, int data)
{
	unsigned long polls = 0;
	unsigned long deadline = jiffies + HZ/10;	/* 0.1 second timeout */

	/*
	 * The poll-count bound is an emergency exit: this may be called
	 * with interrupts disabled, in which case jiffies never advances.
	 * Normally the DSP accepts commands within a few iterations.
	 */
	while (polls++ < 5000000 && time_before(jiffies, deadline)) {
		if (inw(REG(PSS_STATUS)) & PSS_WRITE_EMPTY) {
			outw(data, REG(PSS_DATA));
			return;
		}
	}
	printk(KERN_WARNING "PSS: DSP Command (%04x) Timeout.\n", data);
}
/*
 * Probe for a PSS card at hw_config->io_base: validate the base
 * address, reserve the mixer/SB-emulation and config I/O regions and
 * check for the 'E' signature in the ID register.  Returns 1 when found
 * (with both regions held), 0 otherwise.
 */
static int __init probe_pss(struct address_info *hw_config)
{
	unsigned short id;
	int irq, dma;

	devc->base = hw_config->io_base;
	irq = devc->irq = hw_config->irq;
	dma = devc->dma = hw_config->dma;
	devc->osp = hw_config->osp;

	/* only the four base addresses the hardware can decode */
	if (devc->base != 0x220 && devc->base != 0x240)
		if (devc->base != 0x230 && devc->base != 0x250) /* Some cards use these */
			return 0;

	if (!request_region(devc->base, 0x10, "PSS mixer, SB emulation")) {
		printk(KERN_ERR "PSS: I/O port conflict\n");
		return 0;
	}
	id = inw(REG(PSS_ID));
	/* high byte of the ID register must read 'E' (ECHO) */
	if ((id >> 8) != 'E') {
		printk(KERN_ERR "No PSS signature detected at 0x%x (0x%x)\n", devc->base, id);
		release_region(devc->base, 0x10);
		return 0;
	}
	if (!request_region(devc->base + 0x10, 0x9, "PSS config")) {
		printk(KERN_ERR "PSS: I/O port conflict\n");
		release_region(devc->base, 0x10);
		return 0;
	}
	return 1;
}
/*
 * Program the IRQ field (bits 3..5) of configuration register @dev.
 * Only IRQs with a non-zero entry in irq_bits are supported; IRQ 0
 * means "no IRQ".  Returns 1 on success, 0 on an invalid IRQ.
 */
static int set_irq(pss_confdata * devc, int dev, int irq)
{
	static unsigned short irq_bits[16] =
	{
		0x0000, 0x0000, 0x0000, 0x0008,
		0x0000, 0x0010, 0x0000, 0x0018,
		0x0000, 0x0020, 0x0028, 0x0030,
		0x0038, 0x0000, 0x0000, 0x0000
	};

	unsigned short tmp, bits;

	if (irq < 0 || irq > 15)
		return 0;

	tmp = inw(REG(dev)) & ~0x38;	/* Load confreg, mask IRQ bits out */

	if ((bits = irq_bits[irq]) == 0 && irq != 0)
	{
		printk(KERN_ERR "PSS: Invalid IRQ %d\n", irq);
		return 0;
	}
	outw(tmp | bits, REG(dev));
	return 1;
}
/*
 * Program the I/O base address field of configuration register @dev,
 * preserving the low six (IRQ/DMA) bits.
 */
static void set_io_base(pss_confdata * devc, int dev, int base)
{
	unsigned short keep = inw(REG(dev)) & 0x003f;

	outw(keep | ((base & 0x0ffc) << 4), REG(dev));
}
/*
 * Program the DMA channel field (bits 0..2) of configuration register
 * @dev.  Returns 1 on success, 0 for an invalid channel.
 * NOTE(review): channel 4 maps to zero bits yet is not rejected —
 * presumably intentional (cascade channel); verify.
 */
static int set_dma(pss_confdata * devc, int dev, int dma)
{
	static unsigned short dma_bits[8] =
	{
		0x0001, 0x0002, 0x0000, 0x0003,
		0x0000, 0x0005, 0x0006, 0x0007
	};

	unsigned short tmp, bits;

	if (dma < 0 || dma > 7)
		return 0;

	tmp = inw(REG(dev)) & ~0x07;	/* Load confreg, mask DMA bits out */

	if ((bits = dma_bits[dma]) == 0 && dma != 4)
	{
		printk(KERN_ERR "PSS: Invalid DMA %d\n", dma);
		return 0;
	}
	outw(tmp | bits, REG(dev));
	return 1;
}
/*
 * Pulse the DSP reset bit: assert it, spin for ~0.1s (with a bounded
 * poll count) while reading the control register, then deassert.
 * Always reports success.
 */
static int pss_reset_dsp(pss_confdata * devc)
{
	unsigned long n;
	unsigned long stop = jiffies + HZ/10;

	outw(0x2000, REG(PSS_CONTROL));
	for (n = 0; n < 32768 && time_after_eq(stop, jiffies); n++)
		inw(REG(PSS_CONTROL));
	outw(0x0000, REG(PSS_CONTROL));
	return 1;
}
/*
 * Busy-wait until the DSP write FIFO is empty, then write @word.
 * Returns 1 on success, 0 if the FIFO never drained.
 */
static int pss_put_dspword(pss_confdata * devc, unsigned short word)
{
	int attempts = 327680;

	while (attempts-- > 0) {
		if (inw(REG(PSS_STATUS)) & PSS_WRITE_EMPTY) {
			outw(word, REG(PSS_DATA));
			return 1;
		}
	}
	return 0;
}
/*
 * Busy-wait until the DSP read FIFO has data, then read one word into
 * *word.  Returns 1 on success, 0 if nothing arrived in time.
 */
static int pss_get_dspword(pss_confdata * devc, unsigned short *word)
{
	int attempts = 327680;

	while (attempts-- > 0) {
		if (inw(REG(PSS_STATUS)) & PSS_READ_FULL) {
			*word = inw(REG(PSS_DATA));
			return 1;
		}
	}
	return 0;
}
/*
 * Download one block of DSP microcode.  @flags marks the first block
 * (CPF_FIRST: boot handshake + reset) and/or the last (CPF_LAST: pad,
 * then wait for the DSP to come up and report its version word).
 * Returns 1 on success, 0 on timeout.
 */
static int pss_download_boot(pss_confdata * devc, unsigned char *block, int size, int flags)
{
	int i, val, count;
	unsigned long limit;

	if (flags & CPF_FIRST)
	{
		/*_____ Warn DSP software that a boot is coming */
		outw(0x00fe, REG(PSS_DATA));

		limit = jiffies + HZ/10;

		/* wait for the DSP's 0x5500 acknowledge */
		for (i = 0; i < 32768 && time_before(jiffies, limit); i++)
			if (inw(REG(PSS_DATA)) == 0x5500)
				break;

		outw(*block++, REG(PSS_DATA));
		pss_reset_dsp(devc);
	}
	count = 1;
	while ((flags&CPF_LAST) || count<size )
	{
		int j;

		for (j = 0; j < 327670; j++)
		{
			/*_____ Wait for BG to appear */
			if (inw(REG(PSS_STATUS)) & PSS_FLAG3)
				break;
		}

		if (j == 327670)
		{
			/* It's ok we timed out when the file was empty */
			if (count >= size && flags & CPF_LAST)
				break;
			else
			{
				printk("\n");
				printk(KERN_ERR "PSS: Download timeout problems, byte %d=%d\n", count, size);
				return 0;
			}
		}
		/*_____ Send the next byte */
		if (count >= size)
		{
			/* If not data in block send 0xffff */
			outw (0xffff, REG (PSS_DATA));
		}
		else
		{
			/*_____ Send the next byte */
			outw (*block++, REG (PSS_DATA));
		};

		count++;
	}
	if (flags & CPF_LAST)
	{
		/*_____ Why */
		outw(0, REG(PSS_DATA));

		/* let the status register settle */
		limit = jiffies + HZ/10;
		for (i = 0; i < 32768 && time_after_eq(limit, jiffies); i++)
			val = inw(REG(PSS_STATUS));

		/* wait for bit 0x4000 (read-full) to rise */
		limit = jiffies + HZ/10;
		for (i = 0; i < 32768 && time_after_eq(limit, jiffies); i++)
		{
			val = inw(REG(PSS_STATUS));
			if (val & 0x4000)
				break;
		}

		/* now read the version */
		for (i = 0; i < 32000; i++)
		{
			val = inw(REG(PSS_STATUS));
			if (val & PSS_READ_FULL)
				break;
		}
		if (i == 32000)
			return 0;

		val = inw(REG(PSS_DATA));
		/* printk( "<PSS: microcode version %d.%d loaded>", val/16, val % 16); */
	}
	return 1;
}
/* Mixer */
/*
 * Set the master volume.  @left/@right are 0..100 and are mapped
 * through a logarithmic table to hardware attenuation values; the
 * channel (left/right) is selected by bit 8 of the second data word.
 */
static void set_master_volume(pss_confdata *devc, int left, int right)
{
	/* 0..100 -> hardware attenuation, logarithmic taper */
	static unsigned char log_scale[101] = {
		0xdb, 0xe0, 0xe3, 0xe5, 0xe7, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xed, 0xee,
		0xef, 0xef, 0xf0, 0xf0, 0xf1, 0xf1, 0xf2, 0xf2, 0xf2, 0xf3, 0xf3, 0xf3,
		0xf4, 0xf4, 0xf4, 0xf5, 0xf5, 0xf5, 0xf5, 0xf6, 0xf6, 0xf6, 0xf6, 0xf7,
		0xf7, 0xf7, 0xf7, 0xf7, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf9, 0xf9, 0xf9,
		0xf9, 0xf9, 0xf9, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfa, 0xfb, 0xfb,
		0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfb, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc, 0xfc,
		0xfc, 0xfc, 0xfc, 0xfc, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd, 0xfd,
		0xfd, 0xfd, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe,
		0xfe, 0xfe, 0xff, 0xff, 0xff
	};
	pss_write(devc, 0x0010);
	pss_write(devc, log_scale[left] | 0x0000);	/* left channel */
	pss_write(devc, 0x0010);
	pss_write(devc, log_scale[right] | 0x0100);	/* right channel */
}
/*
 * Set the synthesizer volume on both channels (DSP registers 0x80 and
 * 0x81).  @volume is 0..100, scaled linearly into 0..0x8000.
 */
static void set_synth_volume(pss_confdata *devc, int volume)
{
	int scaled = (0x8000 * volume) / 100L;

	pss_write(devc, 0x0080);
	pss_write(devc, scaled);
	pss_write(devc, 0x0081);
	pss_write(devc, scaled);
}
/*
 * Set the bass level.  @level is 0..100, mapped linearly onto the
 * hardware range 0xf0..0xfd and written to mixer channel 2.
 *
 * Fix: removed the stray semicolon after the function body (an empty
 * top-level declaration, rejected/warned under -pedantic).
 */
static void set_bass(pss_confdata *devc, int level)
{
	int vol = (int)(((0xfd - 0xf0) * level)/100L) + 0xf0;

	pss_write(devc, 0x0010);
	pss_write(devc, vol | 0x0200);
}
/*
 * Set the treble level.  @level is 0..100, mapped linearly onto the
 * hardware range 0xf0..0xfd and written to mixer channel 3.
 *
 * Fix: removed the stray semicolon after the function body (an empty
 * top-level declaration, rejected/warned under -pedantic).
 */
static void set_treble(pss_confdata *devc, int level)
{
	int vol = (((0xfd - 0xf0) * level)/100L) + 0xf0;

	pss_write(devc, 0x0010);
	pss_write(devc, vol | 0x0300);
}
/*
 * Restore the PSS mixer to its power-on defaults (volume 33/33,
 * bass/treble centred, synth 30, stereo mode) and mirror those values
 * into the cached mixer state when the PSS mixer is enabled.
 */
static void pss_mixer_reset(pss_confdata *devc)
{
	set_master_volume(devc, 33, 33);
	set_bass(devc, 50);
	set_treble(devc, 50);
	set_synth_volume(devc, 30);
	pss_write(devc, 0x0010);
	pss_write(devc, 0x0800 | 0xce);	/* Stereo */

	if (!pss_mixer)
		return;

	devc->mixer.volume_l = 33;
	devc->mixer.volume_r = 33;
	devc->mixer.bass = 50;
	devc->mixer.treble = 50;
	devc->mixer.synth = 30;
}
/*
 * Fetch a mono volume from user space: the low byte of *p is the
 * level, clamped to 100 and stored in *aleft.
 * Returns 0 on success, -EFAULT on a bad user pointer.
 */
static int set_volume_mono(unsigned __user *p, unsigned int *aleft)
{
	unsigned int raw;

	if (get_user(raw, p))
		return -EFAULT;

	raw &= 0xff;
	if (raw > 100)
		raw = 100;
	*aleft = raw;
	return 0;
}
/*
 * Fetch a stereo volume pair from user space: left level in the low
 * byte of *p, right level in the next byte, each clamped to 100.
 * Returns 0 on success, -EFAULT on a bad user pointer.
 */
static int set_volume_stereo(unsigned __user *p,
			unsigned int *aleft,
			unsigned int *aright)
{
	unsigned int packed;

	if (get_user(packed, p))
		return -EFAULT;

	*aleft = packed & 0xff;
	if (*aleft > 100)
		*aleft = 100;

	*aright = (packed >> 8) & 0xff;
	if (*aright > 100)
		*aright = 100;

	return 0;
}
/*
 * Pack a mono level into the OSS ioctl return format: the same value
 * duplicated in the low (left) and second (right) bytes.
 */
static int ret_vol_mono(int left)
{
	return left | (left << 8);
}
/*
 * Pack left/right levels into the OSS ioctl return format: left in the
 * low byte, right in the next byte.
 */
static int ret_vol_stereo(int left, int right)
{
	return left | (right << 8);
}
/*
 * Forward a mixer ioctl to the attached AD1848 mixer, or return
 * -EINVAL when the card has no WSS mixer.
 */
static int call_ad_mixer(pss_confdata *devc,unsigned int cmd, void __user *arg)
{
	if (devc->ad_mixer_dev == NO_WSS_MIXER)
		return -EINVAL;
	return mixer_devs[devc->ad_mixer_dev]->ioctl(devc->ad_mixer_dev, cmd, arg);
}
/*
 * Mixer ioctl handler.  The PSS-specific channels (volume, bass,
 * treble, synth) and the mask/caps queries are handled here; anything
 * else is forwarded to the AD1848 mixer when one is attached.  Writes
 * update both the cached levels and the hardware; reads return the
 * cached values merged with the AD1848's masks.
 */
static int pss_mixer_ioctl (int dev, unsigned int cmd, void __user *arg)
{
	pss_confdata *devc = mixer_devs[dev]->devc;
	int cmdf = cmd & 0xff;

	if ((cmdf != SOUND_MIXER_VOLUME) && (cmdf != SOUND_MIXER_BASS) &&
		(cmdf != SOUND_MIXER_TREBLE) && (cmdf != SOUND_MIXER_SYNTH) &&
		(cmdf != SOUND_MIXER_DEVMASK) && (cmdf != SOUND_MIXER_STEREODEVS) &&
		(cmdf != SOUND_MIXER_RECMASK) && (cmdf != SOUND_MIXER_CAPS) &&
		(cmdf != SOUND_MIXER_RECSRC))
	{
		/* not one of ours: let the AD1848 mixer handle it */
		return call_ad_mixer(devc, cmd, arg);
	}

	if (((cmd >> 8) & 0xff) != 'M')
		return -EINVAL;

	if (_SIOC_DIR (cmd) & _SIOC_WRITE)
	{
		switch (cmdf)
		{
			case SOUND_MIXER_RECSRC:
				if (devc->ad_mixer_dev != NO_WSS_MIXER)
					return call_ad_mixer(devc, cmd, arg);
				else
				{
					/* no record sources of our own: only 0 is accepted */
					int v;
					if (get_user(v, (int __user *)arg))
						return -EFAULT;
					if (v != 0)
						return -EINVAL;
					return 0;
				}
			case SOUND_MIXER_VOLUME:
				if (set_volume_stereo(arg,
					&devc->mixer.volume_l,
					&devc->mixer.volume_r))
					return -EFAULT;
				set_master_volume(devc, devc->mixer.volume_l,
					devc->mixer.volume_r);
				return ret_vol_stereo(devc->mixer.volume_l,
					devc->mixer.volume_r);

			case SOUND_MIXER_BASS:
				if (set_volume_mono(arg, &devc->mixer.bass))
					return -EFAULT;
				set_bass(devc, devc->mixer.bass);
				return ret_vol_mono(devc->mixer.bass);

			case SOUND_MIXER_TREBLE:
				if (set_volume_mono(arg, &devc->mixer.treble))
					return -EFAULT;
				set_treble(devc, devc->mixer.treble);
				return ret_vol_mono(devc->mixer.treble);

			case SOUND_MIXER_SYNTH:
				if (set_volume_mono(arg, &devc->mixer.synth))
					return -EFAULT;
				set_synth_volume(devc, devc->mixer.synth);
				return ret_vol_mono(devc->mixer.synth);

			default:
				return -EINVAL;
		}
	}
	else
	{
		int val, and_mask = 0, or_mask = 0;
		/*
		 * Return parameters
		 */
		switch (cmdf)
		{
			case SOUND_MIXER_DEVMASK:
				/*
				 * NOTE(review): with no AD1848 the masks stay
				 * zero, so our own channels are not reported
				 * either — verify this is intended.
				 */
				if (call_ad_mixer(devc, cmd, arg) == -EINVAL)
					break;
				and_mask = ~0;
				or_mask = SOUND_MASK_VOLUME | SOUND_MASK_BASS | SOUND_MASK_TREBLE | SOUND_MASK_SYNTH;
				break;

			case SOUND_MIXER_STEREODEVS:
				if (call_ad_mixer(devc, cmd, arg) == -EINVAL)
					break;
				and_mask = ~0;
				or_mask = SOUND_MASK_VOLUME;
				break;

			case SOUND_MIXER_RECMASK:
				if (devc->ad_mixer_dev != NO_WSS_MIXER)
					return call_ad_mixer(devc, cmd, arg);
				break;	/* no record sources of our own */

			case SOUND_MIXER_CAPS:
				if (devc->ad_mixer_dev != NO_WSS_MIXER)
					return call_ad_mixer(devc, cmd, arg);
				or_mask = SOUND_CAP_EXCL_INPUT;
				break;

			case SOUND_MIXER_RECSRC:
				if (devc->ad_mixer_dev != NO_WSS_MIXER)
					return call_ad_mixer(devc, cmd, arg);
				break;

			case SOUND_MIXER_VOLUME:
				or_mask = ret_vol_stereo(devc->mixer.volume_l, devc->mixer.volume_r);
				break;

			case SOUND_MIXER_BASS:
				or_mask = ret_vol_mono(devc->mixer.bass);
				break;

			case SOUND_MIXER_TREBLE:
				or_mask = ret_vol_mono(devc->mixer.treble);
				break;

			case SOUND_MIXER_SYNTH:
				or_mask = ret_vol_mono(devc->mixer.synth);
				break;
			default:
				return -EINVAL;
		}

		/* merge our contribution with whatever the AD1848 stored */
		if (get_user(val, (int __user *)arg))
			return -EFAULT;
		val &= and_mask;
		val |= or_mask;
		if (put_user(val, (int __user *)arg))
			return -EFAULT;
		return val;
	}
}
/* Mixer device exported when the PSS software mixer is enabled. */
static struct mixer_operations pss_mixer_operations =
{
	.owner	= THIS_MODULE,
	.id	= "SOUNDPORT",
	.name	= "PSS-AD1848",
	.ioctl	= pss_mixer_ioctl
};
/*
 * Disable every legacy emulation (PSS/joystick, WSS, SB, MIDI, CDROM)
 * by clearing the corresponding configuration registers; selected
 * features are re-enabled later.
 */
static void disable_all_emulations(void)
{
	static const int conf_regs[] = {
		CONF_PSS,	/* 0x0400 here would enable the joystick */
		CONF_WSS,
		CONF_SB,
		CONF_MIDI,
		CONF_CDROM
	};
	int i;

	for (i = 0; i < (int)(sizeof(conf_regs)/sizeof(conf_regs[0])); i++)
		outw(0x0000, REG(conf_regs[i]));
}
/*
 * Enable the optional non-audio functions selected via module
 * parameters: the joystick port and the CD-ROM interface.
 */
static void configure_nonsound_components(void)
{
	/* Configure Joystick port */

	if(pss_enable_joystick)
	{
		outw(0x0400, REG(CONF_PSS));	/* 0x0400 enables joystick */
		printk(KERN_INFO "PSS: joystick enabled.\n");
	}
	else
	{
		printk(KERN_INFO "PSS: joystick port not enabled.\n");
	}

	/* Configure CDROM port */

	if (pss_cdrom_port == -1) {	/* If cdrom port enablation wasn't requested */
		printk(KERN_INFO "PSS: CDROM port not enabled.\n");
	} else if (check_region(pss_cdrom_port, 2)) {	/* NOTE(review): check_region is racy/deprecated */
		printk(KERN_ERR "PSS: CDROM I/O port conflict.\n");
	} else {
		set_io_base(devc, CONF_CDROM, pss_cdrom_port);
		printk(KERN_INFO "PSS: CDROM I/O port set to 0x%x.\n", pss_cdrom_port);
	}
}
/*
 * Attach the PSS card: probe it, disable all legacy emulations,
 * optionally allocate DMA/IRQ (compiled out by default), enable the
 * configured non-sound components and report the card revision.
 * Returns 1 on success, 0 on failure.
 */
static int __init attach_pss(struct address_info *hw_config)
{
	unsigned short  id;
	char tmp[100];

	devc->base = hw_config->io_base;
	devc->irq = hw_config->irq;
	devc->dma = hw_config->dma;
	devc->osp = hw_config->osp;
	devc->ad_mixer_dev = NO_WSS_MIXER;

	if (!probe_pss(hw_config))
		return 0;

	id = inw(REG(PSS_ID)) & 0x00ff;	/* low byte is the revision */

	/*
	 * Disable all emulations. Will be enabled later (if required).
	 */

	disable_all_emulations();

#ifdef YOU_REALLY_WANT_TO_ALLOCATE_THESE_RESOURCES
	if (sound_alloc_dma(hw_config->dma, "PSS"))
	{
		printk("pss.c: Can't allocate DMA channel.\n");
		release_region(hw_config->io_base, 0x10);
		release_region(hw_config->io_base+0x10, 0x9);
		return 0;
	}
	if (!set_irq(devc, CONF_PSS, devc->irq))
	{
		printk("PSS: IRQ allocation error.\n");
		release_region(hw_config->io_base, 0x10);
		release_region(hw_config->io_base+0x10, 0x9);
		return 0;
	}
	if (!set_dma(devc, CONF_PSS, devc->dma))
	{
		printk(KERN_ERR "PSS: DMA allocation error\n");
		release_region(hw_config->io_base, 0x10);
		release_region(hw_config->io_base+0x10, 0x9);
		return 0;
	}
#endif

	configure_nonsound_components();
	pss_initialized = 1;
	sprintf(tmp, "ECHO-PSS Rev. %d", id);
	conf_printf(tmp, hw_config);
	return 1;
}
/*
 * Probe and attach the MPU401 MIDI part: program its I/O base and IRQ
 * into the PSS config registers, download the MIDI synth microcode,
 * drain pending input, then hand over to the generic MPU401 driver.
 * Returns 1 on success, 0 on failure (with the region released).
 */
static int __init probe_pss_mpu(struct address_info *hw_config)
{
	struct resource *ports;
	int timeout;

	if (!pss_initialized)
		return 0;

	ports = request_region(hw_config->io_base, 2, "mpu401");

	if (!ports) {
		printk(KERN_ERR "PSS: MPU I/O port conflict\n");
		return 0;
	}
	set_io_base(devc, CONF_MIDI, hw_config->io_base);
	if (!set_irq(devc, CONF_MIDI, hw_config->irq)) {
		printk(KERN_ERR "PSS: MIDI IRQ allocation error.\n");
		goto fail;
	}
	if (!pss_synthLen) {
		printk(KERN_ERR "PSS: Can't enable MPU. MIDI synth microcode not available.\n");
		goto fail;
	}
	if (!pss_download_boot(devc, pss_synth, pss_synthLen, CPF_FIRST | CPF_LAST)) {
		printk(KERN_ERR "PSS: Unable to load MIDI synth microcode to DSP.\n");
		goto fail;
	}

	/*
	 * Finally wait until the DSP algorithm has initialized itself and
	 * deactivates receive interrupt.
	 */

	for (timeout = 900000; timeout > 0; timeout--)
	{
		if ((inb(hw_config->io_base + 1) & 0x80) == 0)	/* Input data avail */
			inb(hw_config->io_base);	/* Discard it */
		else
			break;	/* No more input */
	}

	if (!probe_mpu401(hw_config, ports))
		goto fail;

	attach_mpu401(hw_config, THIS_MODULE);	/* Slot 1 */

	if (hw_config->slots[1] != -1)	/* The MPU driver installed itself */
		midi_devs[hw_config->slots[1]]->coproc = &pss_coproc_operations;
	return 1;
fail:
	release_region(hw_config->io_base, 2);
	return 0;
}
/*
 * Coprocessor open hook.  For the MIDI sub-device, restore the
 * standard MIDI synth microcode if user code has overwritten it.
 * Returns 0 on success, -EIO on microcode problems.
 */
static int pss_coproc_open(void *dev_info, int sub_device)
{
	switch (sub_device)
	{
		case COPR_MIDI:
			if (pss_synthLen == 0)
			{
				printk(KERN_ERR "PSS: MIDI synth microcode not available.\n");
				return -EIO;
			}
			if (nonstandard_microcode)
				if (!pss_download_boot(devc, pss_synth, pss_synthLen, CPF_FIRST | CPF_LAST))
				{
					printk(KERN_ERR "PSS: Unable to load MIDI synth microcode to DSP.\n");
					return -EIO;
				}
			nonstandard_microcode = 0;
			break;

		default:
			break;
	}
	return 0;
}
/* Coprocessor close hook: nothing to release. */
static void pss_coproc_close(void *dev_info, int sub_device)
{
}
/*
 * Coprocessor reset hook: reload the standard MIDI synth microcode
 * when it is available.
 * NOTE(review): nonstandard_microcode is cleared even when the reload
 * fails or no microcode is present — verify this is intended.
 */
static void pss_coproc_reset(void *dev_info)
{
	if (pss_synthLen)
		if (!pss_download_boot(devc, pss_synth, pss_synthLen, CPF_FIRST | CPF_LAST))
		{
			printk(KERN_ERR "PSS: Unable to load MIDI synth microcode to DSP.\n");
		}
	nonstandard_microcode = 0;
}
/*
 * Validate and download one user-supplied microcode block to the DSP,
 * marking the standard MIDI microcode as overwritten on success.
 * Returns 0, -EINVAL for a bad length, or -EIO on download failure.
 */
static int download_boot_block(void *dev_info, copr_buffer * buf)
{
	if (buf->len <= 0 || buf->len > sizeof(buf->data))
		return -EINVAL;

	if (!pss_download_boot(devc, buf->data, buf->len, buf->flags)) {
		printk(KERN_ERR "PSS: Unable to load microcode block to DSP.\n");
		return -EIO;
	}
	nonstandard_microcode = 1;	/* The MIDI microcode has been overwritten */
	return 0;
}
/*
 * Coprocessor ioctl handler.  Implements microcode load, message
 * send/receive and DSP data/code peek/poke.  Every multi-word DSP
 * transaction is performed under the driver spinlock; user buffers are
 * staged through vmalloc'd copies (copr_buffer/copr_msg are large).
 */
static int pss_coproc_ioctl(void *dev_info, unsigned int cmd, void __user *arg, int local)
{
	copr_buffer *buf;
	copr_msg *mbuf;
	copr_debug_buf dbuf;
	unsigned short tmp;
	unsigned long flags;
	unsigned short *data;
	int i, err;
	/* printk( "PSS coproc ioctl %x %x %d\n", cmd, arg, local); */

	switch (cmd)
	{
		case SNDCTL_COPR_RESET:
			pss_coproc_reset(dev_info);
			return 0;

		case SNDCTL_COPR_LOAD:
			/* stage the (large) user buffer, then download it */
			buf = (copr_buffer *) vmalloc(sizeof(copr_buffer));
			if (buf == NULL)
				return -ENOSPC;
			if (copy_from_user(buf, arg, sizeof(copr_buffer))) {
				vfree(buf);
				return -EFAULT;
			}
			err = download_boot_block(dev_info, buf);
			vfree(buf);
			return err;

		case SNDCTL_COPR_SENDMSG:
			mbuf = (copr_msg *)vmalloc(sizeof(copr_msg));
			if (mbuf == NULL)
				return -ENOSPC;
			if (copy_from_user(mbuf, arg, sizeof(copr_msg))) {
				vfree(mbuf);
				return -EFAULT;
			}
			data = (unsigned short *)(mbuf->data);
			spin_lock_irqsave(&lock, flags);
			for (i = 0; i < mbuf->len; i++) {
				if (!pss_put_dspword(devc, *data++)) {
					spin_unlock_irqrestore(&lock,flags);
					mbuf->len = i;	/* feed back number of WORDs sent */
					err = copy_to_user(arg, mbuf, sizeof(copr_msg));
					vfree(mbuf);
					return err ? -EFAULT : -EIO;
				}
			}
			spin_unlock_irqrestore(&lock,flags);
			vfree(mbuf);
			return 0;

		case SNDCTL_COPR_RCVMSG:
			err = 0;
			mbuf = (copr_msg *)vmalloc(sizeof(copr_msg));
			if (mbuf == NULL)
				return -ENOSPC;
			data = (unsigned short *)mbuf->data;
			spin_lock_irqsave(&lock, flags);
			for (i = 0; i < sizeof(mbuf->data)/sizeof(unsigned short); i++) {
				mbuf->len = i;	/* feed back number of WORDs read */
				if (!pss_get_dspword(devc, data++)) {
					/* -EIO only when nothing at all arrived */
					if (i == 0)
						err = -EIO;
					break;
				}
			}
			spin_unlock_irqrestore(&lock,flags);
			if (copy_to_user(arg, mbuf, sizeof(copr_msg)))
				err = -EFAULT;
			vfree(mbuf);
			return err;

		case SNDCTL_COPR_RDATA:
			/* DSP command 0x00d0: read one data-memory word */
			if (copy_from_user(&dbuf, arg, sizeof(dbuf)))
				return -EFAULT;
			spin_lock_irqsave(&lock, flags);
			if (!pss_put_dspword(devc, 0x00d0)) {
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			if (!pss_put_dspword(devc, (unsigned short)(dbuf.parm1 & 0xffff))) {
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			if (!pss_get_dspword(devc, &tmp)) {
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			dbuf.parm1 = tmp;
			spin_unlock_irqrestore(&lock,flags);
			if (copy_to_user(arg, &dbuf, sizeof(dbuf)))
				return -EFAULT;
			return 0;

		case SNDCTL_COPR_WDATA:
			/* DSP command 0x00d1: write one data-memory word */
			if (copy_from_user(&dbuf, arg, sizeof(dbuf)))
				return -EFAULT;
			spin_lock_irqsave(&lock, flags);
			if (!pss_put_dspword(devc, 0x00d1)) {
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			if (!pss_put_dspword(devc, (unsigned short) (dbuf.parm1 & 0xffff))) {
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			tmp = (unsigned int)dbuf.parm2 & 0xffff;
			if (!pss_put_dspword(devc, tmp)) {
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			spin_unlock_irqrestore(&lock,flags);
			return 0;

		case SNDCTL_COPR_WCODE:
			/* DSP command 0x00d3: write one 24-bit code word (low byte + high 16) */
			if (copy_from_user(&dbuf, arg, sizeof(dbuf)))
				return -EFAULT;
			spin_lock_irqsave(&lock, flags);
			if (!pss_put_dspword(devc, 0x00d3)) {
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			if (!pss_put_dspword(devc, (unsigned short)(dbuf.parm1 & 0xffff))) {
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			tmp = (unsigned int)dbuf.parm2 & 0x00ff;
			if (!pss_put_dspword(devc, tmp)) {
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			tmp = ((unsigned int)dbuf.parm2 >> 8) & 0xffff;
			if (!pss_put_dspword(devc, tmp)) {
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			spin_unlock_irqrestore(&lock,flags);
			return 0;

		case SNDCTL_COPR_RCODE:
			/* DSP command 0x00d2: read one code word (MSB then LSB) */
			if (copy_from_user(&dbuf, arg, sizeof(dbuf)))
				return -EFAULT;
			spin_lock_irqsave(&lock, flags);
			if (!pss_put_dspword(devc, 0x00d2)) {
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			if (!pss_put_dspword(devc, (unsigned short)(dbuf.parm1 & 0xffff))) {
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			if (!pss_get_dspword(devc, &tmp)) {	/* Read MSB */
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			dbuf.parm1 = tmp << 8;
			if (!pss_get_dspword(devc, &tmp)) {	/* Read LSB */
				spin_unlock_irqrestore(&lock,flags);
				return -EIO;
			}
			dbuf.parm1 |= tmp & 0x00ff;
			spin_unlock_irqrestore(&lock,flags);
			if (copy_to_user(arg, &dbuf, sizeof(dbuf)))
				return -EFAULT;
			return 0;

		default:
			return -EINVAL;
	}
	return -EINVAL;
}
/*
 * Coprocessor operations exported for the on-board ADSP-2115 DSP.
 * Hooked onto the MSS audio device in probe_pss_mss() so userspace can
 * download microcode and exchange data via the SNDCTL_COPR_* ioctls.
 */
static coproc_operations pss_coproc_operations =
{
	"ADSP-2115",
	THIS_MODULE,
	pss_coproc_open,
	pss_coproc_close,
	pss_coproc_ioctl,
	pss_coproc_reset,
	&pss_data
};
/*
 * Probe and attach the WSS/MSS audio codec half of the PSS card.
 *
 * Claims the two 4-byte I/O ranges, programs the WSS base/IRQ/DMA into
 * the PSS configuration registers, waits for the on-board DSP to finish
 * its boot-time initialization, then hands over to the generic MSS
 * driver.  Returns 1 on success, 0 on failure (regions released).
 */
static int __init probe_pss_mss(struct address_info *hw_config)
{
	volatile int timeout;	/* busy-wait counter, not a real timer */
	struct resource *ports;
	int my_mix = -999;	/* gcc shut up */

	if (!pss_initialized)
		return 0;

	if (!request_region(hw_config->io_base, 4, "WSS config")) {
		printk(KERN_ERR "PSS: WSS I/O port conflicts.\n");
		return 0;
	}
	ports = request_region(hw_config->io_base + 4, 4, "ad1848");
	if (!ports) {
		printk(KERN_ERR "PSS: WSS I/O port conflicts.\n");
		release_region(hw_config->io_base, 4);
		return 0;
	}
	set_io_base(devc, CONF_WSS, hw_config->io_base);
	if (!set_irq(devc, CONF_WSS, hw_config->irq)) {
		printk("PSS: WSS IRQ allocation error.\n");
		goto fail;
	}
	if (!set_dma(devc, CONF_WSS, hw_config->dma)) {
		printk(KERN_ERR "PSS: WSS DMA allocation error\n");
		goto fail;
	}
	/*
	 * For some reason the card returns 0xff in the WSS status register
	 * immediately after boot. Probably MIDI+SB emulation algorithm
	 * downloaded to the ADSP2115 spends some time initializing the card.
	 * Let's try to wait until it finishes this task.
	 */
	for (timeout = 0; timeout < 100000 && (inb(hw_config->io_base + WSS_INDEX) &
		WSS_INITIALIZING); timeout++)
		;

	outb((0x0b), hw_config->io_base + WSS_INDEX);	/* Required by some cards */

	/* Wait for autocalibration of the codec to complete as well. */
	for (timeout = 0; (inb(hw_config->io_base + WSS_DATA) & WSS_AUTOCALIBRATION) &&
		(timeout < 100000); timeout++)
		;

	if (!probe_ms_sound(hw_config, ports))
		goto fail;

	devc->ad_mixer_dev = NO_WSS_MIXER;
	if (pss_mixer)
	{
		if ((my_mix = sound_install_mixer (MIXER_DRIVER_VERSION,
			"PSS-SPEAKERS and AD1848 (through MSS audio codec)",
			&pss_mixer_operations,
			sizeof (struct mixer_operations),
			devc)) < 0)
		{
			printk(KERN_ERR "Could not install PSS mixer\n");
			goto fail;
		}
	}
	pss_mixer_reset(devc);
	attach_ms_sound(hw_config, ports, THIS_MODULE);	/* Slot 0 */

	if (hw_config->slots[0] != -1)
	{
		/* The MSS driver installed itself */
		audio_devs[hw_config->slots[0]]->coproc = &pss_coproc_operations;
		if (pss_mixer && (num_mixers == (my_mix + 2)))
		{
			/* The MSS mixer installed */
			devc->ad_mixer_dev = audio_devs[hw_config->slots[0]]->mixer_dev;
		}
	}
	return 1;
fail:
	release_region(hw_config->io_base + 4, 4);
	release_region(hw_config->io_base, 4);
	return 0;
}
/* Release the PSS control I/O regions claimed at probe time. */
static inline void __exit unload_pss(struct address_info *hw_config)
{
	int base = hw_config->io_base;

	release_region(base, 0x10);
	release_region(base + 0x10, 0x9);
}
/* Detach the MPU-401 MIDI sub-driver (cleanup done by unload_mpu401()). */
static inline void __exit unload_pss_mpu(struct address_info *hw_config)
{
	unload_mpu401(hw_config);
}
/* Detach the WSS/MSS audio sub-driver (cleanup done by unload_ms_sound()). */
static inline void __exit unload_pss_mss(struct address_info *hw_config)
{
	unload_ms_sound(hw_config);
}
/* Resource descriptors for the three sub-devices (PSS DSP, WSS codec,
 * MPU-401 UART); filled in from the module parameters in init_pss(). */
static struct address_info cfg;
static struct address_info cfg2;
static struct address_info cfg_mpu;

/* Module parameters; -1 means "not configured by the user". */
static int pss_io __initdata = -1;
static int mss_io __initdata = -1;
static int mss_irq __initdata = -1;
static int mss_dma __initdata = -1;
static int mpu_io __initdata = -1;
static int mpu_irq __initdata = -1;
static int pss_no_sound = 0;	/* Just configure non-sound components */
static int pss_keep_settings = 1;	/* Keep hardware settings at module exit */
static char *pss_firmware = "/etc/sound/pss_synth";

module_param(pss_io, int, 0);
MODULE_PARM_DESC(pss_io, "Set i/o base of PSS card (probably 0x220 or 0x240)");
module_param(mss_io, int, 0);
MODULE_PARM_DESC(mss_io, "Set WSS (audio) i/o base (0x530, 0x604, 0xE80, 0xF40, or other. Address must end in 0 or 4 and must be from 0x100 to 0xFF4)");
module_param(mss_irq, int, 0);
MODULE_PARM_DESC(mss_irq, "Set WSS (audio) IRQ (3, 5, 7, 9, 10, 11, 12)");
module_param(mss_dma, int, 0);
MODULE_PARM_DESC(mss_dma, "Set WSS (audio) DMA (0, 1, 3)");
module_param(mpu_io, int, 0);
MODULE_PARM_DESC(mpu_io, "Set MIDI i/o base (0x330 or other. Address must be on 4 location boundaries and must be from 0x100 to 0xFFC)");
module_param(mpu_irq, int, 0);
MODULE_PARM_DESC(mpu_irq, "Set MIDI IRQ (3, 5, 7, 9, 10, 11, 12)");
module_param(pss_cdrom_port, int, 0);
MODULE_PARM_DESC(pss_cdrom_port, "Set the PSS CDROM port i/o base (0x340 or other)");
module_param(pss_enable_joystick, bool, 0);
MODULE_PARM_DESC(pss_enable_joystick, "Enables the PSS joystick port (1 to enable, 0 to disable)");
module_param(pss_no_sound, bool, 0);
/* BUGFIX: fixed "compoents" typo in the user-visible modinfo text. */
MODULE_PARM_DESC(pss_no_sound, "Configure sound components (0 - no, 1 - yes)");
module_param(pss_keep_settings, bool, 0);
MODULE_PARM_DESC(pss_keep_settings, "Keep hardware setting at driver unloading (0 - no, 1 - yes)");
module_param(pss_firmware, charp, 0);
MODULE_PARM_DESC(pss_firmware, "Location of the firmware file (default - /etc/sound/pss_synth)");
module_param(pss_mixer, bool, 0);
MODULE_PARM_DESC(pss_mixer, "Enable (1) or disable (0) PSS mixer (controlling of output volume, bass, treble, synth volume). The mixer is not available on all PSS cards.");
MODULE_AUTHOR("Hannu Savolainen, Vladimir Michl");
MODULE_DESCRIPTION("Module for PSS sound cards (based on AD1848, ADSP-2115 and ESC614). This module includes control of output amplifier and synth volume of the Beethoven ADSP-16 card (this may work with other PSS cards).");
MODULE_LICENSE("GPL");

/* Set when init_pss() loaded the firmware itself (and must vfree it). */
static int fw_load = 0;
/* Nonzero when the MPU-401 / MSS sub-drivers were successfully attached. */
static int pssmpu = 0, pssmss = 0;
/*
 * Load a PSS sound card module.
 *
 * Returns 0 on success or a negative errno.  In "no sound" mode only the
 * non-audio components (CD-ROM port, joystick) are configured and the
 * I/O regions are released again immediately.
 */
static int __init init_pss(void)
{
	if(pss_no_sound)		/* If configuring only nonsound components */
	{
		cfg.io_base = pss_io;
		if(!probe_pss(&cfg))
			return -ENODEV;
		printk(KERN_INFO "ECHO-PSS Rev. %d\n", inw(REG(PSS_ID)) & 0x00ff);
		printk(KERN_INFO "PSS: loading in no sound mode.\n");
		disable_all_emulations();
		configure_nonsound_components();
		release_region(pss_io, 0x10);
		release_region(pss_io + 0x10, 0x9);
		return 0;
	}

	cfg.io_base = pss_io;
	cfg2.io_base = mss_io;
	cfg2.irq = mss_irq;
	cfg2.dma = mss_dma;
	cfg_mpu.io_base = mpu_io;
	cfg_mpu.irq = mpu_irq;

	/*
	 * BUGFIX: the WSS DMA channel was stored in cfg2 (from mss_dma
	 * above), but the old check tested cfg.dma, which is never
	 * assigned and is always 0, so a missing mss_dma= parameter was
	 * silently accepted.
	 */
	if (cfg.io_base == -1 || cfg2.io_base == -1 || cfg2.irq == -1 ||
	    cfg2.dma == -1) {
		printk(KERN_INFO "pss: mss_io, mss_dma, mss_irq and pss_io must be set.\n");
		return -EINVAL;
	}

	if (!pss_synth) {
		fw_load = 1;
		pss_synthLen = mod_firmware_load(pss_firmware, (void *) &pss_synth);
	}
	if (!attach_pss(&cfg)) {
		/*
		 * BUGFIX: if attach fails, module load fails and
		 * cleanup_pss() never runs, so the firmware image loaded
		 * just above would leak.  Free it here.
		 */
		if (fw_load && pss_synth) {
			vfree(pss_synth);
			pss_synth = NULL;
			fw_load = 0;
		}
		return -ENODEV;
	}
	/*
	 * Attach stuff
	 */
	if (probe_pss_mpu(&cfg_mpu))
		pssmpu = 1;
	if (probe_pss_mss(&cfg2))
		pssmss = 1;

	return 0;
}
/*
 * Module unload: tear down the sub-drivers in reverse probe order and
 * optionally reset the card's emulation registers.
 */
static void __exit cleanup_pss(void)
{
	if(!pss_no_sound)
	{
		/* Free the DSP firmware image loaded by init_pss(). */
		if(fw_load && pss_synth)
			vfree(pss_synth);
		if(pssmss)
			unload_pss_mss(&cfg2);
		if(pssmpu)
			unload_pss_mpu(&cfg_mpu);
		unload_pss(&cfg);
	}

	if(!pss_keep_settings)	/* Reset the hardware unless asked to keep settings */
	{
		disable_all_emulations();
		printk(KERN_INFO "Resetting PSS sound card configurations.\n");
	}
}
/* Kernel entry points for module load/unload. */
module_init(init_pss);
module_exit(cleanup_pss);
#ifndef MODULE
/*
 * Parse the "pss=" kernel command-line option:
 *   pss=io,mss_io,mss_irq,mss_dma,mpu_io,mpu_irq
 */
static int __init setup_pss(char *str)
{
	/* io, mss_io, mss_irq, mss_dma, mpu_io, mpu_irq */
	int ints[7];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	/*
	 * BUGFIX: get_options() stores the number of parsed values in
	 * ints[0] and only writes that many slots.  The old code read all
	 * six slots unconditionally and could pick up uninitialized stack
	 * garbage when fewer values were supplied.
	 */
	if (ints[0] >= 1)
		pss_io = ints[1];
	if (ints[0] >= 2)
		mss_io = ints[2];
	if (ints[0] >= 3)
		mss_irq = ints[3];
	if (ints[0] >= 4)
		mss_dma = ints[4];
	if (ints[0] >= 5)
		mpu_io = ints[5];
	if (ints[0] >= 6)
		mpu_irq = ints[6];

	return 1;
}
__setup("pss=", setup_pss);
#endif
| gpl-2.0 |
ProtouProject/android_kernel_msm | drivers/media/platform/msm/wfd/wfd-util.c | 2271 | 5700 | /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include <linux/limits.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "wfd-util.h"
/* Root "wfd" debugfs directory shared by all device instances. */
static struct dentry *wfd_debugfs_root;

/*
 * Create the top-level "wfd" debugfs directory.
 *
 * Returns 0 on success, -ENODEV when debugfs is not available in this
 * kernel, or -ENOMEM on allocation failure.
 */
int wfd_stats_setup(void)	/* BUGFIX: proper (void) prototype, not K&R () */
{
	wfd_debugfs_root = debugfs_create_dir("wfd", NULL);

	if (wfd_debugfs_root == ERR_PTR(-ENODEV))
		return -ENODEV;
	else if (!wfd_debugfs_root)
		return -ENOMEM;
	else
		return 0;
}
void wfd_stats_teardown()
{
if (wfd_debugfs_root)
debugfs_remove_recursive(wfd_debugfs_root);
}
/*
 * Create the per-device debugfs directory (named after @device) under
 * the wfd root, one read-only u32 file per statistic, and initialize
 * the encode-latency bookkeeping.
 *
 * Returns 0 on success or a negative errno.  On failure every debugfs
 * entry already created is removed again, so nothing is leaked.
 */
int wfd_stats_init(struct wfd_stats *stats, int device)
{
	char device_str[NAME_MAX] = "";
	int rc = 0;

	if (!stats) {
		rc = -EINVAL;
		goto wfd_stats_init_fail;
	} else if (!wfd_debugfs_root) {
		WFD_MSG_ERR("wfd debugfs root does not exist\n");
		rc = -ENOENT;
		goto wfd_stats_init_fail;
	}

	memset(stats, 0, sizeof(*stats));
	INIT_LIST_HEAD(&stats->enc_queue);
	mutex_init(&stats->mutex);
	snprintf(device_str, sizeof(device_str), "%d", device);

	stats->d_parent = debugfs_create_dir(device_str, wfd_debugfs_root);
	if (IS_ERR(stats->d_parent)) {
		rc = PTR_ERR(stats->d_parent);
		stats->d_parent = NULL;
		goto wfd_stats_init_fail;
	}

	stats->d_v4l2_buf_count = debugfs_create_u32("v4l2_buf_count", S_IRUGO,
			stats->d_parent, &stats->v4l2_buf_count);
	if (IS_ERR(stats->d_v4l2_buf_count)) {
		rc = PTR_ERR(stats->d_v4l2_buf_count);
		stats->d_v4l2_buf_count = NULL;
		goto wfd_stats_init_fail;
	}

	stats->d_mdp_buf_count = debugfs_create_u32("mdp_buf_count", S_IRUGO,
			stats->d_parent, &stats->mdp_buf_count);
	if (IS_ERR(stats->d_mdp_buf_count)) {
		rc = PTR_ERR(stats->d_mdp_buf_count);
		stats->d_mdp_buf_count = NULL;
		goto wfd_stats_init_fail;
	}

	stats->d_vsg_buf_count = debugfs_create_u32("vsg_buf_count", S_IRUGO,
			stats->d_parent, &stats->vsg_buf_count);
	if (IS_ERR(stats->d_vsg_buf_count)) {
		rc = PTR_ERR(stats->d_vsg_buf_count);
		stats->d_vsg_buf_count = NULL;
		goto wfd_stats_init_fail;
	}

	stats->d_enc_buf_count = debugfs_create_u32("enc_buf_count", S_IRUGO,
			stats->d_parent, &stats->enc_buf_count);
	if (IS_ERR(stats->d_enc_buf_count)) {
		rc = PTR_ERR(stats->d_enc_buf_count);
		stats->d_enc_buf_count = NULL;
		goto wfd_stats_init_fail;
	}

	stats->d_frames_encoded = debugfs_create_u32("frames_encoded", S_IRUGO,
			stats->d_parent, &stats->frames_encoded);
	if (IS_ERR(stats->d_frames_encoded)) {
		rc = PTR_ERR(stats->d_frames_encoded);
		stats->d_frames_encoded = NULL;
		goto wfd_stats_init_fail;
	}

	stats->d_mdp_updates = debugfs_create_u32("mdp_updates", S_IRUGO,
			stats->d_parent, &stats->mdp_updates);
	if (IS_ERR(stats->d_mdp_updates)) {
		rc = PTR_ERR(stats->d_mdp_updates);
		stats->d_mdp_updates = NULL;
		goto wfd_stats_init_fail;
	}

	stats->d_enc_avg_latency = debugfs_create_u32("enc_avg_latency",
			S_IRUGO, stats->d_parent, &stats->enc_avg_latency);
	if (IS_ERR(stats->d_enc_avg_latency)) {
		rc = PTR_ERR(stats->d_enc_avg_latency);
		stats->d_enc_avg_latency = NULL;
		goto wfd_stats_init_fail;
	}

	return rc;
wfd_stats_init_fail:
	/*
	 * BUGFIX: remove whatever was created before the failure so a
	 * failed init does not leak a partially populated directory.
	 */
	if (stats && stats->d_parent) {
		debugfs_remove_recursive(stats->d_parent);
		stats->d_parent =
		stats->d_v4l2_buf_count =
		stats->d_mdp_buf_count =
		stats->d_vsg_buf_count =
		stats->d_enc_buf_count =
		stats->d_frames_encoded =
		stats->d_mdp_updates =
		stats->d_enc_avg_latency = NULL;
	}
	return rc;
}
/*
 * Record a buffer life-cycle event in the per-device counters.
 *
 * Returns 0 on success or -ENOTSUPP for an unknown event.  All counter
 * updates are serialized by stats->mutex.
 */
int wfd_stats_update(struct wfd_stats *stats, enum wfd_stats_event event)
{
	int rc = 0;

	mutex_lock(&stats->mutex);
	switch (event) {
	case WFD_STAT_EVENT_CLIENT_QUEUE:
		stats->v4l2_buf_count++;
		break;
	case WFD_STAT_EVENT_CLIENT_DEQUEUE: {
		struct wfd_stats_encode_sample *sample = NULL;

		stats->v4l2_buf_count--;

		/* Pop the oldest encode timestamp (pushed by
		 * WFD_STAT_EVENT_ENC_QUEUE) and fold the elapsed time
		 * into the running average latency. */
		if (!list_empty(&stats->enc_queue))
			sample = list_first_entry(&stats->enc_queue,
					struct wfd_stats_encode_sample,
					list);
		if (sample) {
			ktime_t kdiff = ktime_sub(ktime_get(),
					sample->encode_start_ts);
			uint32_t diff = ktime_to_ms(kdiff);

			stats->enc_cumulative_latency += diff;
			stats->enc_latency_samples++;
			stats->enc_avg_latency = stats->enc_cumulative_latency /
				stats->enc_latency_samples;

			list_del(&sample->list);
			kfree(sample);
			sample = NULL;
		}
		break;
	}
	case WFD_STAT_EVENT_MDP_QUEUE:
		stats->mdp_buf_count++;
		break;
	case WFD_STAT_EVENT_MDP_DEQUEUE:
		stats->mdp_buf_count--;
		stats->mdp_updates++;
		break;
	case WFD_STAT_EVENT_ENC_QUEUE: {
		struct wfd_stats_encode_sample *sample = NULL;

		stats->enc_buf_count++;
		stats->frames_encoded++;

		/* Timestamp the frame so CLIENT_DEQUEUE can compute the
		 * encode latency.  Allocation failure only degrades the
		 * latency statistics, never the event accounting. */
		sample = kzalloc(sizeof(*sample), GFP_KERNEL);
		if (sample) {
			INIT_LIST_HEAD(&sample->list);
			sample->encode_start_ts = ktime_get();
			list_add_tail(&sample->list, &stats->enc_queue);
		} else {
			WFD_MSG_WARN("Unable to measure latency\n");
		}
		break;
	}
	case WFD_STAT_EVENT_ENC_DEQUEUE:
		stats->enc_buf_count--;
		break;
	case WFD_STAT_EVENT_VSG_QUEUE:
		stats->vsg_buf_count++;
		break;
	case WFD_STAT_EVENT_VSG_DEQUEUE:
		stats->vsg_buf_count--;
		break;
	default:
		rc = -ENOTSUPP;
	}
	mutex_unlock(&stats->mutex);
	return rc;
}
/*
 * Log the measured average encode latency and remove this device's
 * debugfs directory, which deletes every file created by
 * wfd_stats_init() in one shot.  Always returns 0.
 */
int wfd_stats_deinit(struct wfd_stats *stats)
{
	/* BUGFIX: added missing '\n' terminator to the debug message. */
	WFD_MSG_DBG("Latencies: avg enc. latency %d\n",
			stats->enc_avg_latency);

	/* Delete all debugfs files in one shot :) */
	if (stats->d_parent)
		debugfs_remove_recursive(stats->d_parent);

	/* Clear every dentry pointer so a later deinit is a no-op. */
	stats->d_parent =
	stats->d_v4l2_buf_count =
	stats->d_mdp_buf_count =
	stats->d_vsg_buf_count =
	stats->d_enc_buf_count =
	stats->d_frames_encoded =
	stats->d_mdp_updates =
	stats->d_enc_avg_latency = NULL;
	return 0;
}
| gpl-2.0 |
0xD34D/kernel_amazon_tate | drivers/net/wireless/iwlwifi/iwl-led.c | 2271 | 6336 | /******************************************************************************
*
* Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
/* default: IWL_LED_BLINK(0) using blinking index table */
static int led_mode;
module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "0=system default, "
"1=On(RF On)/Off(RF Off), 2=blinking");
/* Throughput OFF time(ms) ON time (ms)
* >300 25 25
* >200 to 300 40 40
* >100 to 200 55 55
* >70 to 100 65 65
* >50 to 70 75 75
* >20 to 50 85 85
* >10 to 20 95 95
* >5 to 10 110 110
* >1 to 5 130 130
* >0 to 1 167 167
* <=0 SOLID ON
*/
static const struct ieee80211_tpt_blink iwl_blink[] = {
{ .throughput = 0, .blink_time = 334 },
{ .throughput = 1 * 1024 - 1, .blink_time = 260 },
{ .throughput = 5 * 1024 - 1, .blink_time = 220 },
{ .throughput = 10 * 1024 - 1, .blink_time = 190 },
{ .throughput = 20 * 1024 - 1, .blink_time = 170 },
{ .throughput = 50 * 1024 - 1, .blink_time = 150 },
{ .throughput = 70 * 1024 - 1, .blink_time = 130 },
{ .throughput = 100 * 1024 - 1, .blink_time = 110 },
{ .throughput = 200 * 1024 - 1, .blink_time = 80 },
{ .throughput = 300 * 1024 - 1, .blink_time = 50 },
};
/* Turn the LED register on (writes CSR_LED_REG_TRUN_ON). */
void iwlagn_led_enable(struct iwl_priv *priv)
{
	iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
}
/*
* Adjust led blink rate to compensate on a MAC Clock difference on every HW
* Led blink rate analysis showed an average deviation of 20% on 5000 series
* and up.
* Need to compensate on the led on/off time per HW according to the deviation
* to achieve the desired led frequency
* The calculation is: (100-averageDeviation)/100 * blinkTime
* For code efficiency the calculation will be:
* compensation = (100 - averageDeviation) * 64 / 100
* NewBlinkTime = (compensation * BlinkTime) / 64
*/
/*
 * Scale a blink on/off time by the per-HW clock-deviation factor
 * (compensation = (100 - averageDeviation) * 64 / 100, so the result is
 * (time * compensation) / 64 as described above).  Falls back to the
 * uncompensated time when no factor is configured.
 */
static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
		u8 time, u16 compensation)
{
	if (compensation)
		return (u8)((time * compensation) >> 6);

	IWL_ERR(priv, "undefined blink compensation: "
			"use pre-defined blinking time\n");
	return time;
}
/*
 * Queue an asynchronous REPLY_LEDS_CMD to the firmware, first clearing
 * any LED register bits outside the BSM control mask.
 */
static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
{
	u32 reg;
	struct iwl_host_cmd cmd = {
		.id = REPLY_LEDS_CMD,
		.len = { sizeof(struct iwl_led_cmd), },
		.data = { led_cmd, },
		.flags = CMD_ASYNC,
		.callback = NULL,
	};

	reg = iwl_read32(priv, CSR_LED_REG);
	/* Any bit outside the BSM control mask must be cleared first. */
	if (reg & ~CSR_LED_BSM_CTRL_MSK)
		iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);

	return iwl_send_cmd(priv, &cmd);
}
/* Set led pattern command */
static int iwl_led_cmd(struct iwl_priv *priv,
		unsigned long on,
		unsigned long off)
{
	struct iwl_led_cmd led_cmd = {
		.id = IWL_LED_LINK,
		.interval = IWL_DEF_LED_INTRVL
	};
	int ret;

	if (!test_bit(STATUS_READY, &priv->status))
		return -EBUSY;

	/* Skip the firmware command if the pattern is unchanged. */
	if (priv->blink_on == on && priv->blink_off == off)
		return 0;

	if (off == 0) {
		/* led is SOLID_ON */
		on = IWL_LED_SOLID;
	}

	IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
			priv->cfg->base_params->led_compensation);
	led_cmd.on = iwl_blink_compensation(priv, on,
			priv->cfg->base_params->led_compensation);
	led_cmd.off = iwl_blink_compensation(priv, off,
			priv->cfg->base_params->led_compensation);

	ret = iwl_send_led_cmd(priv, &led_cmd);
	if (!ret) {
		/* Cache the pattern only when the command was queued. */
		priv->blink_on = on;
		priv->blink_off = off;
	}
	return ret;
}
/* LED class brightness callback: any non-zero brightness means solid on. */
static void iwl_led_brightness_set(struct led_classdev *led_cdev,
		enum led_brightness brightness)
{
	struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
	unsigned long on = (brightness > 0) ? IWL_LED_SOLID : 0;

	iwl_led_cmd(priv, on, 0);
}
/* LED class blink callback: forward the requested on/off times. */
static int iwl_led_blink_set(struct led_classdev *led_cdev,
		unsigned long *delay_on,
		unsigned long *delay_off)
{
	struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);

	return iwl_led_cmd(priv, *delay_on, *delay_off);
}
void iwl_leds_init(struct iwl_priv *priv)
{
int mode = led_mode;
int ret;
if (mode == IWL_LED_DEFAULT)
mode = priv->cfg->led_mode;
priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(priv->hw->wiphy));
priv->led.brightness_set = iwl_led_brightness_set;
priv->led.blink_set = iwl_led_blink_set;
priv->led.max_brightness = 1;
switch (mode) {
case IWL_LED_DEFAULT:
WARN_ON(1);
break;
case IWL_LED_BLINK:
priv->led.default_trigger =
ieee80211_create_tpt_led_trigger(priv->hw,
IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
iwl_blink, ARRAY_SIZE(iwl_blink));
break;
case IWL_LED_RF_STATE:
priv->led.default_trigger =
ieee80211_get_radio_led_name(priv->hw);
break;
}
ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
if (ret) {
kfree(priv->led.name);
return;
}
priv->led_registered = true;
}
/* Unregister the LED classdev and free its name, if it was registered. */
void iwl_leds_exit(struct iwl_priv *priv)
{
	if (priv->led_registered) {
		led_classdev_unregister(&priv->led);
		kfree(priv->led.name);
	}
}
| gpl-2.0 |
wuby986/Sixty-4Stroke-kernel | net/phonet/pep-gprs.c | 2783 | 6950 | /*
* File: pep-gprs.c
*
* GPRS over Phonet pipe end point socket
*
* Copyright (C) 2008 Nokia Corporation.
*
* Author: Rémi Denis-Courmont
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <net/sock.h>
#include <linux/if_phonet.h>
#include <net/tcp_states.h>
#include <net/phonet/gprs.h>
/* Default MTU of a GPRS pipe interface (changeable via gprs_set_mtu()). */
#define GPRS_DEFAULT_MTU 1400

/* Per-interface state, stored in the net device's private area. */
struct gprs_dev {
	struct sock *sk;	/* underlying Phonet pipe socket */
	/* saved socket callbacks, restored by gprs_detach() */
	void (*old_state_change)(struct sock *);
	void (*old_data_ready)(struct sock *, int);
	void (*old_write_space)(struct sock *);

	struct net_device *dev;	/* back-pointer to the net device */
};
/*
 * Infer the ethertype of a received packet from its IP version nibble.
 * Returns htons(0) when the packet is neither IPv4 nor IPv6 (or empty).
 */
static __be16 gprs_type_trans(struct sk_buff *skb)
{
	u8 buf;
	const u8 *pvfc = skb_header_pointer(skb, 0, 1, &buf);

	if (pvfc) {
		/* Look at IP version field */
		unsigned int version = *pvfc >> 4;

		if (version == 4)
			return htons(ETH_P_IP);
		if (version == 6)
			return htons(ETH_P_IPV6);
	}
	return htons(0);
}
/* Wake the device TX queue when the pipe socket has credit to write. */
static void gprs_writeable(struct gprs_dev *gp)
{
	if (pep_writeable(gp->sk))
		netif_wake_queue(gp->dev);
}
/*
* Socket callbacks
*/
/* Socket state callback: shut the interface down when the pipe closes. */
static void gprs_state_change(struct sock *sk)
{
	struct gprs_dev *gp = sk->sk_user_data;

	if (sk->sk_state != TCP_CLOSE_WAIT)
		return;

	netif_stop_queue(gp->dev);
	netif_carrier_off(gp->dev);
}
/*
 * Deliver one pipe packet to the network stack as an IP datagram.
 *
 * Returns 0 on success or a negative errno; on any failure the skb is
 * freed here and rx_dropped is incremented.
 */
static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb)
{
	struct net_device *dev = gp->dev;
	int err = 0;
	__be16 protocol = gprs_type_trans(skb);

	if (!protocol) {
		err = -EINVAL;
		goto drop;
	}

	if (skb_headroom(skb) & 3) {
		struct sk_buff *rskb, *fs;
		int flen = 0;

		/* Phonet Pipe data header may be misaligned (3 bytes),
		 * so wrap the IP packet as a single fragment of an head-less
		 * socket buffer. The network stack will pull what it needs,
		 * but at least, the whole IP payload is not memcpy'd. */
		rskb = netdev_alloc_skb(dev, 0);
		if (!rskb) {
			err = -ENOBUFS;
			goto drop;
		}
		/* Hang the original skb off the new head as a fragment. */
		skb_shinfo(rskb)->frag_list = skb;
		rskb->len += skb->len;
		rskb->data_len += rskb->len;
		rskb->truesize += rskb->len;

		/* Avoid nested fragments */
		skb_walk_frags(skb, fs)
			flen += fs->len;
		skb->next = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->len -= flen;
		skb->data_len -= flen;
		skb->truesize -= flen;

		skb = rskb;
	}

	skb->protocol = protocol;
	skb_reset_mac_header(skb);
	skb->dev = dev;

	if (likely(dev->flags & IFF_UP)) {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_rx(skb);
		skb = NULL;	/* ownership passed to the stack */
	} else
		err = -ENODEV;

drop:
	if (skb) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
	}
	return err;
}
/* Socket data-ready callback: drain every queued pipe packet into the
 * network stack. */
static void gprs_data_ready(struct sock *sk, int len)
{
	struct gprs_dev *gp = sk->sk_user_data;
	struct sk_buff *skb = pep_read(sk);

	while (skb) {
		skb_orphan(skb);
		gprs_recv(gp, skb);
		skb = pep_read(sk);
	}
}
/* Socket write-space callback: re-wake the TX queue once the pipe has
 * credit again, but only while the interface is running. */
static void gprs_write_space(struct sock *sk)
{
	struct gprs_dev *gp = sk->sk_user_data;
	struct net_device *dev = gp->dev;

	if (netif_running(dev))
		gprs_writeable(gp);
}
/*
* Network device callbacks
*/
/* ndo_open: wake the TX queue if the pipe already has write credit. */
static int gprs_open(struct net_device *dev)
{
	struct gprs_dev *gp = netdev_priv(dev);

	gprs_writeable(gp);
	return 0;
}
/* ndo_stop: keep the TX queue stopped while the interface is down. */
static int gprs_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
/*
 * ndo_start_xmit: send one IP datagram over the Phonet pipe socket.
 *
 * Non-IP frames are silently discarded.  The queue is stopped after
 * every transmission and immediately re-woken while the pipe still has
 * credit, throttling the stack to the pipe's flow control.
 */
static netdev_tx_t gprs_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gprs_dev *gp = netdev_priv(dev);
	struct sock *sk = gp->sk;
	int len, err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		break;
	default:
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb_orphan(skb);
	skb_set_owner_w(skb, sk);
	len = skb->len;	/* pep_write() consumes the skb; save the length first */

	err = pep_write(sk, skb);
	if (err) {
		LIMIT_NETDEBUG(KERN_WARNING"%s: TX error (%d)\n",
				dev->name, err);
		dev->stats.tx_aborted_errors++;
		dev->stats.tx_errors++;
	} else {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += len;
	}

	netif_stop_queue(dev);
	if (pep_writeable(sk))
		netif_wake_queue(dev);
	return NETDEV_TX_OK;
}
/* ndo_change_mtu: accept MTUs from 576 up to PHONET_MAX_MTU minus the
 * 11 bytes of pipe overhead. */
static int gprs_set_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu >= 576 && new_mtu <= PHONET_MAX_MTU - 11) {
		dev->mtu = new_mtu;
		return 0;
	}
	return -EINVAL;
}
/* net_device callbacks for the GPRS pipe interface. */
static const struct net_device_ops gprs_netdev_ops = {
	.ndo_open	= gprs_open,
	.ndo_stop	= gprs_close,
	.ndo_start_xmit	= gprs_xmit,
	.ndo_change_mtu	= gprs_set_mtu,
};
/* Initialize a freshly allocated GPRS net device: point-to-point link
 * with no L2 header or hardware address, freed automatically on
 * unregister via the free_netdev destructor. */
static void gprs_setup(struct net_device *dev)
{
	dev->features		= NETIF_F_FRAGLIST;
	dev->type		= ARPHRD_PHONET_PIPE;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu		= GPRS_DEFAULT_MTU;
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 10;

	dev->netdev_ops		= &gprs_netdev_ops;
	dev->destructor		= free_netdev;
}
/*
* External interface
*/
/*
 * Attach a GPRS interface to a datagram socket.
 * Returns the interface index on success, negative error code on error.
 *
 * The net device is registered first; only then is the socket validated
 * and hooked (its state_change/data_ready/write_space callbacks saved
 * and replaced) under the socket lock.  A reference to the socket is
 * held until gprs_detach().
 */
int gprs_attach(struct sock *sk)
{
	static const char ifname[] = "gprs%d";
	struct gprs_dev *gp;
	struct net_device *dev;
	int err;

	if (unlikely(sk->sk_type == SOCK_STREAM))
		return -EINVAL; /* need packet boundaries */

	/* Create net device */
	dev = alloc_netdev(sizeof(*gp), ifname, gprs_setup);
	if (!dev)
		return -ENOMEM;
	gp = netdev_priv(dev);
	gp->sk = sk;
	gp->dev = dev;
	netif_stop_queue(dev);
	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}

	lock_sock(sk);
	if (unlikely(sk->sk_user_data)) {
		err = -EBUSY;	/* socket already attached elsewhere */
		goto out_rel;
	}
	if (unlikely((1 << sk->sk_state & (TCPF_CLOSE|TCPF_LISTEN)) ||
			sock_flag(sk, SOCK_DEAD))) {
		err = -EINVAL;	/* socket not usable as a pipe endpoint */
		goto out_rel;
	}
	sk->sk_user_data = gp;
	gp->old_state_change = sk->sk_state_change;
	gp->old_data_ready = sk->sk_data_ready;
	gp->old_write_space = sk->sk_write_space;
	sk->sk_state_change = gprs_state_change;
	sk->sk_data_ready = gprs_data_ready;
	sk->sk_write_space = gprs_write_space;
	release_sock(sk);
	sock_hold(sk);	/* dropped in gprs_detach() */

	printk(KERN_DEBUG"%s: attached\n", dev->name);
	return dev->ifindex;

out_rel:
	release_sock(sk);
	unregister_netdev(dev);
	return err;
}
/*
 * Detach the GPRS interface from its socket: restore the saved socket
 * callbacks under the socket lock, then unregister the net device and
 * drop the socket reference taken in gprs_attach().
 */
void gprs_detach(struct sock *sk)
{
	struct gprs_dev *gp = sk->sk_user_data;
	struct net_device *dev = gp->dev;

	lock_sock(sk);
	sk->sk_user_data = NULL;
	sk->sk_state_change = gp->old_state_change;
	sk->sk_data_ready = gp->old_data_ready;
	sk->sk_write_space = gp->old_write_space;
	release_sock(sk);

	printk(KERN_DEBUG"%s: detached\n", dev->name);
	unregister_netdev(dev);
	sock_put(sk);
}
| gpl-2.0 |
civato/Note8.0-StormCharger-Android-4.2.2 | drivers/mtd/nand/tmio_nand.c | 2783 | 14905 | /*
* Toshiba TMIO NAND flash controller driver
*
* Slightly murky pre-git history of the driver:
*
* Copyright (c) Ian Molton 2004, 2005, 2008
* Original work, independent of sharps code. Included hardware ECC support.
* Hard ECC did not work for writes in the early revisions.
* Copyright (c) Dirk Opfer 2005.
* Modifications developed from sharps code but
* NOT containing any, ported onto Ians base.
* Copyright (c) Chris Humbert 2005
* Copyright (c) Dmitry Baryshkov 2008
* Minor fixes
*
* Parts copyright Sebastian Carlier
*
* This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
/*--------------------------------------------------------------------------*/

/*
 * NAND Flash Host Controller Configuration Register
 */
#define CCR_COMMAND	0x04	/* w Command				*/
#define CCR_BASE	0x10	/* l NAND Flash Control Reg Base Addr	*/
#define CCR_INTP	0x3d	/* b Interrupt Pin			*/
#define CCR_INTE	0x48	/* b Interrupt Enable			*/
#define CCR_EC		0x4a	/* b Event Control			*/
#define CCR_ICC		0x4c	/* b Internal Clock Control		*/
#define CCR_ECCC	0x5b	/* b ECC Control			*/
#define CCR_NFTC	0x60	/* b NAND Flash Transaction Control	*/
#define CCR_NFM		0x61	/* b NAND Flash Monitor			*/
#define CCR_NFPSC	0x62	/* b NAND Flash Power Supply Control	*/
#define CCR_NFDC	0x63	/* b NAND Flash Detect Control		*/

/*
 * NAND Flash Control Register
 */
#define FCR_DATA	0x00	/* bwl Data Register			*/
#define FCR_MODE	0x04	/* b Mode Register			*/
#define FCR_STATUS	0x05	/* b Status Register			*/
#define FCR_ISR		0x06	/* b Interrupt Status Register		*/
#define FCR_IMR		0x07	/* b Interrupt Mask Register		*/

/* FCR_MODE Register Command List */
#define FCR_MODE_DATA		0x94	/* Data Data_Mode		*/
#define FCR_MODE_COMMAND	0x95	/* Data Command_Mode		*/
#define FCR_MODE_ADDRESS	0x96	/* Data Address_Mode		*/

#define FCR_MODE_HWECC_CALC	0xB4	/* HW-ECC Data			*/
#define FCR_MODE_HWECC_RESULT	0xD4	/* HW-ECC Calc result Read_Mode	*/
#define FCR_MODE_HWECC_RESET	0xF4	/* HW-ECC Reset			*/

#define FCR_MODE_POWER_ON	0x0C	/* Power Supply ON to SSFDC card */
#define FCR_MODE_POWER_OFF	0x08	/* Power Supply OFF to SSFDC card */

#define FCR_MODE_LED_OFF	0x00	/* LED OFF			*/
#define FCR_MODE_LED_ON		0x04	/* LED ON			*/

#define FCR_MODE_EJECT_ON	0x68	/* Ejection events active	*/
#define FCR_MODE_EJECT_OFF	0x08	/* Ejection events ignored	*/

#define FCR_MODE_LOCK		0x6C	/* Lock_Mode. Eject Switch Invalid */
#define FCR_MODE_UNLOCK		0x0C	/* UnLock_Mode. Eject Switch is valid */

#define FCR_MODE_CONTROLLER_ID	0x40	/* Controller ID Read		*/
#define FCR_MODE_STANDBY	0x00	/* SSFDC card Changes Standby State */

/* Individual control bits of the FCR_MODE register */
#define FCR_MODE_WE		0x80
#define FCR_MODE_ECC1		0x40
#define FCR_MODE_ECC0		0x20
#define FCR_MODE_CE		0x10
#define FCR_MODE_PCNT1		0x08
#define FCR_MODE_PCNT0		0x04
#define FCR_MODE_ALE		0x02
#define FCR_MODE_CLE		0x01

#define FCR_STATUS_BUSY		0x80	/* set while the chip is busy */
/* Driver state: MTD/NAND core objects plus the mapped register windows. */
struct tmio_nand {
	struct mtd_info mtd;
	struct nand_chip chip;

	struct platform_device *dev;

	void __iomem *ccr;	/* configuration register window */
	void __iomem *fcr;	/* flash control register window */
	unsigned long fcr_base;

	unsigned int irq;

	/* for tmio_nand_read_byte */
	u8 read;		/* buffered high byte of the last 16-bit read */
	unsigned read_good:1;	/* nonzero => 'read' holds a valid byte */
};

#define mtd_to_tmio(m)	container_of(m, struct tmio_nand, mtd)

#ifdef CONFIG_MTD_CMDLINE_PARTS
static const char *part_probes[] = { "cmdlinepart", NULL };
#endif
/*--------------------------------------------------------------------------*/
/*
 * NAND hwcontrol hook: reprogram the FCR mode register whenever the
 * control lines change (CE/CLE/ALE), then latch the command byte, if
 * any, into the data port.
 */
static void tmio_nand_hwcontrol(struct mtd_info *mtd, int cmd,
		unsigned int ctrl)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);
	struct nand_chip *chip = mtd->priv;

	if (ctrl & NAND_CTRL_CHANGE) {
		u8 mode = FCR_MODE_STANDBY;

		if (ctrl & NAND_NCE) {
			/* FCR_MODE_DATA has neither CLE nor ALE set, so
			 * only OR-in the requested address/command bits. */
			mode = FCR_MODE_DATA;
			if (ctrl & NAND_CLE)
				mode |= FCR_MODE_CLE;
			if (ctrl & NAND_ALE)
				mode |= FCR_MODE_ALE;
		}
		tmio_iowrite8(mode, tmio->fcr + FCR_MODE);
		tmio->read_good = 0;	/* invalidate the read-byte buffer */
	}

	if (cmd != NAND_CMD_NONE)
		tmio_iowrite8(cmd, chip->IO_ADDR_W);
}
/* Ready/busy poll: returns nonzero when the busy bit is clear. */
static int tmio_nand_dev_ready(struct mtd_info *mtd)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);

	return !(tmio_ioread8(tmio->fcr + FCR_STATUS) & FCR_STATUS_BUSY);
}
/* RDYREQ interrupt handler: mask further interrupts and wake the waiter
 * sleeping in tmio_nand_wait(). */
static irqreturn_t tmio_irq(int irq, void *__tmio)
{
	struct tmio_nand *tmio = __tmio;
	struct nand_chip *nand_chip = &tmio->chip;

	/* disable RDYREQ interrupt */
	tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);

	if (unlikely(!waitqueue_active(&nand_chip->controller->wq)))
		dev_warn(&tmio->dev->dev, "spurious interrupt\n");

	wake_up(&nand_chip->controller->wq);
	return IRQ_HANDLED;
}
/*
*The TMIO core has a RDYREQ interrupt on the posedge of #SMRB.
*This interrupt is normally disabled, but for long operations like
*erase and write, we enable it to wake us up. The irq handler
*disables the interrupt.
*/
static int
tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);
	long timeout;

	/* enable RDYREQ interrupt */
	tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
	tmio_iowrite8(0x81, tmio->fcr + FCR_IMR);

	/* budget: 400 ms for an erase, 20 ms for a program */
	timeout = wait_event_timeout(nand_chip->controller->wq,
		tmio_nand_dev_ready(mtd),
		msecs_to_jiffies(nand_chip->state == FL_ERASING ? 400 : 20));

	if (unlikely(!tmio_nand_dev_ready(mtd))) {
		/* still busy: mask the interrupt ourselves and complain */
		tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
		dev_warn(&tmio->dev->dev, "still busy with %s after %d ms\n",
			nand_chip->state == FL_ERASING ? "erase" : "program",
			nand_chip->state == FL_ERASING ? 400 : 20);

	} else if (unlikely(!timeout)) {
		/* device is ready but the IRQ wakeup never arrived */
		tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
		dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
	}

	/* return the chip's status byte, as the NAND core expects */
	nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	return nand_chip->read_byte(mtd);
}
/*
*The TMIO controller combines two 8-bit data bytes into one 16-bit
*word. This function separates them so nand_base.c works as expected,
*especially its NAND_CMD_READID routines.
*
*To prevent stale data from being read, tmio_nand_hwcontrol() clears
*tmio->read_good.
*/
static u_char tmio_nand_read_byte(struct mtd_info *mtd)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);
	unsigned int data;

	/* If the previous call buffered the high byte, return it now.
	 * read_good is a 1-bit field, so the post-decrement of 0 wraps
	 * it back to 1 — that wrap is what arms the buffer below. */
	if (tmio->read_good--)
		return tmio->read;

	/* Fresh 16-bit word: return the low byte, cache the high byte
	 * for the next call (read_good became 1 via the wrap above). */
	data = tmio_ioread16(tmio->fcr + FCR_DATA);
	tmio->read = data >> 8;
	return data;
}
/*
*The TMIO controller converts an 8-bit NAND interface to a 16-bit
*bus interface, so all data reads and writes must be 16-bit wide.
*Thus, we implement 16-bit versions of the read, write, and verify
*buffer functions.
*/
/*
 * 16-bit wide buffer write: the bus is 16 bits, so len/2 words are
 * transferred.  NOTE(review): an odd trailing byte is silently dropped
 * by the len >> 1 -- callers presumably always pass even lengths.
 */
static void
tmio_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);

	tmio_iowrite16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
}

/* 16-bit wide buffer read, mirroring tmio_nand_write_buf(). */
static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);

	tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
}
/*
 * Compare buffer contents against the data streaming from the chip,
 * one 16-bit word at a time.  Returns 0 on match, -EFAULT on the first
 * mismatching word.
 */
static int
tmio_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);
	const u16 *expected = (const u16 *)buf;
	int words = len >> 1;

	while (words--) {
		if (tmio_ioread16(tmio->fcr + FCR_DATA) != *expected++)
			return -EFAULT;
	}

	return 0;
}
/*
 * Arm the hardware ECC engine: reset it, do the dummy data-port read,
 * then switch the controller into calculate mode.  The "mode" argument
 * from the ecc.hwctl contract is unused here.
 */
static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);

	tmio_iowrite8(FCR_MODE_HWECC_RESET, tmio->fcr + FCR_MODE);
	tmio_ioread8(tmio->fcr + FCR_DATA);	/* dummy read */
	tmio_iowrite8(FCR_MODE_HWECC_CALC, tmio->fcr + FCR_MODE);
}
/*
 * Fetch the 6 ECC bytes computed by the controller.
 *
 * Three 16-bit reads in RESULT mode deliver the line/column parity for
 * the two 256-byte halves of the 512-byte ECC step; the per-assignment
 * comments give the bit layout.  The mode register is restored to DATA
 * before returning.  "dat" (the raw data) is not needed -- the hardware
 * accumulated the parity while it streamed through.
 */
static int tmio_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
		u_char *ecc_code)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);
	unsigned int ecc;

	tmio_iowrite8(FCR_MODE_HWECC_RESULT, tmio->fcr + FCR_MODE);

	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
	ecc_code[1] = ecc;	/* 000-255 LP7-0 */
	ecc_code[0] = ecc >> 8;	/* 000-255 LP15-8 */
	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
	ecc_code[2] = ecc;	/* 000-255 CP5-0,11b */
	ecc_code[4] = ecc >> 8;	/* 256-511 LP7-0 */
	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
	ecc_code[3] = ecc;	/* 256-511 LP15-8 */
	ecc_code[5] = ecc >> 8;	/* 256-511 CP5-0,11b */

	tmio_iowrite8(FCR_MODE_DATA, tmio->fcr + FCR_MODE);
	return 0;
}
/*
 * Correct a 512-byte ECC step as two independent 256-byte halves
 * (ecc.size = 512, ecc.bytes = 6, i.e. 3 ECC bytes per half).
 * Returns the total number of corrected bitflips, or the first
 * negative error from __nand_correct_data().
 */
static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
		unsigned char *read_ecc, unsigned char *calc_ecc)
{
	int first_half, second_half;

	first_half = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
	if (first_half < 0)
		return first_half;

	second_half = __nand_correct_data(buf + 256, read_ecc + 3,
					  calc_ecc + 3, 256);
	if (second_half < 0)
		return second_half;

	return first_half + second_half;
}
/*
 * Bring the controller out of reset.
 *
 * Powers up the MFD cell (when the platform provides an enable hook),
 * then walks the register init sequence: clock enable, FCR base address
 * programming, command/power/detect setup, interrupt status clear, a
 * NAND_CMD_RESET to the media, and finally standby mode.  The per-write
 * comments carry the register offsets/names from the original code.
 */
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);
	int ret;

	if (cell->enable) {
		ret = cell->enable(dev);
		if (ret)
			return ret;
	}

	/* (4Ch) CLKRUN Enable 1st spcrunc */
	tmio_iowrite8(0x81, tmio->ccr + CCR_ICC);

	/* (10h)BaseAddress 0x1000 spba.spba2 */
	tmio_iowrite16(tmio->fcr_base, tmio->ccr + CCR_BASE);
	tmio_iowrite16(tmio->fcr_base >> 16, tmio->ccr + CCR_BASE + 2);

	/* (04h)Command Register I/O spcmd */
	tmio_iowrite8(0x02, tmio->ccr + CCR_COMMAND);

	/* (62h) Power Supply Control ssmpwc */
	/* HardPowerOFF - SuspendOFF - PowerSupplyWait_4MS */
	tmio_iowrite8(0x02, tmio->ccr + CCR_NFPSC);

	/* (63h) Detect Control ssmdtc */
	tmio_iowrite8(0x02, tmio->ccr + CCR_NFDC);

	/* Interrupt status register clear sintst */
	tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);

	/* After power supply, Media are reset smode */
	tmio_iowrite8(FCR_MODE_POWER_ON, tmio->fcr + FCR_MODE);
	tmio_iowrite8(FCR_MODE_COMMAND, tmio->fcr + FCR_MODE);
	tmio_iowrite8(NAND_CMD_RESET, tmio->fcr + FCR_DATA);

	/* Standby Mode smode */
	tmio_iowrite8(FCR_MODE_STANDBY, tmio->fcr + FCR_MODE);

	/* settle time after the reset/standby sequence */
	mdelay(5);
	return 0;
}
/*
 * Power the NAND side down: put the FCR in power-off mode, then let the
 * MFD cell's disable hook (if any) shut off the shared resources.
 */
static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);

	tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
	if (cell->disable)
		cell->disable(dev);
}
static int tmio_probe(struct platform_device *dev)
{
struct tmio_nand_data *data = dev->dev.platform_data;
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
struct resource *ccr = platform_get_resource(dev,
IORESOURCE_MEM, 1);
int irq = platform_get_irq(dev, 0);
struct tmio_nand *tmio;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
struct mtd_partition *parts;
int nbparts = 0;
int retval;
if (data == NULL)
dev_warn(&dev->dev, "NULL platform data!\n");
tmio = kzalloc(sizeof *tmio, GFP_KERNEL);
if (!tmio) {
retval = -ENOMEM;
goto err_kzalloc;
}
tmio->dev = dev;
platform_set_drvdata(dev, tmio);
mtd = &tmio->mtd;
nand_chip = &tmio->chip;
mtd->priv = nand_chip;
mtd->name = "tmio-nand";
tmio->ccr = ioremap(ccr->start, resource_size(ccr));
if (!tmio->ccr) {
retval = -EIO;
goto err_iomap_ccr;
}
tmio->fcr_base = fcr->start & 0xfffff;
tmio->fcr = ioremap(fcr->start, resource_size(fcr));
if (!tmio->fcr) {
retval = -EIO;
goto err_iomap_fcr;
}
retval = tmio_hw_init(dev, tmio);
if (retval)
goto err_hwinit;
/* Set address of NAND IO lines */
nand_chip->IO_ADDR_R = tmio->fcr;
nand_chip->IO_ADDR_W = tmio->fcr;
/* Set address of hardware control function */
nand_chip->cmd_ctrl = tmio_nand_hwcontrol;
nand_chip->dev_ready = tmio_nand_dev_ready;
nand_chip->read_byte = tmio_nand_read_byte;
nand_chip->write_buf = tmio_nand_write_buf;
nand_chip->read_buf = tmio_nand_read_buf;
nand_chip->verify_buf = tmio_nand_verify_buf;
/* set eccmode using hardware ECC */
nand_chip->ecc.mode = NAND_ECC_HW;
nand_chip->ecc.size = 512;
nand_chip->ecc.bytes = 6;
nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
nand_chip->ecc.correct = tmio_nand_correct_data;
if (data)
nand_chip->badblock_pattern = data->badblock_pattern;
/* 15 us command delay time */
nand_chip->chip_delay = 15;
retval = request_irq(irq, &tmio_irq,
IRQF_DISABLED, dev_name(&dev->dev), tmio);
if (retval) {
dev_err(&dev->dev, "request_irq error %d\n", retval);
goto err_irq;
}
tmio->irq = irq;
nand_chip->waitfunc = tmio_nand_wait;
/* Scan to find existence of the device */
if (nand_scan(mtd, 1)) {
retval = -ENODEV;
goto err_scan;
}
/* Register the partitions */
#ifdef CONFIG_MTD_CMDLINE_PARTS
nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
#endif
if (nbparts <= 0 && data) {
parts = data->partition;
nbparts = data->num_partitions;
}
retval = mtd_device_register(mtd, parts, nbparts);
if (!retval)
return retval;
nand_release(mtd);
err_scan:
if (tmio->irq)
free_irq(tmio->irq, tmio);
err_irq:
tmio_hw_stop(dev, tmio);
err_hwinit:
iounmap(tmio->fcr);
err_iomap_fcr:
iounmap(tmio->ccr);
err_iomap_ccr:
kfree(tmio);
err_kzalloc:
return retval;
}
/*
 * Teardown mirror of tmio_probe(): release the MTD, free the irq, stop
 * the hardware, unmap both register windows and free the driver state.
 */
static int tmio_remove(struct platform_device *dev)
{
	struct tmio_nand *tmio = platform_get_drvdata(dev);

	nand_release(&tmio->mtd);
	if (tmio->irq)
		free_irq(tmio->irq, tmio);
	tmio_hw_stop(dev, tmio);
	iounmap(tmio->fcr);
	iounmap(tmio->ccr);
	kfree(tmio);
	return 0;
}
#ifdef CONFIG_PM
/*
 * PM suspend: let the MFD cell run its suspend hook first, then power
 * the NAND controller down.
 * NOTE(review): cell->suspend()'s return value is ignored here.
 */
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);

	if (cell->suspend)
		cell->suspend(dev);
	tmio_hw_stop(dev, platform_get_drvdata(dev));
	return 0;
}
/*
 * PM resume: re-run the full hardware init, then let the MFD cell run
 * its resume hook.  tmio_hw_init()'s return value is ignored.
 */
static int tmio_resume(struct platform_device *dev)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);

	/* FIXME - is this required or merely another attack of the broken
	 * SHARP platform? Looks suspicious.
	 */
	tmio_hw_init(dev, platform_get_drvdata(dev));

	if (cell->resume)
		cell->resume(dev);

	return 0;
}
#else
#define tmio_suspend NULL
#define tmio_resume NULL
#endif
static struct platform_driver tmio_driver = {
.driver.name = "tmio-nand",
.driver.owner = THIS_MODULE,
.probe = tmio_probe,
.remove = tmio_remove,
.suspend = tmio_suspend,
.resume = tmio_resume,
};
/* Register the platform driver at module load. */
static int __init tmio_init(void)
{
	return platform_driver_register(&tmio_driver);
}

/* Unregister on module unload. */
static void __exit tmio_exit(void)
{
	platform_driver_unregister(&tmio_driver);
}

module_init(tmio_init);
module_exit(tmio_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
MODULE_DESCRIPTION("NAND flash driver on Toshiba Mobile IO controller");
MODULE_ALIAS("platform:tmio-nand");
| gpl-2.0 |
championswimmer/android_kernel_sony_seagull | arch/arm/mach-msm/board-msm8x60-vcm.c | 3295 | 3442 | /* Copyright (c) 2010, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/vcm.h>
#include <linux/vcm_alloc.h>
#define MSM_SMI_BASE 0x38000000
#define MSM_SMI_SIZE 0x04000000
#define SMI_16M 0
#define SMI_1M 1
#define SMI_64K 2
#define SMI_4K 3
#define EBI_16M 4
#define EBI_1M 5
#define EBI_64K 6
#define EBI_4K 7
static void free_ebi_pools(void);
/*
 * Physical memory pools handed to the VCM allocator.  The SMI pools are
 * carved from the fixed aperture at MSM_SMI_BASE; the EBI pools carry
 * .addr == 0 as a placeholder and are filled in from bootmem by
 * msm8x60_vcm_init().  Each entry covers .size bytes split into
 * .chunk_size allocation units; the SMI_xx / EBI_xx macros above name
 * the array slots.
 */
static struct physmem_region memory[] = {
	{	/* SMI 16M */
		.addr = MSM_SMI_BASE,
		.size = SZ_16M,
		.chunk_size = SZ_16M
	},
	{	/* SMI 1M */
		.addr = MSM_SMI_BASE + SZ_16M,
		.size = SZ_8M,
		.chunk_size = SZ_1M
	},
	{	/* SMI 64K */
		.addr = MSM_SMI_BASE + SZ_16M + SZ_8M,
		.size = SZ_4M,
		.chunk_size = SZ_64K
	},
	{	/* SMI 4K */
		.addr = MSM_SMI_BASE + SZ_16M + SZ_8M + SZ_4M,
		.size = SZ_4M,
		.chunk_size = SZ_4K
	},
	{	/* EBI 16M */
		.addr = 0,
		.size = SZ_16M,
		.chunk_size = SZ_16M
	},
	{	/* EBI 1M */
		.addr = 0,
		.size = SZ_8M,
		.chunk_size = SZ_1M
	},
	{	/* EBI 64K */
		.addr = 0,
		.size = SZ_4M,
		.chunk_size = SZ_64K
	},
	{	/* EBI 4K */
		.addr = 0,
		.size = SZ_4M,
		.chunk_size = SZ_4K
	}
};
/* The pool priority MUST be in descending order of size */
/*
 * Memory-type -> pool lookup table passed to vcm_sys_init().  For each
 * memtype, pool_id lists candidate pools -- per the comment above, in
 * descending chunk-size order -- and num_pools says how many entries
 * are valid.
 */
static struct vcm_memtype_map mt_map[] __initdata = {
	{
		/* MEMTYPE_0: SMI only */
		.pool_id = {SMI_16M, SMI_1M, SMI_64K, SMI_4K},
		.num_pools = 4,
	},
	{
		/* MEMTYPE_1: SMI, falling back to EBI for 4K chunks */
		.pool_id = {SMI_16M, SMI_1M, SMI_64K, EBI_4K},
		.num_pools = 4,
	},
	{	/* MEMTYPE_2: EBI only */
		.pool_id = {EBI_16M, EBI_1M, EBI_64K, EBI_4K},
		.num_pools = 4,
	},
	{
		/* MEMTYPE_3: interleaved SMI/EBI preference */
		.pool_id = {SMI_16M, SMI_1M, EBI_1M, SMI_64K, EBI_64K, EBI_4K},
		.num_pools = 6,
	}
};
/*
 * Boot-time VCM bring-up.
 *
 * EBI pool entries in memory[] still carry .addr == 0; allocate their
 * backing from bootmem (naturally aligned to their own size) and record
 * the physical addresses, then hand the pool table plus the memtype map
 * to vcm_sys_init(), which also receives the top 8M of the SMI aperture
 * for its own bookkeeping.  On any failure the EBI chunks allocated so
 * far are returned via free_ebi_pools().
 *
 * Fix: dropped the stray ';' that followed the closing brace of this
 * function definition (not valid ISO C at file scope).
 */
static int __init msm8x60_vcm_init(void)
{
	int ret, i;
	void *ebi_chunk;

	for (i = 0; i < ARRAY_SIZE(memory); i++) {
		if (memory[i].addr == 0) {
			ebi_chunk = __alloc_bootmem(memory[i].size,
						    memory[i].size, 0);
			if (!ebi_chunk) {
				pr_err("Could not allocate VCM-managed physical"
				       " memory\n");
				ret = -ENOMEM;
				goto fail;
			}
			memory[i].addr = __pa(ebi_chunk);
		}
	}

	ret = vcm_sys_init(memory, ARRAY_SIZE(memory),
			   mt_map, ARRAY_SIZE(mt_map),
			   (void *)MSM_SMI_BASE + MSM_SMI_SIZE - SZ_8M, SZ_8M);
	if (ret != 0) {
		pr_err("vcm_sys_init() ret %i\n", ret);
		goto fail;
	}

	return 0;
fail:
	free_ebi_pools();
	return ret;
}
/*
 * Return the bootmem backing of the EBI pools.  An entry is treated as
 * EBI-backed iff its physical address lies above the SMI aperture; the
 * same test also skips entries still at their build-time value of 0.
 * NOTE(review): this relies on EBI DDR living above
 * MSM_SMI_BASE + MSM_SMI_SIZE on this platform -- confirm before reuse.
 */
static void free_ebi_pools(void)
{
	int i;
	phys_addr_t r;

	for (i = 0; i < ARRAY_SIZE(memory); i++) {
		r = memory[i].addr;
		if (r > MSM_SMI_BASE + MSM_SMI_SIZE)
			free_bootmem((unsigned long)__va(r), memory[i].size);
	}
}
/* Useful for testing, and if VCM is ever unloaded */
static void __exit msm8x60_vcm_exit(void)
{
	int ret = vcm_sys_destroy();

	if (ret != 0) {
		pr_err("vcm_sys_destroy() ret %i\n", ret);
		return;
	}

	/* Only reclaim the EBI bootmem once VCM has let go of it. */
	free_ebi_pools();
}
subsys_initcall(msm8x60_vcm_init);
module_exit(msm8x60_vcm_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
| gpl-2.0 |
xjljian/huawei-kernel-3.4 | fs/reiserfs/procfs.c | 4831 | 14971 | /* -*- linux-c -*- */
/* fs/reiserfs/procfs.c */
/*
* Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
*/
/* proc info support a la one created by Sizif@Botik.RU for PGC */
#include <linux/module.h>
#include <linux/time.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include "reiserfs.h"
#include <linux/init.h>
#include <linux/proc_fs.h>
/*
* LOCKING:
*
* We rely on new Alexander Viro's super-block locking.
*
*/
/*
 * /proc/fs/reiserfs/<dev>/version: report the on-disk format in use
 * (3.6, 3.5 or unknown, from s_properties) and whether the kernel was
 * built with CONFIG_REISERFS_CHECK.
 */
static int show_version(struct seq_file *m, struct super_block *sb)
{
	char *format;

	if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) {
		format = "3.6";
	} else if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_5)) {
		format = "3.5";
	} else {
		format = "unknown";
	}

	seq_printf(m, "%s format\twith checks %s\n", format,
#if defined( CONFIG_REISERFS_CHECK )
		   "on"
#else
		   "off"
#endif
	    );
	return 0;
}
/*
 * Field-access shorthands used by the show_* routines below.
 *   SF/SFP/SFPL/SFPF/SFPJ - in-core reiserfs_sb_info statistics; a
 *     local "r" must be in scope (SFPL additionally uses "level").
 *   D2C/D4C/DF/DFL        - little-endian on-disk superblock fields;
 *     a local "rs" must be in scope.
 *   objectid_map/MAP      - locate and index the objectid map, whose
 *     position after the superblock depends on the disk format.
 *   DJF/DJV/DJP/JF        - journal fields: on-disk (rs / s_v1 / jp)
 *     and in-core (r->s_journal).
 */
#define SF( x ) ( r -> x )
#define SFP( x ) SF( s_proc_info_data.x )
#define SFPL( x ) SFP( x[ level ] )
#define SFPF( x ) SFP( scan_bitmap.x )
#define SFPJ( x ) SFP( journal.x )

#define D2C( x ) le16_to_cpu( x )
#define D4C( x ) le32_to_cpu( x )
#define DF( x ) D2C( rs -> s_v1.x )
#define DFL( x ) D4C( rs -> s_v1.x )

#define objectid_map( s, rs ) (old_format_only (s) ?				\
			(__le32 *)((struct reiserfs_super_block_v1 *)rs + 1) :	\
			(__le32 *)(rs + 1))
#define MAP( i ) D4C( objectid_map( sb, rs )[ i ] )

#define DJF( x ) le32_to_cpu( rs -> x )
#define DJV( x ) le32_to_cpu( s_v1 -> x )
#define DJP( x ) le32_to_cpu( jp -> x )
#define JF( x ) ( r -> s_journal -> x )
/*
 * /proc/fs/reiserfs/<dev>/super: dump mount state, the active mount
 * option flags, and the in-core statistic counters held in
 * reiserfs_sb_info (via the SF/SFP macros).  Values are supplied in the
 * same order as the format-string lines.
 */
static int show_super(struct seq_file *m, struct super_block *sb)
{
	struct reiserfs_sb_info *r = REISERFS_SB(sb);

	seq_printf(m, "state: \t%s\n"
		   "mount options: \t%s%s%s%s%s%s%s%s%s%s%s\n"
		   "gen. counter: \t%i\n"
		   "s_disk_reads: \t%i\n"
		   "s_disk_writes: \t%i\n"
		   "s_fix_nodes: \t%i\n"
		   "s_do_balance: \t%i\n"
		   "s_unneeded_left_neighbor: \t%i\n"
		   "s_good_search_by_key_reada: \t%i\n"
		   "s_bmaps: \t%i\n"
		   "s_bmaps_without_search: \t%i\n"
		   "s_direct2indirect: \t%i\n"
		   "s_indirect2direct: \t%i\n"
		   "\n"
		   "max_hash_collisions: \t%i\n"
		   "breads: \t%lu\n"
		   "bread_misses: \t%lu\n"
		   "search_by_key: \t%lu\n"
		   "search_by_key_fs_changed: \t%lu\n"
		   "search_by_key_restarted: \t%lu\n"
		   "insert_item_restarted: \t%lu\n"
		   "paste_into_item_restarted: \t%lu\n"
		   "cut_from_item_restarted: \t%lu\n"
		   "delete_solid_item_restarted: \t%lu\n"
		   "delete_item_restarted: \t%lu\n"
		   "leaked_oid: \t%lu\n"
		   "leaves_removable: \t%lu\n",
		   SF(s_mount_state) == REISERFS_VALID_FS ?
		   "REISERFS_VALID_FS" : "REISERFS_ERROR_FS",
		   reiserfs_r5_hash(sb) ? "FORCE_R5 " : "",
		   reiserfs_rupasov_hash(sb) ? "FORCE_RUPASOV " : "",
		   reiserfs_tea_hash(sb) ? "FORCE_TEA " : "",
		   reiserfs_hash_detect(sb) ? "DETECT_HASH " : "",
		   reiserfs_no_border(sb) ? "NO_BORDER " : "BORDER ",
		   reiserfs_no_unhashed_relocation(sb) ?
		   "NO_UNHASHED_RELOCATION " : "",
		   reiserfs_hashed_relocation(sb) ? "UNHASHED_RELOCATION " : "",
		   reiserfs_test4(sb) ? "TEST4 " : "",
		   have_large_tails(sb) ? "TAILS " : have_small_tails(sb) ?
		   "SMALL_TAILS " : "NO_TAILS ",
		   replay_only(sb) ? "REPLAY_ONLY " : "",
		   convert_reiserfs(sb) ? "CONV " : "",
		   atomic_read(&r->s_generation_counter),
		   SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
		   SF(s_do_balance), SF(s_unneeded_left_neighbor),
		   SF(s_good_search_by_key_reada), SF(s_bmaps),
		   SF(s_bmaps_without_search), SF(s_direct2indirect),
		   SF(s_indirect2direct), SFP(max_hash_collisions), SFP(breads),
		   SFP(bread_miss), SFP(search_by_key),
		   SFP(search_by_key_fs_changed), SFP(search_by_key_restarted),
		   SFP(insert_item_restarted), SFP(paste_into_item_restarted),
		   SFP(cut_from_item_restarted),
		   SFP(delete_solid_item_restarted), SFP(delete_item_restarted),
		   SFP(leaked_oid), SFP(leaves_removable));
	return 0;
}
/*
 * /proc/fs/reiserfs/<dev>/per-level: one header row, then one row of
 * balancing / search-by-key statistics per tree level
 * (0 .. MAX_HEIGHT-1), read through the SFPL macro.
 */
static int show_per_level(struct seq_file *m, struct super_block *sb)
{
	struct reiserfs_sb_info *r = REISERFS_SB(sb);
	int level;

	seq_printf(m, "level\t"
		   "     balances"
		   " [sbk:  reads"
		   "   fs_changed"
		   "   restarted]"
		   "   free space"
		   "        items"
		   "   can_remove"
		   "         lnum"
		   "         rnum"
		   "       lbytes"
		   "       rbytes"
		   "     get_neig"
		   " get_neig_res" "  need_l_neig" "  need_r_neig" "\n");

	for (level = 0; level < MAX_HEIGHT; ++level) {
		seq_printf(m, "%i\t"
			   " %12lu"
			   " %12lu"
			   " %12lu"
			   " %12lu"
			   " %12lu"
			   " %12lu"
			   " %12lu"
			   " %12li"
			   " %12li"
			   " %12li"
			   " %12li"
			   " %12lu"
			   " %12lu"
			   " %12lu"
			   " %12lu"
			   "\n",
			   level,
			   SFPL(balance_at),
			   SFPL(sbk_read_at),
			   SFPL(sbk_fs_changed),
			   SFPL(sbk_restarted),
			   SFPL(free_at),
			   SFPL(items_at),
			   SFPL(can_node_be_removed),
			   SFPL(lnum),
			   SFPL(rnum),
			   SFPL(lbytes),
			   SFPL(rbytes),
			   SFPL(get_neighbors),
			   SFPL(get_neighbors_restart),
			   SFPL(need_l_neighbor), SFPL(need_r_neighbor)
		    );
	}
	return 0;
}
/*
 * /proc/fs/reiserfs/<dev>/bitmap: free-block count plus the
 * scan_bitmap statistic counters (SFPF macro).
 * NOTE(review): seven values are printed against six named columns;
 * the first value, SFPF(call), lands under the "scan_bitmap:" label --
 * appears intentional, but verify against consumers of this file.
 */
static int show_bitmap(struct seq_file *m, struct super_block *sb)
{
	struct reiserfs_sb_info *r = REISERFS_SB(sb);

	seq_printf(m, "free_block: %lu\n"
		   "  scan_bitmap:"
		   "          wait"
		   "          bmap"
		   "         retry"
		   "        stolen"
		   "  journal_hint"
		   "journal_nohint"
		   "\n"
		   " %14lu"
		   " %14lu"
		   " %14lu"
		   " %14lu"
		   " %14lu"
		   " %14lu"
		   " %14lu"
		   "\n",
		   SFP(free_block),
		   SFPF(call),
		   SFPF(wait),
		   SFPF(bmap),
		   SFPF(retry),
		   SFPF(stolen),
		   SFPF(in_journal_hint), SFPF(in_journal_nohint));
	return 0;
}
/*
 * /proc/fs/reiserfs/<dev>/on-disk-super: decode the cached copy of the
 * on-disk superblock (sb_info->s_rs): block counts, sizes, objectid map
 * bounds, umount/fs state, the hash function in use, version and flags.
 */
static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
{
	struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
	struct reiserfs_super_block *rs = sb_info->s_rs;
	int hash_code = DFL(s_hash_function_code);
	__u32 flags = DJF(s_flags);

	seq_printf(m, "block_count: \t%i\n"
		   "free_blocks: \t%i\n"
		   "root_block: \t%i\n"
		   "blocksize: \t%i\n"
		   "oid_maxsize: \t%i\n"
		   "oid_cursize: \t%i\n"
		   "umount_state: \t%i\n"
		   "magic: \t%10.10s\n"
		   "fs_state: \t%i\n"
		   "hash: \t%s\n"
		   "tree_height: \t%i\n"
		   "bmap_nr: \t%i\n"
		   "version: \t%i\n"
		   "flags: \t%x[%s]\n"
		   "reserved_for_journal: \t%i\n",
		   DFL(s_block_count),
		   DFL(s_free_blocks),
		   DFL(s_root_block),
		   DF(s_blocksize),
		   DF(s_oid_maxsize),
		   DF(s_oid_cursize),
		   DF(s_umount_state),
		   rs->s_v1.s_magic,
		   DF(s_fs_state),
		   hash_code == TEA_HASH ? "tea" :
		   (hash_code == YURA_HASH) ? "rupasov" :
		   (hash_code == R5_HASH) ? "r5" :
		   (hash_code == UNSET_HASH) ? "unset" : "unknown",
		   DF(s_tree_height),
		   DF(s_bmap_nr),
		   DF(s_version), flags, (flags & reiserfs_attrs_cleared)
		   ? "attrs_cleared" : "", DF(s_reserved_for_journal));
	return 0;
}
/*
 * /proc/fs/reiserfs/<dev>/oidmap: walk the objectid map.  Map entries
 * are interval boundaries that alternate used/free (even index = used);
 * the final interval is closed with MAX_KEY_OBJECTID.  Used ids are
 * totalled and reported together with current/max map size.
 */
static int show_oidmap(struct seq_file *m, struct super_block *sb)
{
	struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
	struct reiserfs_super_block *rs = sb_info->s_rs;
	unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize);
	unsigned long total_used = 0;
	int i;

	for (i = 0; i < mapsize; ++i) {
		__u32 right;

		right = (i == mapsize - 1) ? MAX_KEY_OBJECTID : MAP(i + 1);
		seq_printf(m, "%s: [ %x .. %x )\n",
			   (i & 1) ? "free" : "used", MAP(i), right);
		if (!(i & 1)) {
			total_used += right - MAP(i);
		}
	}
#if defined( REISERFS_USE_OIDMAPF )
	/* account ids recorded in the optional oidmap file as well */
	if (sb_info->oidmap.use_file && (sb_info->oidmap.mapf != NULL)) {
		loff_t size = sb_info->oidmap.mapf->f_path.dentry->d_inode->i_size;
		total_used += size / sizeof(reiserfs_oidinterval_d_t);
	}
#endif
	seq_printf(m, "total: \t%i [%i/%i] used: %lu [exact]\n",
		   mapsize,
		   mapsize, le16_to_cpu(rs->s_v1.s_oid_maxsize), total_used);
	return 0;
}
/*
 * /proc/fs/reiserfs/<dev>/journal: on-disk journal parameters (DJP),
 * in-core journal state (JF) and journal statistic counters (SFPJ), in
 * the order of the format-string lines.
 * NOTE(review): the "journal_begin:" line is fed from SFPJ(journal_being)
 * -- presumably the actual (misspelled) field name in the stats struct;
 * confirm against reiserfs_proc_info_data before "fixing" either side.
 */
static int show_journal(struct seq_file *m, struct super_block *sb)
{
	struct reiserfs_sb_info *r = REISERFS_SB(sb);
	struct reiserfs_super_block *rs = r->s_rs;
	struct journal_params *jp = &rs->s_v1.s_journal;
	char b[BDEVNAME_SIZE];

	seq_printf(m,		/* on-disk fields */
		   "jp_journal_1st_block: \t%i\n"
		   "jp_journal_dev: \t%s[%x]\n"
		   "jp_journal_size: \t%i\n"
		   "jp_journal_trans_max: \t%i\n"
		   "jp_journal_magic: \t%i\n"
		   "jp_journal_max_batch: \t%i\n"
		   "jp_journal_max_commit_age: \t%i\n"
		   "jp_journal_max_trans_age: \t%i\n"
		   /* incore fields */
		   "j_1st_reserved_block: \t%i\n"
		   "j_state: \t%li\n"
		   "j_trans_id: \t%u\n"
		   "j_mount_id: \t%lu\n"
		   "j_start: \t%lu\n"
		   "j_len: \t%lu\n"
		   "j_len_alloc: \t%lu\n"
		   "j_wcount: \t%i\n"
		   "j_bcount: \t%lu\n"
		   "j_first_unflushed_offset: \t%lu\n"
		   "j_last_flush_trans_id: \t%u\n"
		   "j_trans_start_time: \t%li\n"
		   "j_list_bitmap_index: \t%i\n"
		   "j_must_wait: \t%i\n"
		   "j_next_full_flush: \t%i\n"
		   "j_next_async_flush: \t%i\n"
		   "j_cnode_used: \t%i\n" "j_cnode_free: \t%i\n" "\n"
		   /* reiserfs_proc_info_data_t.journal fields */
		   "in_journal: \t%12lu\n"
		   "in_journal_bitmap: \t%12lu\n"
		   "in_journal_reusable: \t%12lu\n"
		   "lock_journal: \t%12lu\n"
		   "lock_journal_wait: \t%12lu\n"
		   "journal_begin: \t%12lu\n"
		   "journal_relock_writers: \t%12lu\n"
		   "journal_relock_wcount: \t%12lu\n"
		   "mark_dirty: \t%12lu\n"
		   "mark_dirty_already: \t%12lu\n"
		   "mark_dirty_notjournal: \t%12lu\n"
		   "restore_prepared: \t%12lu\n"
		   "prepare: \t%12lu\n"
		   "prepare_retry: \t%12lu\n",
		   DJP(jp_journal_1st_block),
		   bdevname(SB_JOURNAL(sb)->j_dev_bd, b),
		   DJP(jp_journal_dev),
		   DJP(jp_journal_size),
		   DJP(jp_journal_trans_max),
		   DJP(jp_journal_magic),
		   DJP(jp_journal_max_batch),
		   SB_JOURNAL(sb)->j_max_commit_age,
		   DJP(jp_journal_max_trans_age),
		   JF(j_1st_reserved_block),
		   JF(j_state),
		   JF(j_trans_id),
		   JF(j_mount_id),
		   JF(j_start),
		   JF(j_len),
		   JF(j_len_alloc),
		   atomic_read(&r->s_journal->j_wcount),
		   JF(j_bcount),
		   JF(j_first_unflushed_offset),
		   JF(j_last_flush_trans_id),
		   JF(j_trans_start_time),
		   JF(j_list_bitmap_index),
		   JF(j_must_wait),
		   JF(j_next_full_flush),
		   JF(j_next_async_flush),
		   JF(j_cnode_used),
		   JF(j_cnode_free),
		   SFPJ(in_journal),
		   SFPJ(in_journal_bitmap),
		   SFPJ(in_journal_reusable),
		   SFPJ(lock_journal),
		   SFPJ(lock_journal_wait),
		   SFPJ(journal_being),
		   SFPJ(journal_relock_writers),
		   SFPJ(journal_relock_wcount),
		   SFPJ(mark_dirty),
		   SFPJ(mark_dirty_already),
		   SFPJ(mark_dirty_notjournal),
		   SFPJ(restore_prepared), SFPJ(prepare), SFPJ(prepare_retry)
	    );
	return 0;
}
/* iterator */
/* sget() callback: match only the exact super_block we were handed. */
static int test_sb(struct super_block *sb, void *data)
{
	return data == sb;
}

/* sget() callback: never allow a new super_block to be created here. */
static int set_sb(struct super_block *sb, void *data)
{
	return -ENOENT;
}
/*
 * seq_file iterator over a single record: the super_block stored in the
 * parent proc entry.  Takes an active reference via sget() -- which
 * returns with s_umount write-held, hence the up_write() -- so the sb
 * cannot vanish while the file is generated; r_next()/r_stop() drop the
 * reference with deactivate_super().
 */
static void *r_start(struct seq_file *m, loff_t * pos)
{
	struct proc_dir_entry *de = m->private;
	struct super_block *s = de->parent->data;
	loff_t l = *pos;

	/* single-record sequence: only position 0 exists */
	if (l)
		return NULL;

	if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, s)))
		return NULL;

	up_write(&s->s_umount);
	return s;
}
/* Advance past the only record, dropping the sb reference r_start() took. */
static void *r_next(struct seq_file *m, void *v, loff_t * pos)
{
	(*pos)++;
	if (v)
		deactivate_super(v);
	return NULL;
}

/* Drop the sb reference if iteration stopped before r_next() ran. */
static void r_stop(struct seq_file *m, void *v)
{
	if (v)
		deactivate_super(v);
}
/*
 * Emit the record: the proc entry's ->data holds the show_* callback
 * (installed by add_file()) to run against the super_block produced by
 * r_start().
 */
static int r_show(struct seq_file *m, void *v)
{
	struct proc_dir_entry *de = m->private;
	int (*show) (struct seq_file *, struct super_block *) = de->data;

	return show(m, v);
}

/* Single-record sequence built from the helpers above. */
static const struct seq_operations r_ops = {
	.start = r_start,
	.next = r_next,
	.stop = r_stop,
	.show = r_show,
};
/* Open hook: standard seq_open, stashing the proc entry for r_start/r_show. */
static int r_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int err = seq_open(file, &r_ops);

	if (err)
		return err;

	m = file->private_data;
	m->private = PDE(inode);
	return 0;
}
/* file_operations shared by every per-sb proc file; seq_file does the rest. */
static const struct file_operations r_file_operations = {
	.open = r_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
	.owner = THIS_MODULE,
};
/* Root of the /proc/fs/reiserfs hierarchy, created at module init. */
static struct proc_dir_entry *proc_info_root = NULL;
static const char proc_info_root_name[] = "fs/reiserfs";

/*
 * Create one read-only file under this sb's proc directory; ->data
 * carries the show routine that r_show() will invoke.
 */
static void add_file(struct super_block *sb, char *name,
		     int (*func) (struct seq_file *, struct super_block *))
{
	proc_create_data(name, 0, REISERFS_SB(sb)->procdir,
			 &r_file_operations, func);
}
/*
 * Per-superblock init: create /proc/fs/reiserfs/<bdev>/ (with any '/'
 * in the device name rewritten to '!', since proc names cannot contain
 * slashes) and populate it with the statistic files above.
 * Returns 0 on success, 1 if the directory could not be created.
 */
int reiserfs_proc_info_init(struct super_block *sb)
{
	char b[BDEVNAME_SIZE];
	char *s;

	/* Some block devices use /'s */
	strlcpy(b, reiserfs_bdevname(sb), BDEVNAME_SIZE);
	s = strchr(b, '/');
	if (s)
		*s = '!';

	spin_lock_init(&__PINFO(sb).lock);
	REISERFS_SB(sb)->procdir = proc_mkdir(b, proc_info_root);
	if (REISERFS_SB(sb)->procdir) {
		REISERFS_SB(sb)->procdir->data = sb;
		add_file(sb, "version", show_version);
		add_file(sb, "super", show_super);
		add_file(sb, "per-level", show_per_level);
		add_file(sb, "bitmap", show_bitmap);
		add_file(sb, "on-disk-super", show_on_disk_super);
		add_file(sb, "oidmap", show_oidmap);
		add_file(sb, "journal", show_journal);
		return 0;
	}
	reiserfs_warning(sb, "cannot create /proc/%s/%s",
			 proc_info_root_name, b);
	return 1;
}
/*
 * Per-superblock teardown: remove the files and directory created by
 * reiserfs_proc_info_init() (same '/' -> '!' name rewrite), marking the
 * proc-info state as exiting under its spinlock first.
 */
int reiserfs_proc_info_done(struct super_block *sb)
{
	struct proc_dir_entry *de = REISERFS_SB(sb)->procdir;
	char b[BDEVNAME_SIZE];
	char *s;

	/* Some block devices use /'s */
	strlcpy(b, reiserfs_bdevname(sb), BDEVNAME_SIZE);
	s = strchr(b, '/');
	if (s)
		*s = '!';

	if (de) {
		remove_proc_entry("journal", de);
		remove_proc_entry("oidmap", de);
		remove_proc_entry("on-disk-super", de);
		remove_proc_entry("bitmap", de);
		remove_proc_entry("per-level", de);
		remove_proc_entry("super", de);
		remove_proc_entry("version", de);
	}
	spin_lock(&__PINFO(sb).lock);
	__PINFO(sb).exiting = 1;
	spin_unlock(&__PINFO(sb).lock);
	if (proc_info_root) {
		remove_proc_entry(b, proc_info_root);
		REISERFS_SB(sb)->procdir = NULL;
	}
	return 0;
}
/*
 * Create the shared /proc/fs/reiserfs root once; later calls are
 * no-ops.  Returns 0 on success, 1 on failure.
 */
int reiserfs_proc_info_global_init(void)
{
	if (proc_info_root != NULL)
		return 0;

	proc_info_root = proc_mkdir(proc_info_root_name, NULL);
	if (proc_info_root != NULL)
		return 0;

	reiserfs_warning(NULL, "cannot create /proc/%s",
			 proc_info_root_name);
	return 1;
}
/* Remove the shared /proc/fs/reiserfs root, if it was ever created. */
int reiserfs_proc_info_global_done(void)
{
	if (proc_info_root == NULL)
		return 0;

	proc_info_root = NULL;
	remove_proc_entry(proc_info_root_name, NULL);
	return 0;
}
/*
* Revision 1.1.8.2 2001/07/15 17:08:42 god
* . use get_super() in procfs.c
* . remove remove_save_link() from reiserfs_do_truncate()
*
* I accept terms and conditions stated in the Legal Agreement
* (available at http://www.namesys.com/legalese.html)
*
* Revision 1.1.8.1 2001/07/11 16:48:50 god
* proc info support
*
* I accept terms and conditions stated in the Legal Agreement
* (available at http://www.namesys.com/legalese.html)
*
*/
/*
* Make Linus happy.
* Local variables:
* c-indentation-style: "K&R"
* mode-name: "LC"
* c-basic-offset: 8
* tab-width: 8
* End:
*/
| gpl-2.0 |
ShinySide/HispAsian_Lollipop_J4 | drivers/isdn/divert/divert_init.c | 5087 | 2334 | /* $Id divert_init.c,v 1.5.6.2 2001/01/24 22:18:17 kai Exp $
*
* Module init for DSS1 diversion services for i4l.
*
* Copyright 1999 by Werner Cornelius (werner@isdn4linux.de)
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include "isdn_divert.h"
MODULE_DESCRIPTION("ISDN4Linux: Call diversion support");
MODULE_AUTHOR("Werner Cornelius");
MODULE_LICENSE("GPL");
/****************************************/
/* structure containing interface to hl */
/****************************************/
/*
 * Interface descriptor registered with the lower layer through
 * DIVERT_REG_NAME(); the last three hooks start out unset here
 * (presumably filled in on successful registration -- verify against
 * the lower-layer implementation).
 */
isdn_divert_if divert_if = {
	DIVERT_IF_MAGIC,	/* magic value */
	DIVERT_CMD_REG,		/* register cmd */
	ll_callback,		/* callback routine from ll */
	NULL,			/* command still not specified */
	NULL,			/* drv_to_name */
	NULL,			/* name_to_drv */
};
/*************************/
/* Module interface code */
/* no cmd line parms */
/*************************/
/*
 * Module load: create the diversion device node, then register our
 * interface with the i4l lower layer.  Either failure unwinds what was
 * done and reports -EIO.
 */
static int __init divert_init(void)
{
	int rc;

	if (divert_dev_init()) {
		printk(KERN_WARNING "dss1_divert: cannot install device, not loaded\n");
		return -EIO;
	}

	rc = DIVERT_REG_NAME(&divert_if);
	if (rc != DIVERT_NO_ERR) {
		divert_dev_deinit();
		printk(KERN_WARNING "dss1_divert: error %d registering module, not loaded\n",
		       rc);
		return -EIO;
	}

	printk(KERN_INFO "dss1_divert module successfully installed\n");
	return 0;
}
/**********************/
/* Module deinit code */
/**********************/
/*
 * Module unload: under divert_lock, deregister from the lower layer and
 * remove the device; only if both succeed are the rules and process
 * entries torn down.  On failure we just log and bail, leaving the
 * module state as-is.
 */
static void __exit divert_exit(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&divert_lock, flags);
	divert_if.cmd = DIVERT_CMD_REL;	/* release */
	rc = DIVERT_REG_NAME(&divert_if);
	if (rc != DIVERT_NO_ERR) {
		printk(KERN_WARNING "dss1_divert: error %d releasing module\n", rc);
		spin_unlock_irqrestore(&divert_lock, flags);
		return;
	}

	if (divert_dev_deinit()) {
		printk(KERN_WARNING "dss1_divert: device busy, remove cancelled\n");
		spin_unlock_irqrestore(&divert_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&divert_lock, flags);

	deleterule(-1);		/* delete all rules and free mem */
	deleteprocs();
	printk(KERN_INFO "dss1_divert module successfully removed \n");
}
module_init(divert_init);
module_exit(divert_exit);
| gpl-2.0 |
AntaresOne/android_kernel_samsung_jf | kernel/uid16.c | 7647 | 5403 | /*
* Wrapper functions for 16bit uid back compatibility. All nicely tied
* together in the faint hope we can take the out in five years time.
*/
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <asm/uaccess.h>
/*
 * 16-bit chown family: widen the legacy uid/gid values with
 * low2highuid()/low2highgid() and forward to the native syscalls.
 * asmlinkage_protect() keeps the register-passed arguments alive (see
 * the per-call comments).
 */
SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
{
	long ret = sys_chown(filename, low2highuid(user), low2highgid(group));
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(3, ret, filename, user, group);
	return ret;
}

SYSCALL_DEFINE3(lchown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
{
	long ret = sys_lchown(filename, low2highuid(user), low2highgid(group));
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(3, ret, filename, user, group);
	return ret;
}

SYSCALL_DEFINE3(fchown16, unsigned int, fd, old_uid_t, user, old_gid_t, group)
{
	long ret = sys_fchown(fd, low2highuid(user), low2highgid(group));
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(3, ret, fd, user, group);
	return ret;
}
/*
 * 16-bit set[re[s]]uid/gid family: widen each legacy id and forward to
 * the corresponding native syscall, same pattern as the chown wrappers.
 */
SYSCALL_DEFINE2(setregid16, old_gid_t, rgid, old_gid_t, egid)
{
	long ret = sys_setregid(low2highgid(rgid), low2highgid(egid));
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(2, ret, rgid, egid);
	return ret;
}

SYSCALL_DEFINE1(setgid16, old_gid_t, gid)
{
	long ret = sys_setgid(low2highgid(gid));
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(1, ret, gid);
	return ret;
}

SYSCALL_DEFINE2(setreuid16, old_uid_t, ruid, old_uid_t, euid)
{
	long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid));
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(2, ret, ruid, euid);
	return ret;
}

SYSCALL_DEFINE1(setuid16, old_uid_t, uid)
{
	long ret = sys_setuid(low2highuid(uid));
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(1, ret, uid);
	return ret;
}

SYSCALL_DEFINE3(setresuid16, old_uid_t, ruid, old_uid_t, euid, old_uid_t, suid)
{
	long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid),
				 low2highuid(suid));
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(3, ret, ruid, euid, suid);
	return ret;
}
/*
 * getresuid16: copy the current real/effective/saved uids out, narrowed
 * with high2lowuid().  The short-circuit chain stops at the first
 * put_user() failure and returns its -EFAULT.
 */
SYSCALL_DEFINE3(getresuid16, old_uid_t __user *, ruid, old_uid_t __user *, euid, old_uid_t __user *, suid)
{
	const struct cred *cred = current_cred();
	int retval;

	if (!(retval = put_user(high2lowuid(cred->uid), ruid)) &&
	    !(retval = put_user(high2lowuid(cred->euid), euid)))
		retval = put_user(high2lowuid(cred->suid), suid);

	return retval;
}

/* setresgid16: widen and forward, as in the wrappers above. */
SYSCALL_DEFINE3(setresgid16, old_gid_t, rgid, old_gid_t, egid, old_gid_t, sgid)
{
	long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid),
				 low2highgid(sgid));
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(3, ret, rgid, egid, sgid);
	return ret;
}

/* getresgid16: gid counterpart of getresuid16 above. */
SYSCALL_DEFINE3(getresgid16, old_gid_t __user *, rgid, old_gid_t __user *, egid, old_gid_t __user *, sgid)
{
	const struct cred *cred = current_cred();
	int retval;

	if (!(retval = put_user(high2lowgid(cred->gid), rgid)) &&
	    !(retval = put_user(high2lowgid(cred->egid), egid)))
		retval = put_user(high2lowgid(cred->sgid), sgid);

	return retval;
}
/* setfsuid16/setfsgid16: widen and forward to the native fs-id syscalls. */
SYSCALL_DEFINE1(setfsuid16, old_uid_t, uid)
{
	long ret = sys_setfsuid(low2highuid(uid));
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(1, ret, uid);
	return ret;
}

SYSCALL_DEFINE1(setfsgid16, old_gid_t, gid)
{
	long ret = sys_setfsgid(low2highgid(gid));
	/* avoid REGPARM breakage on x86: */
	asmlinkage_protect(1, ret, gid);
	return ret;
}
/*
 * Copy a kernel group_info out to a userspace 16-bit grouplist,
 * narrowing each gid with high2lowgid().  Returns 0 or -EFAULT on the
 * first faulting user pointer.
 */
static int groups16_to_user(old_gid_t __user *grouplist,
    struct group_info *group_info)
{
	int i;
	old_gid_t group;

	for (i = 0; i < group_info->ngroups; i++) {
		group = high2lowgid(GROUP_AT(group_info, i));
		if (put_user(group, grouplist+i))
			return -EFAULT;
	}

	return 0;
}

/*
 * Inverse of groups16_to_user(): read a 16-bit grouplist from userspace
 * and widen each entry into group_info.  The caller sized group_info to
 * the expected count.  Returns 0 or -EFAULT.
 */
static int groups16_from_user(struct group_info *group_info,
    old_gid_t __user *grouplist)
{
	int i;
	old_gid_t group;

	for (i = 0; i < group_info->ngroups; i++) {
		if (get_user(group, grouplist+i))
			return -EFAULT;
		GROUP_AT(group_info, i) = low2highgid(group);
	}

	return 0;
}
/*
 * getgroups16: with gidsetsize == 0 just report the supplementary group
 * count; otherwise copy out up to gidsetsize entries, failing with
 * -EINVAL when the caller's buffer is too small and -EFAULT on a bad
 * pointer.
 */
SYSCALL_DEFINE2(getgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
{
	const struct cred *cred = current_cred();
	int i;

	if (gidsetsize < 0)
		return -EINVAL;

	i = cred->group_info->ngroups;
	if (gidsetsize) {
		if (i > gidsetsize) {
			i = -EINVAL;
			goto out;
		}
		if (groups16_to_user(grouplist, cred->group_info)) {
			i = -EFAULT;
			goto out;
		}
	}
out:
	return i;
}
/*
 * setgroups16: privileged (CAP_SETGID).  Builds a fresh group_info from
 * the widened 16-bit list and installs it as the current supplementary
 * groups; the local reference is dropped on every path.
 */
SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
{
	struct group_info *group_info;
	int retval;

	if (!nsown_capable(CAP_SETGID))
		return -EPERM;
	if ((unsigned)gidsetsize > NGROUPS_MAX)
		return -EINVAL;

	group_info = groups_alloc(gidsetsize);
	if (!group_info)
		return -ENOMEM;
	retval = groups16_from_user(group_info, grouplist);
	if (retval) {
		put_group_info(group_info);
		return retval;
	}

	retval = set_current_groups(group_info);
	put_group_info(group_info);

	return retval;
}
/* 16-bit id getters: narrow the current credentials for legacy callers. */
SYSCALL_DEFINE0(getuid16)
{
	return high2lowuid(current_uid());
}

SYSCALL_DEFINE0(geteuid16)
{
	return high2lowuid(current_euid());
}

SYSCALL_DEFINE0(getgid16)
{
	return high2lowgid(current_gid());
}

SYSCALL_DEFINE0(getegid16)
{
	return high2lowgid(current_egid());
}
| gpl-2.0 |
kamarush/sony_yuga_kernel | Documentation/cgroups/cgroup_event_listener.c | 8415 | 2185 | /*
* cgroup_event_listener.c - Simple listener of cgroup events
*
* Copyright (C) Kirill A. Shutemov <kirill@shutemov.name>
*/
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#define USAGE_STR "Usage: cgroup_event_listener <path-to-control-file> <args>\n"
/*
 * Register an eventfd on a cgroup control file through
 * cgroup.event_control and print a line for every event delivered,
 * until the cgroup disappears or an error occurs.
 *
 * Exit status: 0 on clean shutdown (cgroup removed), 1 on any error.
 *
 * Fix: "ret" was previously uninitialized, so failing the very first
 * open() reached "return (ret != 0)" with an indeterminate value
 * (undefined behavior).  Initialize it to -1 so every early error path
 * reports failure deterministically.
 */
int main(int argc, char **argv)
{
	int efd = -1;
	int cfd = -1;
	int event_control = -1;
	char event_control_path[PATH_MAX];
	char line[LINE_MAX];
	int ret = -1;	/* pessimistic default for early "goto out" paths */

	if (argc != 3) {
		fputs(USAGE_STR, stderr);
		return 1;
	}

	cfd = open(argv[1], O_RDONLY);
	if (cfd == -1) {
		fprintf(stderr, "Cannot open %s: %s\n", argv[1],
				strerror(errno));
		goto out;
	}

	/*
	 * NOTE(review): dirname() may modify argv[1] in place, so the
	 * later printf("%s ...", argv[1]) can print a truncated path —
	 * confirm whether that is acceptable before copying argv[1].
	 */
	ret = snprintf(event_control_path, PATH_MAX, "%s/cgroup.event_control",
			dirname(argv[1]));
	if (ret >= PATH_MAX) {
		fputs("Path to cgroup.event_control is too long\n", stderr);
		goto out;
	}

	event_control = open(event_control_path, O_WRONLY);
	if (event_control == -1) {
		fprintf(stderr, "Cannot open %s: %s\n", event_control_path,
				strerror(errno));
		goto out;
	}

	efd = eventfd(0, 0);
	if (efd == -1) {
		perror("eventfd() failed");
		goto out;
	}

	/* Registration format: "<event_fd> <control_fd> <args>". */
	ret = snprintf(line, LINE_MAX, "%d %d %s", efd, cfd, argv[2]);
	if (ret >= LINE_MAX) {
		fputs("Arguments string is too long\n", stderr);
		goto out;
	}

	ret = write(event_control, line, strlen(line) + 1);
	if (ret == -1) {
		perror("Cannot write to cgroup.event_control");
		goto out;
	}

	while (1) {
		uint64_t result;

		ret = read(efd, &result, sizeof(result));
		if (ret == -1) {
			if (errno == EINTR)
				continue;
			perror("Cannot read from eventfd");
			break;
		}
		assert(ret == sizeof(result));

		/* Distinguish "cgroup removed" from a real access failure. */
		ret = access(event_control_path, W_OK);
		if ((ret == -1) && (errno == ENOENT)) {
			puts("The cgroup seems to have removed.");
			ret = 0;
			break;
		}

		if (ret == -1) {
			perror("cgroup.event_control "
					"is not accessible any more");
			break;
		}

		printf("%s %s: crossed\n", argv[1], argv[2]);
	}

out:
	if (efd >= 0)
		close(efd);
	if (event_control >= 0)
		close(event_control);
	if (cfd >= 0)
		close(cfd);

	return (ret != 0);
}
| gpl-2.0 |
ribalda/linux-old | arch/mips/dec/kn01-berr.c | 8927 | 5192 | /*
* Bus error event handling code for DECstation/DECsystem 3100
* and 2100 (KN01) systems equipped with parity error detection
* logic.
*
* Copyright (c) 2005 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/inst.h>
#include <asm/irq_regs.h>
#include <asm/mipsregs.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/dec/kn01.h>
/* CP0 hazard avoidance. */
#define BARRIER \
__asm__ __volatile__( \
".set push\n\t" \
".set noreorder\n\t" \
"nop\n\t" \
".set pop\n\t")
/*
* Bits 7:0 of the Control Register are write-only -- the
* corresponding bits of the Status Register have a different
* meaning. Hence we use a cache. It speeds up things a bit
* as well.
*
* There is no default value -- it has to be initialized.
*/
u16 cached_kn01_csr;
static DEFINE_RAW_SPINLOCK(kn01_lock);
/*
 * Acknowledge a latched memory error: rewrite the cached CSR value with
 * KN01_CSR_MEMERR set, under the CSR lock.  The cache is needed because
 * the register's low bits are write-only (see cached_kn01_csr above).
 */
static inline void dec_kn01_be_ack(void)
{
	volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
	unsigned long flags;

	raw_spin_lock_irqsave(&kn01_lock, flags);

	*csr = cached_kn01_csr | KN01_CSR_MEMERR;	/* Clear bus IRQ. */
	iob();	/* flush the write to the bus before releasing the lock */

	raw_spin_unlock_irqrestore(&kn01_lock, flags);
}
/*
 * Common bus-error analysis for both the exception path (invoker == 0)
 * and the interrupt path (invoker != 0).  Reconstructs the faulting
 * physical address, classifies the error (memory parity vs. I/O
 * timeout) and returns a MIPS_BE_* action code; unless the error is
 * being fixed up, a description is logged at KERN_ALERT.
 */
static int dec_kn01_be_backend(struct pt_regs *regs, int is_fixup, int invoker)
{
	volatile u32 *kn01_erraddr = (void *)CKSEG1ADDR(KN01_SLOT_BASE +
							KN01_ERRADDR);

	static const char excstr[] = "exception";
	static const char intstr[] = "interrupt";
	static const char cpustr[] = "CPU";
	static const char mreadstr[] = "memory read";
	static const char readstr[] = "read";
	static const char writestr[] = "write";
	static const char timestr[] = "timeout";
	static const char paritystr[] = "parity error";

	int data = regs->cp0_cause & 4;
	unsigned int __user *pc = (unsigned int __user *)regs->cp0_epc +
				  ((regs->cp0_cause & CAUSEF_BD) != 0);
	union mips_instruction insn;
	unsigned long entrylo, offset;
	long asid, entryhi, vaddr;

	const char *kind, *agent, *cycle, *event;
	unsigned long address;

	u32 erraddr = *kn01_erraddr;

	int action = MIPS_BE_FATAL;

	/* Ack ASAP, so that any subsequent errors get caught. */
	dec_kn01_be_ack();

	kind = invoker ? intstr : excstr;

	agent = cpustr;

	if (invoker)
		address = erraddr;
	else {
		/* Bloody hardware doesn't record the address for reads... */
		if (data) {
			/* This never faults. */
			__get_user(insn.word, pc);
			/* Recompute the data address from the load insn. */
			vaddr = regs->regs[insn.i_format.rs] +
				insn.i_format.simmediate;
		} else
			vaddr = (long)pc;
		if (KSEGX(vaddr) == CKSEG0 || KSEGX(vaddr) == CKSEG1)
			address = CPHYSADDR(vaddr);
		else {
			/* Peek at what physical address the CPU used. */
			asid = read_c0_entryhi();
			entryhi = asid & (PAGE_SIZE - 1);
			entryhi |= vaddr & ~(PAGE_SIZE - 1);
			write_c0_entryhi(entryhi);
			BARRIER;
			tlb_probe();
			/* No need to check for presence. */
			tlb_read();
			entrylo = read_c0_entrylo0();
			/* Restore the original ASID. */
			write_c0_entryhi(asid);

			offset = vaddr & (PAGE_SIZE - 1);
			address = (entrylo & ~(PAGE_SIZE - 1)) | offset;
		}
	}

	/* Treat low 256MB as memory, high -- as I/O. */
	if (address < 0x10000000) {
		cycle = mreadstr;
		event = paritystr;
	} else {
		cycle = invoker ? writestr : readstr;
		event = timestr;
	}

	if (is_fixup)
		action = MIPS_BE_FIXUP;

	if (action != MIPS_BE_FIXUP)
		printk(KERN_ALERT "Bus error %s: %s %s %s at %#010lx\n",
			kind, agent, cycle, event, address);

	return action;
}
/*
 * Synchronous bus-error exception entry point: delegate to the common
 * backend, flagging exception context.
 */
int dec_kn01_be_handler(struct pt_regs *regs, int is_fixup)
{
	const int invoker = 0;	/* 0 == called from the exception path */

	return dec_kn01_be_backend(regs, is_fixup, invoker);
}
/*
 * Memory-error interrupt handler.  Returns IRQ_NONE when the CSR shows
 * no memory error (the shared line must have fired for the video logic
 * instead).  On an unrecoverable error this does not return: die().
 */
irqreturn_t dec_kn01_be_interrupt(int irq, void *dev_id)
{
	volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
	struct pt_regs *regs = get_irq_regs();
	int action;

	if (!(*csr & KN01_CSR_MEMERR))
		return IRQ_NONE;	/* Must have been video. */

	/* invoker == 1: interrupt context. */
	action = dec_kn01_be_backend(regs, 0, 1);
	if (action == MIPS_BE_DISCARD)
		return IRQ_HANDLED;

	/*
	 * FIXME: Find the affected processes and kill them, otherwise
	 * we must die.
	 *
	 * The interrupt is asynchronously delivered thus EPC and RA
	 * may be irrelevant, but are printed for a reference.
	 */
	printk(KERN_ALERT "Fatal bus interrupt, epc == %08lx, ra == %08lx\n",
	       regs->cp0_epc, regs->regs[31]);

	die("Unrecoverable bus error", regs);
}
/*
 * One-time setup: seed the write-only CSR cache from the hardware,
 * enable parity error detection, and clear any error latched by the
 * firmware before Linux took over.
 */
void __init dec_kn01_be_init(void)
{
	volatile u16 *csr = (void *)CKSEG1ADDR(KN01_SLOT_BASE + KN01_CSR);
	unsigned long flags;

	raw_spin_lock_irqsave(&kn01_lock, flags);

	/* Preset write-only bits of the Control Register cache. */
	cached_kn01_csr = *csr;
	cached_kn01_csr &= KN01_CSR_STATUS | KN01_CSR_PARDIS | KN01_CSR_TXDIS;
	cached_kn01_csr |= KN01_CSR_LEDS;

	/* Enable parity error detection. */
	cached_kn01_csr &= ~KN01_CSR_PARDIS;
	*csr = cached_kn01_csr;

	iob();

	raw_spin_unlock_irqrestore(&kn01_lock, flags);

	/* Clear any leftover errors from the firmware. */
	dec_kn01_be_ack();
}
| gpl-2.0 |
Cobaltum/kernel_u8110 | drivers/mca/mca-driver.c | 9951 | 1788 | /* -*- mode: c; c-basic-offset: 8 -*- */
/*
* MCA driver support functions for sysfs.
*
* (C) 2002 James Bottomley <James.Bottomley@HansenPartnership.com>
*
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
#include <linux/device.h>
#include <linux/mca.h>
#include <linux/module.h>
/*
 * Register an MCA driver with the driver core.  On machines without an
 * MCA bus this is a silent no-op that reports success.  Returns 0 on
 * success or the negative error from driver_register().
 */
int mca_register_driver(struct mca_driver *mca_drv)
{
	int rc;

	if (!MCA_bus)
		return 0;	/* no MCA bus present: nothing to do */

	mca_drv->driver.bus = &mca_bus_type;
	rc = driver_register(&mca_drv->driver);
	if (rc < 0)
		return rc;

	/* Plain (non-integrated) drivers carry no integrated id. */
	mca_drv->integrated_id = 0;

	return 0;
}
EXPORT_SYMBOL(mca_register_driver);
/*
 * Register an MCA driver for an integrated (motherboard) adapter and
 * record its integrated id.  Returns the result of mca_register_driver().
 */
int mca_register_driver_integrated(struct mca_driver *mca_driver,
				   int integrated_id)
{
	int rc = mca_register_driver(mca_driver);

	if (rc == 0)
		mca_driver->integrated_id = integrated_id;

	return rc;
}
EXPORT_SYMBOL(mca_register_driver_integrated);
/* Remove an MCA driver from the driver core (no-op without an MCA bus). */
void mca_unregister_driver(struct mca_driver *mca_drv)
{
	if (!MCA_bus)
		return;

	driver_unregister(&mca_drv->driver);
}
EXPORT_SYMBOL(mca_unregister_driver);
| gpl-2.0 |
scottellis/linux-pansenti | drivers/atm/nicstarmac.c | 13023 | 6098 | /*
* this file included by nicstar.c
*/
/*
* nicstarmac.c
* Read this ForeRunner's MAC address from eprom/eeprom
*/
#include <linux/kernel.h>
typedef void __iomem *virt_addr_t;
#define CYCLE_DELAY 5
/*
This was the original definition
#define osp_MicroDelay(microsec) \
do { int _i = 4*microsec; while (--_i > 0) { __SLOW_DOWN_IO; }} while (0)
*/
#define osp_MicroDelay(microsec) {unsigned long useconds = (microsec); \
udelay((useconds));}
/*
* The following tables represent the timing diagrams found in
* the Data Sheet for the Xicor X25020 EEProm. The #defines below
* represent the bits in the NICStAR's General Purpose register
* that must be toggled for the corresponding actions on the EEProm
* to occur.
*/
/* Write Data To EEProm from SI line on rising edge of CLK */
/* Read Data From EEProm on falling edge of CLK */
#define CS_HIGH 0x0002 /* Chip select high */
#define CS_LOW 0x0000 /* Chip select low (active low) */
#define CLK_HIGH 0x0004 /* Clock high */
#define CLK_LOW 0x0000 /* Clock low */
#define SI_HIGH 0x0001 /* Serial input data high */
#define SI_LOW 0x0000 /* Serial input data low */
/* Read Status Register = 0000 0101b */
#if 0
static u_int32_t rdsrtab[] = {
CS_HIGH | CLK_HIGH,
CS_LOW | CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW | SI_HIGH,
CLK_HIGH | SI_HIGH, /* 1 */
CLK_LOW | SI_LOW,
CLK_HIGH, /* 0 */
CLK_LOW | SI_HIGH,
CLK_HIGH | SI_HIGH /* 1 */
};
#endif /* 0 */
/* Read from EEPROM = 0000 0011b */
static u_int32_t readtab[] = {
/*
CS_HIGH | CLK_HIGH,
*/
CS_LOW | CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW,
CLK_HIGH, /* 0 */
CLK_LOW | SI_HIGH,
CLK_HIGH | SI_HIGH, /* 1 */
CLK_LOW | SI_HIGH,
CLK_HIGH | SI_HIGH /* 1 */
};
/* Clock to read from/write to the eeprom */
static u_int32_t clocktab[] = {
CLK_LOW,
CLK_HIGH,
CLK_LOW,
CLK_HIGH,
CLK_LOW,
CLK_HIGH,
CLK_LOW,
CLK_HIGH,
CLK_LOW,
CLK_HIGH,
CLK_LOW,
CLK_HIGH,
CLK_LOW,
CLK_HIGH,
CLK_LOW,
CLK_HIGH,
CLK_LOW
};
/*
 * Register accessors, given the NICStAR's I/O base address.
 *
 * NICSTAR_REG_WRITE spins until the STAT busy bit (0x0200) clears
 * before issuing the write.  The original macros expanded to a variable
 * literally named "base" rather than the "bs" parameter, which only
 * compiled because every caller happens to pass a variable called
 * "base"; use the parameter properly.  The write macro is also wrapped
 * in do { } while (0) so it behaves as a single statement inside
 * unbraced if/else bodies.
 */
#define NICSTAR_REG_WRITE(bs, reg, val)				\
	do {							\
		while (readl((bs) + STAT) & 0x0200)		\
			;					\
		writel((val), (bs) + (reg));			\
	} while (0)
#define NICSTAR_REG_READ(bs, reg) \
	readl((bs) + (reg))
#define NICSTAR_REG_GENERAL_PURPOSE GP
/*
* This routine will clock the Read_Status_reg function into the X2520
* eeprom, then pull the result from bit 16 of the NicSTaR's General Purpose
* register.
*/
#if 0
u_int32_t nicstar_read_eprom_status(virt_addr_t base)
{
u_int32_t val;
u_int32_t rbyte;
int32_t i, j;
/* Send read instruction */
val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;
for (i = 0; i < ARRAY_SIZE(rdsrtab); i++) {
NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
(val | rdsrtab[i]));
osp_MicroDelay(CYCLE_DELAY);
}
/* Done sending instruction - now pull data off of bit 16, MSB first */
/* Data clocked out of eeprom on falling edge of clock */
rbyte = 0;
for (i = 7, j = 0; i >= 0; i--) {
NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
(val | clocktab[j++]));
rbyte |= (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE)
& 0x00010000) >> 16) << i);
NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
(val | clocktab[j++]));
osp_MicroDelay(CYCLE_DELAY);
}
NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2);
osp_MicroDelay(CYCLE_DELAY);
return rbyte;
}
#endif /* 0 */
/*
* This routine will clock the Read_data function into the X2520
* eeprom, followed by the address to read from, through the NicSTaR's General
* Purpose register.
*/
/*
 * Bit-bang one byte out of the X25020 EEPROM: clock out the READ opcode
 * (readtab), then the 8-bit address MSB-first, then sample the 8 data
 * bits from bit 16 of the General Purpose register on the clock edges.
 */
static u_int8_t read_eprom_byte(virt_addr_t base, u_int8_t offset)
{
	u_int32_t val = 0;
	int i, j = 0;
	u_int8_t tempread = 0;

	/* Preserve the non-EEPROM bits of the GP register. */
	val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;

	/* Send READ instruction */
	for (i = 0; i < ARRAY_SIZE(readtab); i++) {
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | readtab[i]));
		osp_MicroDelay(CYCLE_DELAY);
	}

	/* Next, we need to send the byte address to read from */
	for (i = 7; i >= 0; i--) {
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++] | ((offset >> i) & 1)));
		osp_MicroDelay(CYCLE_DELAY);
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++] | ((offset >> i) & 1)));
		osp_MicroDelay(CYCLE_DELAY);
	}

	j = 0;

	/* Now, we can read data from the eeprom by clocking it in */
	for (i = 7; i >= 0; i--) {
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++]));
		osp_MicroDelay(CYCLE_DELAY);
		tempread |=
		    (((NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE)
		       & 0x00010000) >> 16) << i);
		NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
				  (val | clocktab[j++]));
		osp_MicroDelay(CYCLE_DELAY);
	}

	/* Deselect the chip: 2 == CS_HIGH (chip select is active low). */
	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE, 2);
	osp_MicroDelay(CYCLE_DELAY);

	return tempread;
}
/*
 * Park the EEPROM interface: with chip select held high (deselected —
 * the line is active low), toggle the clock twice.
 */
static void nicstar_init_eprom(virt_addr_t base)
{
	u_int32_t val;

	/*
	 * turn chip select off
	 */
	val = NICSTAR_REG_READ(base, NICSTAR_REG_GENERAL_PURPOSE) & 0xFFFFFFF0;

	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
			  (val | CS_HIGH | CLK_HIGH));
	osp_MicroDelay(CYCLE_DELAY);

	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
			  (val | CS_HIGH | CLK_LOW));
	osp_MicroDelay(CYCLE_DELAY);

	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
			  (val | CS_HIGH | CLK_HIGH));
	osp_MicroDelay(CYCLE_DELAY);

	NICSTAR_REG_WRITE(base, NICSTAR_REG_GENERAL_PURPOSE,
			  (val | CS_HIGH | CLK_LOW));
	osp_MicroDelay(CYCLE_DELAY);
}
/*
* This routine will be the interface to the ReadPromByte function
* above.
*/
/*
 * Read nbytes consecutive bytes from the EEPROM, starting at
 * prom_offset, into buffer.  Thin sequential wrapper around
 * read_eprom_byte(); offsets wrap modulo 256 as before (u_int8_t).
 */
static void
nicstar_read_eprom(virt_addr_t base,
		   u_int8_t prom_offset, u_int8_t * buffer, u_int32_t nbytes)
{
	u_int idx;

	for (idx = 0; idx < nbytes; idx++) {
		/* The offset argument truncates to 8 bits, as before. */
		buffer[idx] = read_eprom_byte(base, prom_offset + idx);
		osp_MicroDelay(CYCLE_DELAY);
	}
}
| gpl-2.0 |
krzycz/prd | drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c | 480 | 55949 | /*
* V4L2 Driver for SuperH Mobile CEU interface
*
* Copyright (C) 2008 Magnus Damm
*
* Based on V4L2 Driver for PXA camera host - "pxa_camera.c",
*
* Copyright (C) 2006, Sascha Hauer, Pengutronix
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <media/v4l2-async.h>
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/soc_camera.h>
#include <media/sh_mobile_ceu.h>
#include <media/sh_mobile_csi2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-mediabus.h>
#include <media/soc_mediabus.h>
#include "soc_scale_crop.h"
/* register offsets for sh7722 / sh7723 */
#define CAPSR 0x00 /* Capture start register */
#define CAPCR 0x04 /* Capture control register */
#define CAMCR 0x08 /* Capture interface control register */
#define CMCYR 0x0c /* Capture interface cycle register */
#define CAMOR 0x10 /* Capture interface offset register */
#define CAPWR 0x14 /* Capture interface width register */
#define CAIFR 0x18 /* Capture interface input format register */
#define CSTCR 0x20 /* Camera strobe control register (<= sh7722) */
#define CSECR 0x24 /* Camera strobe emission count register (<= sh7722) */
#define CRCNTR 0x28 /* CEU register control register */
#define CRCMPR 0x2c /* CEU register forcible control register */
#define CFLCR 0x30 /* Capture filter control register */
#define CFSZR 0x34 /* Capture filter size clip register */
#define CDWDR 0x38 /* Capture destination width register */
#define CDAYR 0x3c /* Capture data address Y register */
#define CDACR 0x40 /* Capture data address C register */
#define CDBYR 0x44 /* Capture data bottom-field address Y register */
#define CDBCR 0x48 /* Capture data bottom-field address C register */
#define CBDSR 0x4c /* Capture bundle destination size register */
#define CFWCR 0x5c /* Firewall operation control register */
#define CLFCR 0x60 /* Capture low-pass filter control register */
#define CDOCR 0x64 /* Capture data output control register */
#define CDDCR 0x68 /* Capture data complexity level register */
#define CDDAR 0x6c /* Capture data complexity level address register */
#define CEIER 0x70 /* Capture event interrupt enable register */
#define CETCR 0x74 /* Capture event flag clear register */
#define CSTSR 0x7c /* Capture status register */
#define CSRTR 0x80 /* Capture software reset register */
#define CDSSR 0x84 /* Capture data size register */
#define CDAYR2 0x90 /* Capture data address Y register 2 */
#define CDACR2 0x94 /* Capture data address C register 2 */
#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */
#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */
#undef DEBUG_GEOMETRY
#ifdef DEBUG_GEOMETRY
#define dev_geo dev_info
#else
#define dev_geo dev_dbg
#endif
/* Per-video-frame buffer: vb2 buffer plus our capture-list linkage. */
struct sh_mobile_ceu_buffer {
	struct vb2_buffer vb; /* v4l buffer must be first */
	struct list_head queue;	/* entry on sh_mobile_ceu_dev.capture */
};
/* Per-CEU-instance driver state. */
struct sh_mobile_ceu_dev {
	struct soc_camera_host ici;
	/* Asynchronous CSI2 linking */
	struct v4l2_async_subdev *csi2_asd;
	struct v4l2_subdev *csi2_sd;
	/* Synchronous probing compatibility */
	struct platform_device *csi2_pdev;

	unsigned int irq;
	void __iomem *base;	/* mapped CEU register block */
	size_t video_limit;	/* total buffer-memory cap; 0 = unlimited */
	size_t buf_total;	/* memory currently held by queued buffers */

	spinlock_t lock;		/* Protects video buffer lists */
	struct list_head capture;	/* buffers waiting for / in capture */
	struct vb2_buffer *active;	/* buffer currently owned by the hw */
	struct vb2_alloc_ctx *alloc_ctx;

	struct sh_mobile_ceu_info *pdata;
	struct completion complete;	/* signalled by the irq path when frozen */

	u32 cflcr;

	/* static max sizes either from platform data or default */
	int max_width;
	int max_height;

	enum v4l2_field field;
	int sequence;	/* frame sequence counter for v4l2_buf.sequence */
	unsigned long flags;

	unsigned int image_mode:1;
	unsigned int is_16bit:1;
	unsigned int frozen:1;	/* capture suspended; irq completes "complete" */
};
/* Per-client (sensor) geometry and format state, kept in icd->host_priv. */
struct sh_mobile_ceu_cam {
	/* CEU offsets within the camera output, before the CEU scaler */
	unsigned int ceu_left;
	unsigned int ceu_top;
	/* Client output, as seen by the CEU */
	unsigned int width;
	unsigned int height;
	/*
	 * User window from S_CROP / G_CROP, produced by client cropping and
	 * scaling, CEU scaling and CEU cropping, mapped back onto the client
	 * input window
	 */
	struct v4l2_rect subrect;
	/* Camera cropping rectangle */
	struct v4l2_rect rect;
	const struct soc_mbus_pixelfmt *extra_fmt;
	enum v4l2_mbus_pixelcode code;	/* client media-bus format code */
};
/* Map a generic vb2 buffer back to its enclosing CEU buffer. */
static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb)
{
	return container_of(vb, struct sh_mobile_ceu_buffer, vb);
}
/* Write a 32-bit value to the CEU register at the given byte offset. */
static void ceu_write(struct sh_mobile_ceu_dev *priv,
		      unsigned long reg_offs, u32 data)
{
	iowrite32(data, priv->base + reg_offs);
}
/* Read the 32-bit CEU register at the given byte offset. */
static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs)
{
	return ioread32(priv->base + reg_offs);
}
/*
 * Soft-reset the CEU: trigger the reset through CAPSR bit 16, then poll
 * (up to ~1000us each) for the capture-on bit in CSTSR and the reset
 * bit in CAPSR to clear.  Returns 0 on success, -EIO if either poll
 * times out.
 */
static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
{
	int i, success = 0;

	ceu_write(pcdev, CAPSR, 1 << 16); /* reset */

	/* wait CSTSR.CPTON bit */
	for (i = 0; i < 1000; i++) {
		if (!(ceu_read(pcdev, CSTSR) & 1)) {
			success++;
			break;
		}
		udelay(1);
	}

	/* wait CAPSR.CPKIL bit */
	for (i = 0; i < 1000; i++) {
		if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) {
			success++;
			break;
		}
		udelay(1);
	}

	/* Both polls must have succeeded. */
	if (2 != success) {
		dev_warn(pcdev->ici.v4l2_dev.dev, "soft reset time out\n");
		return -EIO;
	}

	return 0;
}
/*
* Videobuf operations
*/
/*
 * .queue_setup() is called to check whether the driver can accept the
 * requested number of buffers and to fill in plane sizes for the
 * current frame format if required.  The buffer count may be clamped
 * by pcdev->video_limit.
 */
static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
			const struct v4l2_format *fmt,
			unsigned int *count, unsigned int *num_planes,
			unsigned int sizes[], void *alloc_ctxs[])
{
	struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;

	if (fmt) {
		/* CREATE_BUFS path: size from the supplied format. */
		const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd,
								fmt->fmt.pix.pixelformat);
		unsigned int bytes_per_line;
		int ret;

		if (!xlate)
			return -EINVAL;

		ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
					      xlate->host_fmt);
		if (ret < 0)
			return ret;

		bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret);

		ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line,
					  fmt->fmt.pix.height);
		if (ret < 0)
			return ret;

		sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret);
	} else {
		/* Called from VIDIOC_REQBUFS or in compatibility mode */
		sizes[0] = icd->sizeimage;
	}

	alloc_ctxs[0] = pcdev->alloc_ctx;

	/* Fresh queue: restart the frame sequence counter. */
	if (!vq->num_buffers)
		pcdev->sequence = 0;

	if (!*count)
		*count = 2;

	/* If *num_planes != 0, we have already verified *count. */
	if (pcdev->video_limit && !*num_planes) {
		size_t size = PAGE_ALIGN(sizes[0]) * *count;

		if (size + pcdev->buf_total > pcdev->video_limit)
			*count = (pcdev->video_limit - pcdev->buf_total) /
				PAGE_ALIGN(sizes[0]);
	}

	*num_planes = 1;

	dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]);

	return 0;
}
#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */
#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */
#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */
#define CEU_CEIER_VBP (1 << 20) /* vbp error */
#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */
#define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP)
/*
 * Program the CEU DMA addresses for pcdev->active and start a one-frame
 * capture.  The return value doesn't reflect the success/failure to
 * queue the new buffer, but rather the status of the previous buffer:
 * -EIO if a VBP error forced a soft reset, 0 otherwise.
 */
static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
{
	struct soc_camera_device *icd = pcdev->ici.icd;
	dma_addr_t phys_addr_top, phys_addr_bottom;
	unsigned long top1, top2;
	unsigned long bottom1, bottom2;
	u32 status;
	bool planar;
	int ret = 0;

	/*
	 * The hardware is _very_ picky about this sequence. Especially
	 * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
	 * several not-so-well documented interrupt sources in CETCR.
	 */
	ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK);
	status = ceu_read(pcdev, CETCR);
	ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC);
	if (!pcdev->frozen)
		ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK);
	ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP);
	ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW);

	/*
	 * When a VBP interrupt occurs, a capture end interrupt does not occur
	 * and the image of that frame is not captured correctly. So, soft reset
	 * is needed here.
	 */
	if (status & CEU_CEIER_VBP) {
		sh_mobile_ceu_soft_reset(pcdev);
		ret = -EIO;
	}

	/* Suspended: just report completion, don't start a new frame. */
	if (pcdev->frozen) {
		complete(&pcdev->complete);
		return ret;
	}

	if (!pcdev->active)
		return ret;

	/* For INTERLACED_BT the bottom field is captured first. */
	if (V4L2_FIELD_INTERLACED_BT == pcdev->field) {
		top1 = CDBYR;
		top2 = CDBCR;
		bottom1 = CDAYR;
		bottom2 = CDACR;
	} else {
		top1 = CDAYR;
		top2 = CDACR;
		bottom1 = CDBYR;
		bottom2 = CDBCR;
	}

	phys_addr_top = vb2_dma_contig_plane_dma_addr(pcdev->active, 0);

	/* Semi-planar NV formats need the chroma plane programmed too. */
	switch (icd->current_fmt->host_fmt->fourcc) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		planar = true;
		break;
	default:
		planar = false;
	}

	ceu_write(pcdev, top1, phys_addr_top);
	if (V4L2_FIELD_NONE != pcdev->field) {
		/* Interlaced: the other field starts one line further in. */
		phys_addr_bottom = phys_addr_top + icd->bytesperline;
		ceu_write(pcdev, bottom1, phys_addr_bottom);
	}

	if (planar) {
		/* Chroma plane follows the luma plane in the buffer. */
		phys_addr_top += icd->bytesperline * icd->user_height;
		ceu_write(pcdev, top2, phys_addr_top);
		if (V4L2_FIELD_NONE != pcdev->field) {
			phys_addr_bottom = phys_addr_top + icd->bytesperline;
			ceu_write(pcdev, bottom2, phys_addr_bottom);
		}
	}

	ceu_write(pcdev, CAPSR, 0x1); /* start capture */

	return ret;
}
/* vb2 .buf_prepare: sanity-check that the buffer is not still queued. */
static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
{
	struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);

	/* Added list head initialization on alloc */
	WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);

	return 0;
}
/*
 * vb2 .buf_queue: validate the plane size, append the buffer to the
 * capture list and, if the hardware is idle, kick off a capture with it.
 */
static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
	unsigned long size;

	size = icd->sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
			vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
		goto error;
	}

	vb2_set_plane_payload(vb, 0, size);

	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
		vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));

#ifdef DEBUG
	/*
	 * This can be useful if you want to see if we actually fill
	 * the buffer with something
	 */
	if (vb2_plane_vaddr(vb, 0))
		memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
#endif

	spin_lock_irq(&pcdev->lock);
	list_add_tail(&buf->queue, &pcdev->capture);

	if (!pcdev->active) {
		/*
		 * Because there were no active buffer at this moment,
		 * we are not interested in the return value of
		 * sh_mobile_ceu_capture here.
		 */
		pcdev->active = vb;
		sh_mobile_ceu_capture(pcdev);
	}
	spin_unlock_irq(&pcdev->lock);

	return;

error:
	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
/*
 * vb2 .buf_cleanup: stop the hardware if this buffer is the one it is
 * writing to, unlink it from the capture list and account its memory.
 */
static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;

	spin_lock_irq(&pcdev->lock);

	if (pcdev->active == vb) {
		/* disable capture (release DMA buffer), reset */
		ceu_write(pcdev, CAPSR, 1 << 16);
		pcdev->active = NULL;
	}

	/*
	 * Doesn't hurt also if the list is empty, but it hurts, if queuing the
	 * buffer failed, and .buf_init() hasn't been called
	 */
	if (buf->queue.next)
		list_del_init(&buf->queue);

	pcdev->buf_total -= PAGE_ALIGN(vb2_plane_size(vb, 0));
	dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
		pcdev->buf_total);

	spin_unlock_irq(&pcdev->lock);
}
/*
 * vb2 .buf_init: account the buffer's memory against buf_total and
 * initialize its list head (checked later in .buf_prepare).
 */
static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;

	pcdev->buf_total += PAGE_ALIGN(vb2_plane_size(vb, 0));
	dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__,
		pcdev->buf_total);

	/* This is for locking debugging only */
	INIT_LIST_HEAD(&to_ceu_vb(vb)->queue);

	return 0;
}
/*
 * vb2 .stop_streaming: drop the active buffer, unlink everything still
 * on the capture list and soft-reset the CEU.
 */
static void sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
{
	struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct list_head *buf_head, *tmp;

	spin_lock_irq(&pcdev->lock);

	pcdev->active = NULL;

	list_for_each_safe(buf_head, tmp, &pcdev->capture)
		list_del_init(buf_head);

	spin_unlock_irq(&pcdev->lock);

	sh_mobile_ceu_soft_reset(pcdev);
}
/* videobuf2 operations for the CEU capture queue. */
static struct vb2_ops sh_mobile_ceu_videobuf_ops = {
	.queue_setup	= sh_mobile_ceu_videobuf_setup,
	.buf_prepare	= sh_mobile_ceu_videobuf_prepare,
	.buf_queue	= sh_mobile_ceu_videobuf_queue,
	.buf_cleanup	= sh_mobile_ceu_videobuf_release,
	.buf_init	= sh_mobile_ceu_videobuf_init,
	.wait_prepare	= soc_camera_unlock,
	.wait_finish	= soc_camera_lock,
	.stop_streaming	= sh_mobile_ceu_stop_streaming,
};
/*
 * Capture-end interrupt: complete the finished buffer, promote the next
 * queued buffer to active and restart capture.
 */
static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
{
	struct sh_mobile_ceu_dev *pcdev = data;
	struct vb2_buffer *vb;
	int ret;

	spin_lock(&pcdev->lock);

	vb = pcdev->active;
	if (!vb)
		/* Stale interrupt from a released buffer */
		goto out;

	list_del_init(&to_ceu_vb(vb)->queue);

	/* Promote the next queued buffer, if any. */
	if (!list_empty(&pcdev->capture))
		pcdev->active = &list_entry(pcdev->capture.next,
					    struct sh_mobile_ceu_buffer, queue)->vb;
	else
		pcdev->active = NULL;

	ret = sh_mobile_ceu_capture(pcdev);
	v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
	if (!ret) {
		vb->v4l2_buf.field = pcdev->field;
		vb->v4l2_buf.sequence = pcdev->sequence++;
	}
	vb2_buffer_done(vb, ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);

out:
	spin_unlock(&pcdev->lock);

	return IRQ_HANDLED;
}
/*
 * Locate the sh-mobile-csi2 subdevice linked to this CEU, if any.
 * Uses the cached pointer when already resolved; otherwise, for an
 * asynchronously registered CSI-2, searches the v4l2 device's subdev
 * list by name and caches the result.  Returns NULL when no CSI-2 is
 * linked.
 */
static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev)
{
	struct v4l2_subdev *sd;

	if (pcdev->csi2_sd)
		return pcdev->csi2_sd;

	if (pcdev->csi2_asd) {
		char name[] = "sh-mobile-csi2";
		v4l2_device_for_each_subdev(sd, &pcdev->ici.v4l2_dev)
			/* Match by name prefix (excluding the NUL). */
			if (!strncmp(name, sd->name, sizeof(name) - 1)) {
				pcdev->csi2_sd = sd;
				return sd;
			}
	}

	return NULL;
}
/*
 * Return the CSI-2 subdev only if it is currently bound to this client
 * (its group id matches the soc-camera device), otherwise NULL.
 */
static struct v4l2_subdev *csi2_subdev(struct sh_mobile_ceu_dev *pcdev,
				       struct soc_camera_device *icd)
{
	struct v4l2_subdev *sd = pcdev->csi2_sd;

	return sd && sd->grp_id == soc_camera_grp_id(icd) ? sd : NULL;
}
/*
 * Attach a camera client to this CEU: bind the CSI-2 subdev (if
 * present) to the client's group and power it up.
 */
static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
	int ret;

	if (csi2_sd) {
		csi2_sd->grp_id = soc_camera_grp_id(icd);
		v4l2_set_subdev_hostdata(csi2_sd, icd);
	}

	ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
		return ret;

	/*
	 * -ENODEV is special: either csi2_sd == NULL or the CSI-2 driver
	 * has not found this soc-camera device among its clients
	 */
	if (csi2_sd && ret == -ENODEV)
		csi2_sd->grp_id = 0;

	dev_info(icd->parent,
		 "SuperH Mobile CEU%s driver attached to camera %d\n",
		 csi2_sd && csi2_sd->grp_id ? "/CSI-2" : "", icd->devnum);

	return 0;
}
/* Detach a camera client: power down the CSI-2 subdev, if present. */
static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct v4l2_subdev *csi2_sd = find_csi2(pcdev);

	dev_info(icd->parent,
		 "SuperH Mobile CEU driver detached from camera %d\n",
		 icd->devnum);

	v4l2_subdev_call(csi2_sd, core, s_power, 0);
}
/* Called with .host_lock held */
/* Power up the CEU (runtime PM), reset buffer accounting and hardware. */
static int sh_mobile_ceu_clock_start(struct soc_camera_host *ici)
{
	struct sh_mobile_ceu_dev *pcdev = ici->priv;

	pm_runtime_get_sync(ici->v4l2_dev.dev);

	pcdev->buf_total = 0;

	sh_mobile_ceu_soft_reset(pcdev);

	return 0;
}
/* Called with .host_lock held */
/*
 * Quiesce the CEU: mask interrupts, soft-reset, cancel the active
 * buffer and release the runtime-PM reference.
 */
static void sh_mobile_ceu_clock_stop(struct soc_camera_host *ici)
{
	struct sh_mobile_ceu_dev *pcdev = ici->priv;

	/* disable capture, disable interrupts */
	ceu_write(pcdev, CEIER, 0);
	sh_mobile_ceu_soft_reset(pcdev);

	/* make sure active buffer is canceled */
	spin_lock_irq(&pcdev->lock);
	if (pcdev->active) {
		list_del_init(&to_ceu_vb(pcdev->active)->queue);
		vb2_buffer_done(pcdev->active, VB2_BUF_STATE_ERROR);
		pcdev->active = NULL;
	}
	spin_unlock_irq(&pcdev->lock);

	pm_runtime_put(ici->v4l2_dev.dev);
}
/*
* See chapter 29.4.12 "Capture Filter Control Register (CFLCR)"
* in SH7722 Hardware Manual
*/
/*
 * Predict the output size the CEU scale filter produces for the given
 * input size and 4.12 fixed-point scale factor (see the CFLCR manual
 * reference above).
 *
 * Fix: for scale values below 4096 (factor < 1.0) the integer part
 * mant_pre is 0 and the original formula divided by zero.  Such values
 * mean no down-scaling, so pass the source size through instead.
 */
static unsigned int size_dst(unsigned int src, unsigned int scale)
{
	unsigned int mant_pre = scale >> 12;	/* integer part of 4.12 scale */

	if (!src || !scale || !mant_pre)
		return src;

	return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) *
		mant_pre * 4096 / scale + 1;
}
/*
 * Find the 4.12 fixed-point scale factor that maps @src pixels onto
 * (approximately) *@dst pixels with the CEU filter. On return *@dst is
 * updated to the exact size size_dst() yields for the chosen factor.
 * Returns 0 (no scaling) when the sizes already match.
 */
static u16 calc_scale(unsigned int src, unsigned int *dst)
{
	u16 scale;

	if (src == *dst)
		return 0;

	/*
	 * Start from the straightforward ratio (rounded down to a multiple
	 * of 8) and back off while the filter would undershoot the target.
	 */
	for (scale = (src * 4096 / *dst) & ~7;
	     scale > 4096 && size_dst(src, scale) < *dst;
	     scale -= 8)
		;

	*dst = size_dst(src, scale);

	return scale;
}
/*
 * Program the CEU clipping / offset registers (CAMOR, CAPWR, CFSZR, CDWDR)
 * from the cached camera geometry and the current user window.
 * rect is guaranteed to not exceed the scaled camera rectangle.
 */
static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	unsigned int height, width, cdwdr_width, in_width, in_height;
	unsigned int left_offset, top_offset;
	u32 camor;

	dev_geo(icd->parent, "Crop %ux%u@%u:%u\n",
		icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);

	left_offset = cam->ceu_left;
	top_offset = cam->ceu_top;

	WARN_ON(icd->user_width & 3 || icd->user_height & 3);

	width = icd->user_width;

	if (pcdev->image_mode) {
		in_width = cam->width;
		if (!pcdev->is_16bit) {
			/*
			 * On an 8-bit bus each pixel takes two samples, so
			 * widths / offsets double in bus-sample units
			 */
			in_width *= 2;
			left_offset *= 2;
		}
	} else {
		unsigned int w_factor;

		switch (icd->current_fmt->host_fmt->packing) {
		case SOC_MBUS_PACKING_2X8_PADHI:
			w_factor = 2;
			break;
		default:
			w_factor = 1;
		}

		in_width = cam->width * w_factor;
		left_offset *= w_factor;
	}

	cdwdr_width = icd->bytesperline;

	height = icd->user_height;
	in_height = cam->height;
	if (V4L2_FIELD_NONE != pcdev->field) {
		/* interlaced: each field carries half the lines */
		height = (height / 2) & ~3;
		in_height /= 2;
		top_offset /= 2;
		cdwdr_width *= 2;
	}

	/* CSI2 special configuration */
	if (csi2_subdev(pcdev, icd)) {
		in_width = ((in_width - 2) * 2);
		left_offset *= 2;
	}

	/* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
	camor = left_offset | (top_offset << 16);

	dev_geo(icd->parent,
		"CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
		(in_height << 16) | in_width, (height << 16) | width,
		cdwdr_width);

	ceu_write(pcdev, CAMOR, camor);
	ceu_write(pcdev, CAPWR, (in_height << 16) | in_width);
	/* CFSZR clipping is applied _after_ the scaling filter (CFLCR) */
	ceu_write(pcdev, CFSZR, (height << 16) | width);
	ceu_write(pcdev, CDWDR, cdwdr_width);
}
/*
 * Stop capture and reset the CEU, returning the previous CAPSR value so
 * that the caller can hand it back to capture_restore() later.
 */
static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev)
{
	u32 capsr = ceu_read(pcdev, CAPSR);
	ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */
	return capsr;
}
/*
 * Counterpart of capture_save_reset(): wait for the current frame and the
 * pending reset to finish, then write back the saved CAPSR value (minus the
 * reset bit). Gives up with an error message after a 10 second timeout.
 */
static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
{
	unsigned long timeout = jiffies + 10 * HZ;

	/*
	 * Wait until the end of the current frame. It can take a long time,
	 * but if it has been aborted by a CAPSR reset, it should exit sooner.
	 */
	while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout))
		msleep(1);

	if (time_after(jiffies, timeout)) {
		dev_err(pcdev->ici.v4l2_dev.dev,
			"Timeout waiting for frame end! Interface problem?\n");
		return;
	}

	/* Wait until reset clears, this shall not hang... */
	while (ceu_read(pcdev, CAPSR) & (1 << 16))
		udelay(10);

	/* Anything to restore? */
	if (capsr & ~(1 << 16))
		ceu_write(pcdev, CAPSR, capsr);
}
/* Find the bus subdevice driver, e.g., CSI2 */
static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
struct soc_camera_device *icd)
{
return csi2_subdev(pcdev, icd) ? : soc_camera_to_subdev(icd);
}
/* Media-bus configuration flags the CEU parallel interface can support */
#define CEU_BUS_FLAGS (V4L2_MBUS_MASTER |	\
	V4L2_MBUS_PCLK_SAMPLE_RISING |	\
	V4L2_MBUS_HSYNC_ACTIVE_HIGH |	\
	V4L2_MBUS_HSYNC_ACTIVE_LOW |	\
	V4L2_MBUS_VSYNC_ACTIVE_HIGH |	\
	V4L2_MBUS_VSYNC_ACTIVE_LOW |	\
	V4L2_MBUS_DATA_ACTIVE_HIGH)
/*
 * soc-camera .set_bus_param callback: negotiate the media-bus configuration
 * with the client and program the CEU control registers (CAMCR, CAPCR,
 * CAIFR, CFLCR, CDOCR, ...) accordingly. Capture is stopped/reset first
 * and restored at the end.
 * Capture is not running, no interrupts, no locking needed.
 */
static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
	unsigned long value, common_flags = CEU_BUS_FLAGS;
	u32 capsr = capture_save_reset(pcdev);
	unsigned int yuv_lineskip;
	int ret;

	/*
	 * If the client doesn't implement g_mbus_config, we just use our
	 * platform data
	 */
	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
	if (!ret) {
		common_flags = soc_mbus_config_compatible(&cfg,
							  common_flags);
		if (!common_flags)
			return -EINVAL;
	} else if (ret != -ENOIOCTLCMD) {
		return ret;
	}

	/* Make choices, based on platform preferences */
	if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
	    (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
		if (pcdev->flags & SH_CEU_FLAG_HSYNC_LOW)
			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
		else
			common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
	}

	if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
	    (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
		if (pcdev->flags & SH_CEU_FLAG_VSYNC_LOW)
			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
		else
			common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
	}

	/* Tell the client the configuration we settled on */
	cfg.flags = common_flags;
	ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
	if (ret < 0 && ret != -ENOIOCTLCMD)
		return ret;

	if (icd->current_fmt->host_fmt->bits_per_sample > 8)
		pcdev->is_16bit = 1;
	else
		pcdev->is_16bit = 0;

	ceu_write(pcdev, CRCNTR, 0);
	ceu_write(pcdev, CRCMPR, 0);

	value = 0x00000010; /* data fetch by default */
	yuv_lineskip = 0x10;

	switch (icd->current_fmt->host_fmt->fourcc) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
		/* convert 4:2:2 -> 4:2:0 */
		yuv_lineskip = 0; /* skip for NV12/21, no skip for NV16/61 */
		/* fall-through */
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		/* select the component order matching the bus format */
		switch (cam->code) {
		case V4L2_MBUS_FMT_UYVY8_2X8:
			value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
			break;
		case V4L2_MBUS_FMT_VYUY8_2X8:
			value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
			break;
		case V4L2_MBUS_FMT_YUYV8_2X8:
			value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
			break;
		case V4L2_MBUS_FMT_YVYU8_2X8:
			value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
			break;
		default:
			BUG();
		}
	}

	if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
	    icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
		value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */

	/* sync polarity and bus width bits of CAMCR */
	value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
	value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;

	if (csi2_subdev(pcdev, icd)) /* CSI2 mode */
		value |= 3 << 12;
	else if (pcdev->is_16bit)
		value |= 1 << 12;
	else if (pcdev->flags & SH_CEU_FLAG_LOWER_8BIT)
		value |= 2 << 12;

	ceu_write(pcdev, CAMCR, value);

	ceu_write(pcdev, CAPCR, 0x00300000);

	switch (pcdev->field) {
	case V4L2_FIELD_INTERLACED_TB:
		value = 0x101;
		break;
	case V4L2_FIELD_INTERLACED_BT:
		value = 0x102;
		break;
	default:
		value = 0;
		break;
	}
	ceu_write(pcdev, CAIFR, value);

	sh_mobile_ceu_set_rect(icd);
	mdelay(1);

	/* CFLCR was calculated earlier in sh_mobile_ceu_set_fmt() */
	dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr);
	ceu_write(pcdev, CFLCR, pcdev->cflcr);

	/*
	 * A few words about byte order (observed in Big Endian mode)
	 *
	 * In data fetch mode bytes are received in chunks of 8 bytes.
	 * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
	 *
	 * The data is however by default written to memory in reverse order:
	 * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
	 *
	 * The lowest three bits of CDOCR allows us to do swapping,
	 * using 7 we swap the data bytes to match the incoming order:
	 * D0, D1, D2, D3, D4, D5, D6, D7
	 */
	value = 0x00000007 | yuv_lineskip;

	ceu_write(pcdev, CDOCR, value);
	ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */

	capture_restore(pcdev, capsr);

	/* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */
	return 0;
}
/*
 * Check (without applying anything) whether a client with the given bus
 * width can be connected to the CEU. Returns 0 when compatible, -EINVAL
 * when there are no common bus flags or the bus is wider than 16 bits,
 * or an error from the client's g_mbus_config.
 */
static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
				       unsigned char buswidth)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
	unsigned long common_flags = CEU_BUS_FLAGS;
	struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
	int ret;

	ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
	if (!ret)
		common_flags = soc_mbus_config_compatible(&cfg,
							  common_flags);
	else if (ret != -ENOIOCTLCMD)
		return ret;

	if (!common_flags || buswidth > 16)
		return -EINVAL;

	return 0;
}
/*
 * Planar NV12/21/16/61 output formats the CEU synthesizes from YUV 4:2:2
 * bus data (see sh_mobile_ceu_get_formats()).
 */
static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
	{
		.fourcc			= V4L2_PIX_FMT_NV12,
		.name			= "NV12",
		.bits_per_sample	= 8,
		.packing		= SOC_MBUS_PACKING_1_5X8,
		.order			= SOC_MBUS_ORDER_LE,
		.layout			= SOC_MBUS_LAYOUT_PLANAR_2Y_C,
	}, {
		.fourcc			= V4L2_PIX_FMT_NV21,
		.name			= "NV21",
		.bits_per_sample	= 8,
		.packing		= SOC_MBUS_PACKING_1_5X8,
		.order			= SOC_MBUS_ORDER_LE,
		.layout			= SOC_MBUS_LAYOUT_PLANAR_2Y_C,
	}, {
		.fourcc			= V4L2_PIX_FMT_NV16,
		.name			= "NV16",
		.bits_per_sample	= 8,
		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
		.order			= SOC_MBUS_ORDER_LE,
		.layout			= SOC_MBUS_LAYOUT_PLANAR_Y_C,
	}, {
		.fourcc			= V4L2_PIX_FMT_NV61,
		.name			= "NV61",
		.bits_per_sample	= 8,
		.packing		= SOC_MBUS_PACKING_2X8_PADHI,
		.order			= SOC_MBUS_ORDER_LE,
		.layout			= SOC_MBUS_LAYOUT_PLANAR_Y_C,
	},
};
/* This will be corrected as we get more formats */
static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt)
{
return fmt->packing == SOC_MBUS_PACKING_NONE ||
(fmt->bits_per_sample == 8 &&
fmt->packing == SOC_MBUS_PACKING_1_5X8) ||
(fmt->bits_per_sample == 8 &&
fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) ||
(fmt->bits_per_sample > 8 &&
fmt->packing == SOC_MBUS_PACKING_EXTEND16);
}
/* Map a control back to the soc_camera_device embedding its handler */
static struct soc_camera_device *ctrl_to_icd(struct v4l2_ctrl *ctrl)
{
	return container_of(ctrl->handler, struct soc_camera_device,
			    ctrl_handler);
}
/*
 * Control handler: V4L2_CID_SHARPNESS drives the CEU low-pass filter
 * register (CLFCR) for the planar NV* formats. Any other control or
 * format combination is rejected with -EINVAL.
 */
static int sh_mobile_ceu_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct soc_camera_device *icd = ctrl_to_icd(ctrl);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;

	switch (ctrl->id) {
	case V4L2_CID_SHARPNESS:
		switch (icd->current_fmt->host_fmt->fourcc) {
		case V4L2_PIX_FMT_NV12:
		case V4L2_PIX_FMT_NV21:
		case V4L2_PIX_FMT_NV16:
		case V4L2_PIX_FMT_NV61:
			/*
			 * Inverted value written: presumably the CLFCR
			 * filter bit is active-low — confirm with datasheet
			 */
			ceu_write(pcdev, CLFCR, !ctrl->val);
			return 0;
		}
		break;
	}
	return -EINVAL;
}
/* Control ops registered for the per-client SHARPNESS control */
static const struct v4l2_ctrl_ops sh_mobile_ceu_ctrl_ops = {
	.s_ctrl = sh_mobile_ceu_s_ctrl,
};
/*
 * soc-camera .get_formats callback: translate the client media-bus format
 * at index @idx into host pixel formats, filling @xlate when non-NULL.
 *
 * For the four YUV 4:2:2 bus codes the CEU additionally synthesizes all
 * NV12/21/16/61 planar formats (added once per enumeration pass). On the
 * first call per client the camera geometry is queried, clamped to the
 * CEU limits (stepping down through VGA multiples) and cached in a newly
 * allocated struct sh_mobile_ceu_cam hung off icd->host_priv.
 *
 * Returns the number of formats provided (0 = none / enumeration done)
 * or a negative error code.
 *
 * Fix: the dev_err() below used "%ux%x", printing the height in hex while
 * the width was decimal; both are now printed with %u.
 */
static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx,
				     struct soc_camera_format_xlate *xlate)
{
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct device *dev = icd->parent;
	struct soc_camera_host *ici = to_soc_camera_host(dev);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	int ret, k, n;
	int formats = 0;
	struct sh_mobile_ceu_cam *cam;
	enum v4l2_mbus_pixelcode code;
	const struct soc_mbus_pixelfmt *fmt;

	ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
	if (ret < 0)
		/* No more formats */
		return 0;

	fmt = soc_mbus_get_fmtdesc(code);
	if (!fmt) {
		dev_warn(dev, "unsupported format code #%u: %d\n", idx, code);
		return 0;
	}

	if (!csi2_subdev(pcdev, icd)) {
		/* Are there any restrictions in the CSI-2 case? */
		ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample);
		if (ret < 0)
			return 0;
	}

	if (!icd->host_priv) {
		struct v4l2_mbus_framefmt mf;
		struct v4l2_rect rect;
		int shift = 0;

		/* Add our control */
		v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops,
				  V4L2_CID_SHARPNESS, 0, 1, 1, 1);
		if (icd->ctrl_handler.error)
			return icd->ctrl_handler.error;

		/* FIXME: subwindow is lost between close / open */

		/* Cache current client geometry */
		ret = soc_camera_client_g_rect(sd, &rect);
		if (ret < 0)
			return ret;

		/* First time */
		ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
		if (ret < 0)
			return ret;

		/*
		 * All currently existing CEU implementations support 2560x1920
		 * or larger frames. If the sensor is proposing too big a frame,
		 * don't bother with possibly supported by the CEU larger
		 * sizes, just try VGA multiples. If needed, this can be
		 * adjusted in the future.
		 */
		while ((mf.width > pcdev->max_width ||
			mf.height > pcdev->max_height) && shift < 4) {
			/* Try 2560x1920, 1280x960, 640x480, 320x240 */
			mf.width	= 2560 >> shift;
			mf.height	= 1920 >> shift;
			ret = v4l2_device_call_until_err(sd->v4l2_dev,
					soc_camera_grp_id(icd), video,
					s_mbus_fmt, &mf);
			if (ret < 0)
				return ret;
			shift++;
		}

		if (shift == 4) {
			dev_err(dev, "Failed to configure the client below %ux%u\n",
				mf.width, mf.height);
			return -EIO;
		}

		dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height);

		cam = kzalloc(sizeof(*cam), GFP_KERNEL);
		if (!cam)
			return -ENOMEM;

		/* We are called with current camera crop, initialise subrect with it */
		cam->rect	= rect;
		cam->subrect	= rect;
		cam->width	= mf.width;
		cam->height	= mf.height;

		icd->host_priv = cam;
	} else {
		cam = icd->host_priv;
	}

	/* Beginning of a pass */
	if (!idx)
		cam->extra_fmt = NULL;

	switch (code) {
	case V4L2_MBUS_FMT_UYVY8_2X8:
	case V4L2_MBUS_FMT_VYUY8_2X8:
	case V4L2_MBUS_FMT_YUYV8_2X8:
	case V4L2_MBUS_FMT_YVYU8_2X8:
		if (cam->extra_fmt)
			break;

		/*
		 * Our case is simple so far: for any of the above four camera
		 * formats we add all our four synthesized NV* formats, so,
		 * just marking the device with a single flag suffices. If
		 * the format generation rules are more complex, you would have
		 * to actually hang your already added / counted formats onto
		 * the host_priv pointer and check whether the format you're
		 * going to add now is already there.
		 */
		cam->extra_fmt = sh_mobile_ceu_formats;

		n = ARRAY_SIZE(sh_mobile_ceu_formats);
		formats += n;
		for (k = 0; xlate && k < n; k++) {
			xlate->host_fmt	= &sh_mobile_ceu_formats[k];
			xlate->code	= code;
			xlate++;
			dev_dbg(dev, "Providing format %s using code %d\n",
				sh_mobile_ceu_formats[k].name, code);
		}
		break;
	default:
		if (!sh_mobile_ceu_packing_supported(fmt))
			return 0;
	}

	/* Generic pass-through */
	formats++;
	if (xlate) {
		xlate->host_fmt	= fmt;
		xlate->code	= code;
		xlate++;
		dev_dbg(dev, "Providing format %s in pass-through mode\n",
			fmt->name);
	}

	return formats;
}
/* Free the per-client state allocated in sh_mobile_ceu_get_formats() */
static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd)
{
	kfree(icd->host_priv);
	icd->host_priv = NULL;
}
/* 4.12 fixed-point helpers: 12 is the fractional-bit count used throughout */
#define scale_down(size, scale) soc_camera_shift_scale(size, 12, scale)
#define calc_generic_scale(in, out) soc_camera_calc_scale(in, 12, out)
/*
 * CEU can scale and crop, but we don't want to waste bandwidth and kill the
 * framerate by always requesting the maximum image from the client. See
 * Documentation/video4linux/sh_mobile_ceu_camera.txt for a description of
 * scaling and cropping algorithms and for the meaning of referenced here steps.
 */
static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
				  const struct v4l2_crop *a)
{
	struct v4l2_crop a_writable = *a;
	const struct v4l2_rect *rect = &a_writable.c;
	struct device *dev = icd->parent;
	struct soc_camera_host *ici = to_soc_camera_host(dev);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct v4l2_crop cam_crop;
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	struct v4l2_rect *cam_rect = &cam_crop.c;
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct v4l2_mbus_framefmt mf;
	unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
		out_width, out_height;
	int interm_width, interm_height;
	u32 capsr, cflcr;
	int ret;

	dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
		rect->left, rect->top);

	/* During camera cropping its output window can change too, stop CEU */
	capsr = capture_save_reset(pcdev);
	dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);

	/*
	 * 1. - 2. Apply iterative camera S_CROP for new input window, read back
	 * actual camera rectangle.
	 */
	ret = soc_camera_client_s_crop(sd, &a_writable, &cam_crop,
				       &cam->rect, &cam->subrect);
	if (ret < 0)
		return ret;

	dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n",
		cam_rect->width, cam_rect->height,
		cam_rect->left, cam_rect->top);

	/* On success cam_crop contains current camera crop */

	/* 3. Retrieve camera output window */
	ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
	if (ret < 0)
		return ret;

	if (mf.width > pcdev->max_width || mf.height > pcdev->max_height)
		return -EINVAL;

	/* 4. Calculate camera scales */
	scale_cam_h = calc_generic_scale(cam_rect->width, mf.width);
	scale_cam_v = calc_generic_scale(cam_rect->height, mf.height);

	/* Calculate intermediate window */
	interm_width = scale_down(rect->width, scale_cam_h);
	interm_height = scale_down(rect->height, scale_cam_v);

	/*
	 * If the intermediate window is smaller than the user window, ask
	 * the camera for a larger output so the CEU only has to scale down
	 */
	if (interm_width < icd->user_width) {
		u32 new_scale_h;

		new_scale_h = calc_generic_scale(rect->width, icd->user_width);

		mf.width = scale_down(cam_rect->width, new_scale_h);
	}

	if (interm_height < icd->user_height) {
		u32 new_scale_v;

		new_scale_v = calc_generic_scale(rect->height, icd->user_height);

		mf.height = scale_down(cam_rect->height, new_scale_v);
	}

	if (interm_width < icd->user_width || interm_height < icd->user_height) {
		ret = v4l2_device_call_until_err(sd->v4l2_dev,
					soc_camera_grp_id(icd), video,
					s_mbus_fmt, &mf);
		if (ret < 0)
			return ret;

		dev_geo(dev, "New camera output %ux%u\n", mf.width, mf.height);
		/* recompute with the camera's actual new output size */
		scale_cam_h	= calc_generic_scale(cam_rect->width, mf.width);
		scale_cam_v	= calc_generic_scale(cam_rect->height, mf.height);
		interm_width	= scale_down(rect->width, scale_cam_h);
		interm_height	= scale_down(rect->height, scale_cam_v);
	}

	/* Cache camera output window */
	cam->width	= mf.width;
	cam->height	= mf.height;

	if (pcdev->image_mode) {
		out_width	= min(interm_width, icd->user_width);
		out_height	= min(interm_height, icd->user_height);
	} else {
		out_width	= interm_width;
		out_height	= interm_height;
	}

	/*
	 * 5. Calculate CEU scales from camera scales from results of (5) and
	 * the user window
	 */
	scale_ceu_h	= calc_scale(interm_width, &out_width);
	scale_ceu_v	= calc_scale(interm_height, &out_height);

	dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);

	/* Apply CEU scales. */
	cflcr = scale_ceu_h | (scale_ceu_v << 16);
	if (cflcr != pcdev->cflcr) {
		pcdev->cflcr = cflcr;
		ceu_write(pcdev, CFLCR, cflcr);
	}

	icd->user_width	 = out_width & ~3;
	icd->user_height = out_height & ~3;
	/* Offsets are applied at the CEU scaling filter input */
	cam->ceu_left	 = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1;
	cam->ceu_top	 = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1;

	/* 6. Use CEU cropping to crop to the new window. */
	sh_mobile_ceu_set_rect(icd);

	cam->subrect = *rect;

	dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n",
		icd->user_width, icd->user_height,
		cam->ceu_left, cam->ceu_top);

	/* Restore capture. The CE bit can be cleared by the hardware */
	if (pcdev->active)
		capsr |= 1;
	capture_restore(pcdev, capsr);

	/* Even if only camera cropping succeeded */
	return ret;
}
/* Report the cached CEU crop subrectangle; always a capture-type crop */
static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd,
				  struct v4l2_crop *a)
{
	struct sh_mobile_ceu_cam *cam = icd->host_priv;

	a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	a->c = cam->subrect;

	return 0;
}
/*
 * soc-camera .set_fmt callback: negotiate client output geometry and
 * prepare CEU scaling (CFLCR value cached for set_bus_param()).
 * Similar to set_crop multistage iterative algorithm.
 */
static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
				 struct v4l2_format *f)
{
	struct device *dev = icd->parent;
	struct soc_camera_host *ici = to_soc_camera_host(dev);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct v4l2_mbus_framefmt mf;
	__u32 pixfmt = pix->pixelformat;
	const struct soc_camera_format_xlate *xlate;
	unsigned int ceu_sub_width = pcdev->max_width,
		ceu_sub_height = pcdev->max_height;
	u16 scale_v, scale_h;
	int ret;
	bool image_mode;
	enum v4l2_field field;

	/* map unsupported field modes onto what the CEU can do */
	switch (pix->field) {
	default:
		pix->field = V4L2_FIELD_NONE;
		/* fall-through */
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
	case V4L2_FIELD_NONE:
		field = pix->field;
		break;
	case V4L2_FIELD_INTERLACED:
		field = V4L2_FIELD_INTERLACED_TB;
		break;
	}

	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
	if (!xlate) {
		dev_warn(dev, "Format %x not found\n", pixfmt);
		return -EINVAL;
	}

	/* 1.-4. Calculate desired client output geometry */
	soc_camera_calc_client_output(icd, &cam->rect, &cam->subrect, pix, &mf, 12);
	mf.field	= pix->field;
	mf.colorspace	= pix->colorspace;
	mf.code		= xlate->code;

	/* image_mode: planar NV* output, enables exact CEU down-scaling */
	switch (pixfmt) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		image_mode = true;
		break;
	default:
		image_mode = false;
	}

	dev_geo(dev, "S_FMT(pix=0x%x, fld 0x%x, code 0x%x, %ux%u)\n", pixfmt, mf.field, mf.code,
		pix->width, pix->height);

	dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height);

	/* 5. - 9. */
	ret = soc_camera_client_scale(icd, &cam->rect, &cam->subrect,
				&mf, &ceu_sub_width, &ceu_sub_height,
				image_mode && V4L2_FIELD_NONE == field, 12);

	dev_geo(dev, "5-9: client scale return %d\n", ret);

	/* Done with the camera. Now see if we can improve the result */

	dev_geo(dev, "fmt %ux%u, requested %ux%u\n",
		mf.width, mf.height, pix->width, pix->height);
	if (ret < 0)
		return ret;

	if (mf.code != xlate->code)
		return -EINVAL;

	/* 9. Prepare CEU crop */
	cam->width = mf.width;
	cam->height = mf.height;

	/* 10. Use CEU scaling to scale to the requested user window. */

	/* We cannot scale up */
	if (pix->width > ceu_sub_width)
		ceu_sub_width = pix->width;

	if (pix->height > ceu_sub_height)
		ceu_sub_height = pix->height;

	pix->colorspace = mf.colorspace;

	if (image_mode) {
		/* Scale pix->{width x height} down to width x height */
		scale_h	= calc_scale(ceu_sub_width, &pix->width);
		scale_v	= calc_scale(ceu_sub_height, &pix->height);
	} else {
		pix->width	= ceu_sub_width;
		pix->height	= ceu_sub_height;
		scale_h		= 0;
		scale_v		= 0;
	}

	pcdev->cflcr = scale_h | (scale_v << 16);

	/*
	 * We have calculated CFLCR, the actual configuration will be performed
	 * in sh_mobile_ceu_set_bus_param()
	 */

	dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
		ceu_sub_width, scale_h, pix->width,
		ceu_sub_height, scale_v, pix->height);

	cam->code		= xlate->code;
	icd->current_fmt	= xlate;

	pcdev->field = field;
	pcdev->image_mode = image_mode;

	/* CFSZR requirement */
	pix->width	&= ~3;
	pix->height	&= ~3;

	return 0;
}
#define CEU_CHDW_MAX	8188U	/* Maximum line stride */

/*
 * soc-camera .try_fmt callback: validate a user format against CEU and
 * client limits without changing any state. Unknown fourcc codes fall
 * back to the current format.
 */
static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
				 struct v4l2_format *f)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	const struct soc_camera_format_xlate *xlate;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct v4l2_mbus_framefmt mf;
	__u32 pixfmt = pix->pixelformat;
	int width, height;
	int ret;

	dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n",
		 pixfmt, pix->width, pix->height);

	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
	if (!xlate) {
		xlate = icd->current_fmt;
		dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
			pixfmt, xlate->host_fmt->fourcc);
		pixfmt = xlate->host_fmt->fourcc;
		pix->pixelformat = pixfmt;
		pix->colorspace = icd->colorspace;
	}

	/* FIXME: calculate using depth and bus width */

	/* CFSZR requires height and width to be 4-pixel aligned */
	v4l_bound_align_image(&pix->width, 2, pcdev->max_width, 2,
			      &pix->height, 4, pcdev->max_height, 2, 0);

	/* remember what the user asked for, before the client adjusts it */
	width = pix->width;
	height = pix->height;

	/* limit to sensor capabilities */
	mf.width	= pix->width;
	mf.height	= pix->height;
	mf.field	= pix->field;
	mf.code		= xlate->code;
	mf.colorspace	= pix->colorspace;

	ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
					 video, try_mbus_fmt, &mf);
	if (ret < 0)
		return ret;

	pix->width	= mf.width;
	pix->height	= mf.height;
	pix->field	= mf.field;
	pix->colorspace	= mf.colorspace;

	switch (pixfmt) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		/* FIXME: check against rect_max after converting soc-camera */
		/* We can scale precisely, need a bigger image from camera */
		if (pix->width < width || pix->height < height) {
			/*
			 * We presume, the sensor behaves sanely, i.e., if
			 * requested a bigger rectangle, it will not return a
			 * smaller one.
			 */
			mf.width = pcdev->max_width;
			mf.height = pcdev->max_height;
			ret = v4l2_device_call_until_err(sd->v4l2_dev,
					soc_camera_grp_id(icd), video,
					try_mbus_fmt, &mf);
			if (ret < 0) {
				/* Shouldn't actually happen... */
				dev_err(icd->parent,
					"FIXME: client try_fmt() = %d\n", ret);
				return ret;
			}
		}
		/* We will scale exactly */
		if (mf.width > width)
			pix->width = width;
		if (mf.height > height)
			pix->height = height;

		pix->bytesperline = max(pix->bytesperline, pix->width);
		pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX);
		pix->bytesperline &= ~3;
		break;

	default:
		/* Configurable stride isn't supported in pass-through mode. */
		pix->bytesperline  = 0;
	}

	pix->width	&= ~3;
	pix->height	&= ~3;
	pix->sizeimage	= 0;

	dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n",
		__func__, ret, pix->pixelformat, pix->width, pix->height);

	return ret;
}
/*
 * soc-camera .set_livecrop callback: crop while streaming. The capture
 * queue is frozen and the client stream stopped around the actual
 * sh_mobile_ceu_set_crop() call; if the output window changed, the format
 * and bus parameters are re-applied to restore the original size.
 */
static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
				      const struct v4l2_crop *a)
{
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	u32 out_width = icd->user_width, out_height = icd->user_height;
	int ret;

	/* Freeze queue */
	pcdev->frozen = 1;
	/* Wait for frame */
	ret = wait_for_completion_interruptible(&pcdev->complete);
	/*
	 * NOTE(review): the return value above is immediately overwritten
	 * below — an interrupted wait is not handled separately.
	 */
	/* Stop the client */
	ret = v4l2_subdev_call(sd, video, s_stream, 0);
	if (ret < 0)
		dev_warn(icd->parent,
			 "Client failed to stop the stream: %d\n", ret);
	else
		/* Do the crop, if it fails, there's nothing more we can do */
		sh_mobile_ceu_set_crop(icd, a);

	dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height);

	if (icd->user_width != out_width || icd->user_height != out_height) {
		/* re-negotiate the original output window */
		struct v4l2_format f = {
			.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
			.fmt.pix = {
				.width		= out_width,
				.height		= out_height,
				.pixelformat	= icd->current_fmt->host_fmt->fourcc,
				.field		= pcdev->field,
				.colorspace	= icd->colorspace,
			},
		};
		ret = sh_mobile_ceu_set_fmt(icd, &f);
		if (!ret && (out_width != f.fmt.pix.width ||
			     out_height != f.fmt.pix.height))
			ret = -EINVAL;
		if (!ret) {
			icd->user_width		= out_width & ~3;
			icd->user_height	= out_height & ~3;
			ret = sh_mobile_ceu_set_bus_param(icd);
		}
	}

	/* Thaw the queue */
	pcdev->frozen = 0;
	spin_lock_irq(&pcdev->lock);
	sh_mobile_ceu_capture(pcdev);
	spin_unlock_irq(&pcdev->lock);
	/* Start the client */
	ret = v4l2_subdev_call(sd, video, s_stream, 1);
	return ret;
}
/* soc-camera .poll callback: delegate to the client's videobuf2 queue */
static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt)
{
	struct soc_camera_device *icd = file->private_data;

	return vb2_poll(&icd->vb2_vidq, file, pt);
}
/* soc-camera .querycap callback: card name and capture capabilities */
static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
				  struct v4l2_capability *cap)
{
	strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	return 0;
}
/*
 * Initialise the vb2 capture queue: MMAP/USERPTR I/O, DMA-contiguous
 * memory ops and monotonic buffer timestamps.
 */
static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
				       struct soc_camera_device *icd)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR;
	q->drv_priv = icd;
	q->ops = &sh_mobile_ceu_videobuf_ops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;

	return vb2_queue_init(q);
}
/* soc-camera host operations implemented by this driver */
static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
	.owner		= THIS_MODULE,
	.add		= sh_mobile_ceu_add_device,
	.remove		= sh_mobile_ceu_remove_device,
	.clock_start	= sh_mobile_ceu_clock_start,
	.clock_stop	= sh_mobile_ceu_clock_stop,
	.get_formats	= sh_mobile_ceu_get_formats,
	.put_formats	= sh_mobile_ceu_put_formats,
	.get_crop	= sh_mobile_ceu_get_crop,
	.set_crop	= sh_mobile_ceu_set_crop,
	.set_livecrop	= sh_mobile_ceu_set_livecrop,
	.set_fmt	= sh_mobile_ceu_set_fmt,
	.try_fmt	= sh_mobile_ceu_try_fmt,
	.poll		= sh_mobile_ceu_poll,
	.querycap	= sh_mobile_ceu_querycap,
	.set_bus_param	= sh_mobile_ceu_set_bus_param,
	.init_videobuf2	= sh_mobile_ceu_init_videobuf,
};
/*
 * Synchronisation helper for the legacy CSI-2 platform-device dance in
 * probe: bus_notify() below blocks unbinding of .dev on .completion.
 */
struct bus_wait {
	struct notifier_block	notifier;
	struct completion	completion;
	struct device		*dev;
};
/*
 * Platform-bus notifier: when the watched device loses its driver, block
 * until the owner signals the completion, preventing the CSI-2 module
 * from unloading underneath us. All other devices / events pass through.
 */
static int bus_notify(struct notifier_block *nb,
		      unsigned long action, void *data)
{
	struct bus_wait *wait = container_of(nb, struct bus_wait, notifier);
	struct device *dev = data;

	if (wait->dev != dev)
		return NOTIFY_DONE;

	if (action == BUS_NOTIFY_UNBOUND_DRIVER) {
		/* Protect from module unloading */
		wait_for_completion(&wait->completion);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
static int sh_mobile_ceu_probe(struct platform_device *pdev)
{
struct sh_mobile_ceu_dev *pcdev;
struct resource *res;
void __iomem *base;
unsigned int irq;
int err, i;
struct bus_wait wait = {
.completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion),
.notifier.notifier_call = bus_notify,
};
struct sh_mobile_ceu_companion *csi2;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!res || (int)irq <= 0) {
dev_err(&pdev->dev, "Not enough CEU platform resources.\n");
return -ENODEV;
}
pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
if (!pcdev) {
dev_err(&pdev->dev, "Could not allocate pcdev\n");
return -ENOMEM;
}
INIT_LIST_HEAD(&pcdev->capture);
spin_lock_init(&pcdev->lock);
init_completion(&pcdev->complete);
pcdev->pdata = pdev->dev.platform_data;
if (!pcdev->pdata && !pdev->dev.of_node) {
dev_err(&pdev->dev, "CEU platform data not set.\n");
return -EINVAL;
}
/* TODO: implement per-device bus flags */
if (pcdev->pdata) {
pcdev->max_width = pcdev->pdata->max_width;
pcdev->max_height = pcdev->pdata->max_height;
pcdev->flags = pcdev->pdata->flags;
}
if (!pcdev->max_width) {
unsigned int v;
err = of_property_read_u32(pdev->dev.of_node, "renesas,max-width", &v);
if (!err)
pcdev->max_width = v;
if (!pcdev->max_width)
pcdev->max_width = 2560;
}
if (!pcdev->max_height) {
unsigned int v;
err = of_property_read_u32(pdev->dev.of_node, "renesas,max-height", &v);
if (!err)
pcdev->max_height = v;
if (!pcdev->max_height)
pcdev->max_height = 1920;
}
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
pcdev->irq = irq;
pcdev->base = base;
pcdev->video_limit = 0; /* only enabled if second resource exists */
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
err = dma_declare_coherent_memory(&pdev->dev, res->start,
res->start,
resource_size(res),
DMA_MEMORY_MAP |
DMA_MEMORY_EXCLUSIVE);
if (!err) {
dev_err(&pdev->dev, "Unable to declare CEU memory.\n");
return -ENXIO;
}
pcdev->video_limit = resource_size(res);
}
/* request irq */
err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq,
0, dev_name(&pdev->dev), pcdev);
if (err) {
dev_err(&pdev->dev, "Unable to register CEU interrupt.\n");
goto exit_release_mem;
}
pm_suspend_ignore_children(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
pcdev->ici.priv = pcdev;
pcdev->ici.v4l2_dev.dev = &pdev->dev;
pcdev->ici.nr = pdev->id;
pcdev->ici.drv_name = dev_name(&pdev->dev);
pcdev->ici.ops = &sh_mobile_ceu_host_ops;
pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE;
pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
if (IS_ERR(pcdev->alloc_ctx)) {
err = PTR_ERR(pcdev->alloc_ctx);
goto exit_free_clk;
}
if (pcdev->pdata && pcdev->pdata->asd_sizes) {
struct v4l2_async_subdev **asd;
char name[] = "sh-mobile-csi2";
int j;
/*
* CSI2 interfacing: several groups can use CSI2, pick up the
* first one
*/
asd = pcdev->pdata->asd;
for (j = 0; pcdev->pdata->asd_sizes[j]; j++) {
for (i = 0; i < pcdev->pdata->asd_sizes[j]; i++, asd++) {
dev_dbg(&pdev->dev, "%s(): subdev #%d, type %u\n",
__func__, i, (*asd)->match_type);
if ((*asd)->match_type == V4L2_ASYNC_MATCH_DEVNAME &&
!strncmp(name, (*asd)->match.device_name.name,
sizeof(name) - 1)) {
pcdev->csi2_asd = *asd;
break;
}
}
if (pcdev->csi2_asd)
break;
}
pcdev->ici.asd = pcdev->pdata->asd;
pcdev->ici.asd_sizes = pcdev->pdata->asd_sizes;
}
/* Legacy CSI2 interfacing */
csi2 = pcdev->pdata ? pcdev->pdata->csi2 : NULL;
if (csi2) {
/*
* TODO: remove this once all users are converted to
* asynchronous CSI2 probing. If it has to be kept, csi2
* platform device resources have to be added, using
* platform_device_add_resources()
*/
struct platform_device *csi2_pdev =
platform_device_alloc("sh-mobile-csi2", csi2->id);
struct sh_csi2_pdata *csi2_pdata = csi2->platform_data;
if (!csi2_pdev) {
err = -ENOMEM;
goto exit_free_ctx;
}
pcdev->csi2_pdev = csi2_pdev;
err = platform_device_add_data(csi2_pdev, csi2_pdata,
sizeof(*csi2_pdata));
if (err < 0)
goto exit_pdev_put;
csi2_pdev->resource = csi2->resource;
csi2_pdev->num_resources = csi2->num_resources;
err = platform_device_add(csi2_pdev);
if (err < 0)
goto exit_pdev_put;
wait.dev = &csi2_pdev->dev;
err = bus_register_notifier(&platform_bus_type, &wait.notifier);
if (err < 0)
goto exit_pdev_unregister;
/*
* From this point the driver module will not unload, until
* we complete the completion.
*/
if (!csi2_pdev->dev.driver) {
complete(&wait.completion);
/* Either too late, or probing failed */
bus_unregister_notifier(&platform_bus_type, &wait.notifier);
err = -ENXIO;
goto exit_pdev_unregister;
}
/*
* The module is still loaded, in the worst case it is hanging
* in device release on our completion. So, _now_ dereferencing
* the "owner" is safe!
*/
err = try_module_get(csi2_pdev->dev.driver->owner);
/* Let notifier complete, if it has been locked */
complete(&wait.completion);
bus_unregister_notifier(&platform_bus_type, &wait.notifier);
if (!err) {
err = -ENODEV;
goto exit_pdev_unregister;
}
pcdev->csi2_sd = platform_get_drvdata(csi2_pdev);
}
err = soc_camera_host_register(&pcdev->ici);
if (err)
goto exit_csi2_unregister;
if (csi2) {
err = v4l2_device_register_subdev(&pcdev->ici.v4l2_dev,
pcdev->csi2_sd);
dev_dbg(&pdev->dev, "%s(): ret(register_subdev) = %d\n",
__func__, err);
if (err < 0)
goto exit_host_unregister;
/* v4l2_device_register_subdev() took a reference too */
module_put(pcdev->csi2_sd->owner);
}
return 0;
exit_host_unregister:
soc_camera_host_unregister(&pcdev->ici);
exit_csi2_unregister:
if (csi2) {
module_put(pcdev->csi2_pdev->dev.driver->owner);
exit_pdev_unregister:
platform_device_del(pcdev->csi2_pdev);
exit_pdev_put:
pcdev->csi2_pdev->resource = NULL;
platform_device_put(pcdev->csi2_pdev);
}
exit_free_ctx:
vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
exit_free_clk:
pm_runtime_disable(&pdev->dev);
exit_release_mem:
if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
dma_release_declared_memory(&pdev->dev);
return err;
}
/* Platform driver ->remove: tear down everything probe set up, in
 * reverse order of creation. */
static int sh_mobile_ceu_remove(struct platform_device *pdev)
{
	struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
	struct sh_mobile_ceu_dev *pcdev = container_of(soc_host,
					struct sh_mobile_ceu_dev, ici);
	struct platform_device *csi2_pdev = pcdev->csi2_pdev;

	soc_camera_host_unregister(soc_host);
	pm_runtime_disable(&pdev->dev);
	/* Coherent memory is only declared when a second MEM resource
	 * exists (see the matching check in the probe error path). */
	if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
		dma_release_declared_memory(&pdev->dev);
	vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx);
	/* Legacy CSI2 path: drop the child platform device if it bound.
	 * Its resource array is borrowed from platform data (assigned in
	 * probe), so clear it before platform_device_put() to keep the
	 * core from freeing memory it does not own. */
	if (csi2_pdev && csi2_pdev->dev.driver) {
		struct module *csi2_drv = csi2_pdev->dev.driver->owner;

		platform_device_del(csi2_pdev);
		csi2_pdev->resource = NULL;
		platform_device_put(csi2_pdev);
		module_put(csi2_drv);
	}
	return 0;
}
/*
 * sh_mobile_ceu_runtime_nop - shared no-op runtime PM callback
 *
 * Serves as both ->runtime_suspend() and ->runtime_resume().  The
 * driver reprograms all registers after pm_runtime_get_sync(), so no
 * register state needs saving or restoring here; simply report success.
 */
static int sh_mobile_ceu_runtime_nop(struct device *dev)
{
	return 0;
}
/* Runtime PM: suspend and resume both resolve to the no-op callback. */
static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = {
	.runtime_suspend = sh_mobile_ceu_runtime_nop,
	.runtime_resume = sh_mobile_ceu_runtime_nop,
};

/* Device-tree match table, also exported for module autoloading. */
static const struct of_device_id sh_mobile_ceu_of_match[] = {
	{ .compatible = "renesas,sh-mobile-ceu" },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(of, sh_mobile_ceu_of_match);

static struct platform_driver sh_mobile_ceu_driver = {
	.driver	= {
		.name	= "sh_mobile_ceu",
		.owner	= THIS_MODULE,
		.pm	= &sh_mobile_ceu_dev_pm_ops,
		.of_match_table = sh_mobile_ceu_of_match,
	},
	.probe		= sh_mobile_ceu_probe,
	.remove		= sh_mobile_ceu_remove,
};
/* Module init: best-effort load of the CSI2 helper driver before
 * registering ourselves, so the legacy CSI2 path in probe can bind. */
static int __init sh_mobile_ceu_init(void)
{
	/* Return value intentionally ignored: loading sh_mobile_csi2 is
	 * opportunistic, whatever the outcome we still register. */
	request_module("sh_mobile_csi2");
	return platform_driver_register(&sh_mobile_ceu_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit sh_mobile_ceu_exit(void)
{
	platform_driver_unregister(&sh_mobile_ceu_driver);
}

module_init(sh_mobile_ceu_init);
module_exit(sh_mobile_ceu_exit);

MODULE_DESCRIPTION("SuperH Mobile CEU driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1.0");
MODULE_ALIAS("platform:sh_mobile_ceu");
| gpl-2.0 |
olexiyt/telechips-linux | drivers/s390/cio/ccwgroup.c | 736 | 16806 | /*
* bus driver for ccwgroup
*
* Copyright IBM Corp. 2002, 2009
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/dcache.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#define CCW_BUS_ID_SIZE 20
/* In Linux 2.4, we had a channel device layer called "chandev"
* that did all sorts of obscure stuff for networking devices.
* This is another driver that serves as a replacement for just
* one of its functions, namely the translation of single subchannels
* to devices that use multiple subchannels.
*/
/* a device matches a driver if all its slave devices match the same
* entry of the driver */
/* A ccwgroup device is serviced by the driver whose driver_id matches
 * the creator_id recorded when the group was created. */
static int
ccwgroup_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(drv);

	return (gdev->creator_id == gdrv->driver_id) ? 1 : 0;
}
/* Uevent hook: no bus-specific environment variables are exported yet. */
static int
ccwgroup_uevent (struct device *dev, struct kobj_uevent_env *env)
{
	return 0;	/* TODO */
}
static struct bus_type ccwgroup_bus_type;
/* Remove the sysfs links created by __ccwgroup_create_symlinks():
 * the "cdevN" links under the group device and the "group_device"
 * back-link under every slave ccw device. */
static void
__ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
	int i;
	char str[8];

	for (i = 0; i < gdev->count; i++) {
		sprintf(str, "cdev%d", i);
		sysfs_remove_link(&gdev->dev.kobj, str);
		sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device");
	}
}

/*
 * Remove references from ccw devices to ccw group device and from
 * ccw group device to ccw devices.
 */
static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
{
	struct ccw_device *cdev;
	int i;

	for (i = 0; i < gdev->count; i++) {
		cdev = gdev->cdev[i];
		if (!cdev)
			continue;
		/* Clearing drvdata under the ccw lock marks the slave as
		 * no longer belonging to a group. */
		spin_lock_irq(cdev->ccwlock);
		dev_set_drvdata(&cdev->dev, NULL);
		spin_unlock_irq(cdev->ccwlock);
		gdev->cdev[i] = NULL;
		/* Drop the reference taken when the device was grouped
		 * (see ccwgroup_create_from_string()). */
		put_device(&cdev->dev);
	}
}
/*
 * Provide an 'ungroup' attribute so the user can remove group devices no
 * longer needed or accidentially created. Saves memory :)
 */
static void ccwgroup_ungroup_callback(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);

	/* reg_mutex serializes registration state changes; only tear the
	 * group down if it is still registered. */
	mutex_lock(&gdev->reg_mutex);
	if (device_is_registered(&gdev->dev)) {
		__ccwgroup_remove_symlinks(gdev);
		device_unregister(dev);
		__ccwgroup_remove_cdev_refs(gdev);
	}
	mutex_unlock(&gdev->reg_mutex);
}

/* sysfs "ungroup" store: schedule asynchronous removal of the group. */
static ssize_t
ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct ccwgroup_device *gdev;
	int rc;

	gdev = to_ccwgroupdev(dev);
	/* Prevent concurrent online/offline processing and ungrouping. */
	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
		return -EAGAIN;
	if (gdev->state != CCWGROUP_OFFLINE) {
		rc = -EINVAL;
		goto out;
	}
	/* Note that we cannot unregister the device from one of its
	 * attribute methods, so we have to use this roundabout approach.
	 */
	rc = device_schedule_callback(dev, ccwgroup_ungroup_callback);
out:
	if (rc) {
		if (rc != -EAGAIN)
			/* Release onoff "lock" when ungrouping failed. */
			atomic_set(&gdev->onoff, 0);
		return rc;
	}
	return count;
}

static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);

/* ->release for the group device: frees the ccwgroup_device allocated
 * in ccwgroup_create_from_string(). */
static void
ccwgroup_release (struct device *dev)
{
	kfree(to_ccwgroupdev(dev));
}
/* Create the sysfs links tying group and slaves together: a
 * "group_device" link under each slave ccw device pointing at the group
 * and "cdev0".."cdevN" links under the group pointing back at the
 * slaves.  On failure all links created so far are removed again before
 * the error is returned. */
static int
__ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
{
	char str[8];
	int i, rc;

	for (i = 0; i < gdev->count; i++) {
		rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj,
				       "group_device");
		if (rc) {
			/* Unwind the back-links created so far. */
			for (--i; i >= 0; i--)
				sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
						  "group_device");
			return rc;
		}
	}
	for (i = 0; i < gdev->count; i++) {
		sprintf(str, "cdev%d", i);
		rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj,
				       str);
		if (rc) {
			/* Unwind the forward links, then all back-links. */
			for (--i; i >= 0; i--) {
				sprintf(str, "cdev%d", i);
				sysfs_remove_link(&gdev->dev.kobj, str);
			}
			for (i = 0; i < gdev->count; i++)
				sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
						  "group_device");
			return rc;
		}
	}
	return 0;
}
/* Extract the next comma-separated bus id token from *buf into bus_id
 * and advance *buf past it.  For the last token an optional trailing
 * newline is stripped (the buffer is modified in place) and *buf ends
 * up pointing at the terminator, or NULL when no newline was present.
 * Returns 0 on success, -EINVAL if the token would overflow bus_id. */
static int __get_next_bus_id(const char **buf, char *bus_id)
{
	char *cur = (char *)*buf;
	char *sep = strchr(cur, ',');
	char *next;
	int len;

	if (sep) {
		/* More entries follow: token ends at the comma. */
		len = sep - cur + 1;
		next = sep + 1;
	} else {
		/* Last entry: strip an optional trailing newline. */
		sep = strchr(cur, '\n');
		if (sep)
			*sep = '\0';
		len = strlen(cur) + 1;
		next = sep;
	}

	*buf = next;

	if (len >= CCW_BUS_ID_SIZE)
		return -EINVAL;

	/* Bounded copy with explicit termination (strlcpy equivalent). */
	memcpy(bus_id, cur, len - 1);
	bus_id[len - 1] = '\0';
	return 0;
}
/* Check that a bus id has the cssid.ssid.devno shape (hex fields).
 * Note sscanf field widths are maxima, so this is a loose check. */
static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE])
{
	int cssid, ssid, devno;

	/* Must be of form %x.%x.%04x */
	return sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) == 3;
}
/**
* ccwgroup_create_from_string() - create and register a ccw group device
* @root: parent device for the new device
* @creator_id: identifier of creating driver
* @cdrv: ccw driver of slave devices
* @num_devices: number of slave devices
* @buf: buffer containing comma separated bus ids of slave devices
*
* Create and register a new ccw group device as a child of @root. Slave
* devices are obtained from the list of bus ids given in @buf and must all
* belong to @cdrv.
* Returns:
* %0 on success and an error code on failure.
* Context:
* non-atomic
*/
int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
				struct ccw_driver *cdrv, int num_devices,
				const char *buf)
{
	struct ccwgroup_device *gdev;
	int rc, i;
	char tmp_bus_id[CCW_BUS_ID_SIZE];
	const char *curr_buf;

	/* One allocation covers the group device plus its slave array. */
	gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
		       GFP_KERNEL);
	if (!gdev)
		return -ENOMEM;

	atomic_set(&gdev->onoff, 0);
	mutex_init(&gdev->reg_mutex);
	mutex_lock(&gdev->reg_mutex);
	gdev->creator_id = creator_id;
	gdev->count = num_devices;
	gdev->dev.bus = &ccwgroup_bus_type;
	gdev->dev.parent = root;
	gdev->dev.release = ccwgroup_release;
	device_initialize(&gdev->dev);

	/* Parse and claim each slave bus id in turn. */
	curr_buf = buf;
	for (i = 0; i < num_devices && curr_buf; i++) {
		rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
		if (rc != 0)
			goto error;
		if (!__is_valid_bus_id(tmp_bus_id)) {
			rc = -EINVAL;
			goto error;
		}
		gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
		/*
		 * All devices have to be of the same type in
		 * order to be grouped.
		 */
		if (!gdev->cdev[i]
		    || gdev->cdev[i]->id.driver_info !=
		    gdev->cdev[0]->id.driver_info) {
			rc = -EINVAL;
			goto error;
		}
		/* Don't allow a device to belong to more than one group. */
		spin_lock_irq(gdev->cdev[i]->ccwlock);
		if (dev_get_drvdata(&gdev->cdev[i]->dev)) {
			spin_unlock_irq(gdev->cdev[i]->ccwlock);
			rc = -EINVAL;
			goto error;
		}
		dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
		spin_unlock_irq(gdev->cdev[i]->ccwlock);
	}
	/* Check for sufficient number of bus ids. */
	if (i < num_devices && !curr_buf) {
		rc = -EINVAL;
		goto error;
	}
	/*
	 * Check for trailing stuff.  __get_next_bus_id() leaves curr_buf
	 * NULL after consuming a final entry with no trailing newline, so
	 * guard against calling strlen(NULL) here.
	 */
	if (i == num_devices && curr_buf && strlen(curr_buf) > 0) {
		rc = -EINVAL;
		goto error;
	}

	dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev));
	rc = device_add(&gdev->dev);
	if (rc)
		goto error;
	/* Hold an extra reference across attribute/symlink setup. */
	get_device(&gdev->dev);
	rc = device_create_file(&gdev->dev, &dev_attr_ungroup);
	if (rc) {
		device_unregister(&gdev->dev);
		goto error;
	}
	rc = __ccwgroup_create_symlinks(gdev);
	if (!rc) {
		mutex_unlock(&gdev->reg_mutex);
		put_device(&gdev->dev);
		return 0;
	}
	device_remove_file(&gdev->dev, &dev_attr_ungroup);
	device_unregister(&gdev->dev);
error:
	/* Undo drvdata claims and drop the slave references we took. */
	for (i = 0; i < num_devices; i++)
		if (gdev->cdev[i]) {
			spin_lock_irq(gdev->cdev[i]->ccwlock);
			if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
				dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
			spin_unlock_irq(gdev->cdev[i]->ccwlock);
			put_device(&gdev->cdev[i]->dev);
			gdev->cdev[i] = NULL;
		}
	mutex_unlock(&gdev->reg_mutex);
	put_device(&gdev->dev);
	return rc;
}
EXPORT_SYMBOL(ccwgroup_create_from_string);
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
			     void *data);

static struct notifier_block ccwgroup_nb = {
	.notifier_call = ccwgroup_notifier
};

/* Module init: register the bus type and its unbind notifier; undo the
 * bus registration if attaching the notifier fails. */
static int __init init_ccwgroup(void)
{
	int ret;

	ret = bus_register(&ccwgroup_bus_type);
	if (ret)
		return ret;

	ret = bus_register_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
	if (ret)
		bus_unregister(&ccwgroup_bus_type);

	return ret;
}

/* Module exit: tear down in reverse registration order. */
static void __exit cleanup_ccwgroup(void)
{
	bus_unregister_notifier(&ccwgroup_bus_type, &ccwgroup_nb);
	bus_unregister(&ccwgroup_bus_type);
}

module_init(init_ccwgroup);
module_exit(cleanup_ccwgroup);
/************************** driver stuff ******************************/
/* Bring a group device online via the bound driver's set_online()
 * callback.  The onoff flag serializes concurrent online/offline
 * attempts; -EAGAIN is returned if one is already in flight. */
static int
ccwgroup_set_online(struct ccwgroup_device *gdev)
{
	struct ccwgroup_driver *gdrv;
	int ret = 0;

	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
		return -EAGAIN;
	if (gdev->state == CCWGROUP_ONLINE)
		goto out;
	if (!gdev->dev.driver) {
		ret = -EINVAL;
		goto out;
	}
	gdrv = to_ccwgroupdrv(gdev->dev.driver);
	if (gdrv->set_online)
		ret = gdrv->set_online(gdev);
	if (ret)
		goto out;

	gdev->state = CCWGROUP_ONLINE;
out:
	atomic_set(&gdev->onoff, 0);
	return ret;
}
/* Take a group device offline via the bound driver's set_offline()
 * callback.  Mirrors ccwgroup_set_online(): the onoff flag serializes
 * state changes and -EAGAIN reports a change already in progress. */
static int
ccwgroup_set_offline(struct ccwgroup_device *gdev)
{
	struct ccwgroup_driver *gdrv;
	int ret = 0;

	if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
		return -EAGAIN;
	if (gdev->state == CCWGROUP_OFFLINE)
		goto out;
	if (!gdev->dev.driver) {
		ret = -EINVAL;
		goto out;
	}
	gdrv = to_ccwgroupdrv(gdev->dev.driver);
	if (gdrv->set_offline)
		ret = gdrv->set_offline(gdev);
	if (ret)
		goto out;

	gdev->state = CCWGROUP_OFFLINE;
out:
	atomic_set(&gdev->onoff, 0);
	return ret;
}
/* sysfs "online" store: "1" brings the group device online, "0" takes
 * it offline.  A reference on the group driver module is held across
 * the state change so the driver cannot unload mid-operation. */
static ssize_t
ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct ccwgroup_device *gdev;
	struct ccwgroup_driver *gdrv;
	unsigned long value;
	int ret;

	if (!dev->driver)
		return -ENODEV;
	gdev = to_ccwgroupdev(dev);
	gdrv = to_ccwgroupdrv(dev->driver);

	if (!try_module_get(gdrv->driver.owner))
		return -EINVAL;
	ret = strict_strtoul(buf, 0, &value);
	if (ret)
		goto out;
	if (value == 1)
		ret = ccwgroup_set_online(gdev);
	else if (value == 0)
		ret = ccwgroup_set_offline(gdev);
	else
		ret = -EINVAL;
out:
	module_put(gdrv->driver.owner);
	/* On success the whole buffer counts as consumed. */
	return (ret == 0) ? count : ret;
}

/* sysfs "online" show: "1\n" when the group device is online. */
static ssize_t
ccwgroup_online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	int online;

	online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE);

	return sprintf(buf, online ? "1\n" : "0\n");
}

static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store);
/* Bus ->probe: create the "online" attribute, then run the group
 * driver's probe.  The attribute is removed again on probe failure. */
static int
ccwgroup_probe (struct device *dev)
{
	struct ccwgroup_device *gdev;
	struct ccwgroup_driver *gdrv;
	int ret;

	gdev = to_ccwgroupdev(dev);
	gdrv = to_ccwgroupdrv(dev->driver);

	if ((ret = device_create_file(dev, &dev_attr_online)))
		return ret;

	ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV;
	if (ret)
		device_remove_file(dev, &dev_attr_online);

	return ret;
}

/* Bus ->remove: drop the sysfs attributes and let the group driver
 * clean up.  dev->driver may already be cleared at this point, hence
 * the early return after removing the attributes. */
static int
ccwgroup_remove (struct device *dev)
{
	struct ccwgroup_device *gdev;
	struct ccwgroup_driver *gdrv;

	device_remove_file(dev, &dev_attr_online);
	device_remove_file(dev, &dev_attr_ungroup);

	if (!dev->driver)
		return 0;

	gdev = to_ccwgroupdev(dev);
	gdrv = to_ccwgroupdrv(dev->driver);

	if (gdrv->remove)
		gdrv->remove(gdev);

	return 0;
}

/* Bus ->shutdown: forward to the group driver, if one is bound. */
static void ccwgroup_shutdown(struct device *dev)
{
	struct ccwgroup_device *gdev;
	struct ccwgroup_driver *gdrv;

	if (!dev->driver)
		return;

	gdev = to_ccwgroupdev(dev);
	gdrv = to_ccwgroupdrv(dev->driver);

	if (gdrv->shutdown)
		gdrv->shutdown(gdev);
}
/* Power management callbacks.  Each is a thin forwarder that does
 * nothing unless a group driver is bound and the device is online. */

static int ccwgroup_pm_prepare(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	/* gdrv is only dereferenced after the driver check below. */
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

	/* Fail while device is being set online/offline. */
	if (atomic_read(&gdev->onoff))
		return -EAGAIN;

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return 0;

	return gdrv->prepare ? gdrv->prepare(gdev) : 0;
}

static void ccwgroup_pm_complete(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return;

	if (gdrv->complete)
		gdrv->complete(gdev);
}

static int ccwgroup_pm_freeze(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return 0;

	return gdrv->freeze ? gdrv->freeze(gdev) : 0;
}

static int ccwgroup_pm_thaw(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return 0;

	return gdrv->thaw ? gdrv->thaw(gdev) : 0;
}

static int ccwgroup_pm_restore(struct device *dev)
{
	struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
	struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);

	if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE)
		return 0;

	return gdrv->restore ? gdrv->restore(gdev) : 0;
}

static const struct dev_pm_ops ccwgroup_pm_ops = {
	.prepare = ccwgroup_pm_prepare,
	.complete = ccwgroup_pm_complete,
	.freeze = ccwgroup_pm_freeze,
	.thaw = ccwgroup_pm_thaw,
	.restore = ccwgroup_pm_restore,
};
static struct bus_type ccwgroup_bus_type = {
	.name   = "ccwgroup",
	.match  = ccwgroup_bus_match,
	.uevent = ccwgroup_uevent,
	.probe  = ccwgroup_probe,
	.remove = ccwgroup_remove,
	.shutdown = ccwgroup_shutdown,
	.pm = &ccwgroup_pm_ops,
};

/* Bus notifier: when a driver is unbound from a group device, schedule
 * the ungroup callback so the now driverless group is torn down. */
static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	struct device *dev = data;

	if (action == BUS_NOTIFY_UNBIND_DRIVER)
		device_schedule_callback(dev, ccwgroup_ungroup_callback);

	return NOTIFY_OK;
}
/**
* ccwgroup_driver_register() - register a ccw group driver
* @cdriver: driver to be registered
*
* This function is mainly a wrapper around driver_register().
*/
int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
{
	/* register our new driver with the core; all group drivers live
	 * on the ccwgroup bus */
	cdriver->driver.bus = &ccwgroup_bus_type;

	return driver_register(&cdriver->driver);
}
/* driver_find_device() callback that accepts every device. */
static int
__ccwgroup_match_all(struct device *dev, void *data)
{
	return 1;
}
/**
* ccwgroup_driver_unregister() - deregister a ccw group driver
* @cdriver: driver to be deregistered
*
* This function is mainly a wrapper around driver_unregister().
*/
void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
{
	struct device *dev;

	/* We don't want ccwgroup devices to live longer than their driver. */
	get_driver(&cdriver->driver);
	/* Unregister every group device still bound to this driver. */
	while ((dev = driver_find_device(&cdriver->driver, NULL, NULL,
					 __ccwgroup_match_all))) {
		struct ccwgroup_device *gdev = to_ccwgroupdev(dev);

		mutex_lock(&gdev->reg_mutex);
		__ccwgroup_remove_symlinks(gdev);
		device_unregister(dev);
		__ccwgroup_remove_cdev_refs(gdev);
		mutex_unlock(&gdev->reg_mutex);
		/* Drop the reference taken by driver_find_device(). */
		put_device(dev);
	}
	put_driver(&cdriver->driver);
	driver_unregister(&cdriver->driver);
}
/**
* ccwgroup_probe_ccwdev() - probe function for slave devices
* @cdev: ccw device to be probed
*
* This is a dummy probe function for ccw devices that are slave devices in
* a ccw group device.
* Returns:
* always %0
*/
int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
	return 0;	/* nothing to do for slave devices */
}
/**
* ccwgroup_remove_ccwdev() - remove function for slave devices
* @cdev: ccw device to be removed
*
* This is a remove function for ccw devices that are slave devices in a ccw
* group device. It sets the ccw device offline and also deregisters the
* embedding ccw group device.
*/
void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
{
	struct ccwgroup_device *gdev;

	/* Ignore offlining errors, device is gone anyway. */
	ccw_device_set_offline(cdev);
	/* If one of its devices is gone, the whole group is done for. */
	spin_lock_irq(cdev->ccwlock);
	gdev = dev_get_drvdata(&cdev->dev);
	if (!gdev) {
		/* Not (or no longer) part of a group. */
		spin_unlock_irq(cdev->ccwlock);
		return;
	}
	/* Get ccwgroup device reference for local processing. */
	get_device(&gdev->dev);
	spin_unlock_irq(cdev->ccwlock);
	/* Unregister group device. */
	mutex_lock(&gdev->reg_mutex);
	if (device_is_registered(&gdev->dev)) {
		__ccwgroup_remove_symlinks(gdev);
		device_unregister(&gdev->dev);
		__ccwgroup_remove_cdev_refs(gdev);
	}
	mutex_unlock(&gdev->reg_mutex);
	/* Release ccwgroup device reference for local processing. */
	put_device(&gdev->dev);
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccwgroup_driver_register);
EXPORT_SYMBOL(ccwgroup_driver_unregister);
EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
| gpl-2.0 |
olegsvs/android_kernel_ark_benefit_m7_mm | drivers/mmc/host/s3cmci.c | 736 | 46995 | /*
* linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver
*
* Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
*
* Current driver maintained by Ben Dooks and Simtec Electronics
* Copyright (C) 2008 Simtec Electronics <ben-linux@fluff.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <plat/gpio-cfg.h>
#include <mach/dma.h>
#include <mach/gpio-samsung.h>
#include <linux/platform_data/dma-s3c24xx.h>
#include <linux/platform_data/mmc-s3cmci.h>
#include "s3cmci.h"
#define DRIVER_NAME "s3c-mci"
#define S3C2410_SDICON (0x00)
#define S3C2410_SDIPRE (0x04)
#define S3C2410_SDICMDARG (0x08)
#define S3C2410_SDICMDCON (0x0C)
#define S3C2410_SDICMDSTAT (0x10)
#define S3C2410_SDIRSP0 (0x14)
#define S3C2410_SDIRSP1 (0x18)
#define S3C2410_SDIRSP2 (0x1C)
#define S3C2410_SDIRSP3 (0x20)
#define S3C2410_SDITIMER (0x24)
#define S3C2410_SDIBSIZE (0x28)
#define S3C2410_SDIDCON (0x2C)
#define S3C2410_SDIDCNT (0x30)
#define S3C2410_SDIDSTA (0x34)
#define S3C2410_SDIFSTA (0x38)
#define S3C2410_SDIDATA (0x3C)
#define S3C2410_SDIIMSK (0x40)
#define S3C2440_SDIDATA (0x40)
#define S3C2440_SDIIMSK (0x3C)
#define S3C2440_SDICON_SDRESET (1 << 8)
#define S3C2410_SDICON_SDIOIRQ (1 << 3)
#define S3C2410_SDICON_FIFORESET (1 << 1)
#define S3C2410_SDICON_CLOCKTYPE (1 << 0)
#define S3C2410_SDICMDCON_LONGRSP (1 << 10)
#define S3C2410_SDICMDCON_WAITRSP (1 << 9)
#define S3C2410_SDICMDCON_CMDSTART (1 << 8)
#define S3C2410_SDICMDCON_SENDERHOST (1 << 6)
#define S3C2410_SDICMDCON_INDEX (0x3f)
#define S3C2410_SDICMDSTAT_CRCFAIL (1 << 12)
#define S3C2410_SDICMDSTAT_CMDSENT (1 << 11)
#define S3C2410_SDICMDSTAT_CMDTIMEOUT (1 << 10)
#define S3C2410_SDICMDSTAT_RSPFIN (1 << 9)
#define S3C2440_SDIDCON_DS_WORD (2 << 22)
#define S3C2410_SDIDCON_TXAFTERRESP (1 << 20)
#define S3C2410_SDIDCON_RXAFTERCMD (1 << 19)
#define S3C2410_SDIDCON_BLOCKMODE (1 << 17)
#define S3C2410_SDIDCON_WIDEBUS (1 << 16)
#define S3C2410_SDIDCON_DMAEN (1 << 15)
#define S3C2410_SDIDCON_STOP (1 << 14)
#define S3C2440_SDIDCON_DATSTART (1 << 14)
#define S3C2410_SDIDCON_XFER_RXSTART (2 << 12)
#define S3C2410_SDIDCON_XFER_TXSTART (3 << 12)
#define S3C2410_SDIDCON_BLKNUM_MASK (0xFFF)
#define S3C2410_SDIDSTA_SDIOIRQDETECT (1 << 9)
#define S3C2410_SDIDSTA_FIFOFAIL (1 << 8)
#define S3C2410_SDIDSTA_CRCFAIL (1 << 7)
#define S3C2410_SDIDSTA_RXCRCFAIL (1 << 6)
#define S3C2410_SDIDSTA_DATATIMEOUT (1 << 5)
#define S3C2410_SDIDSTA_XFERFINISH (1 << 4)
#define S3C2410_SDIDSTA_TXDATAON (1 << 1)
#define S3C2410_SDIDSTA_RXDATAON (1 << 0)
#define S3C2440_SDIFSTA_FIFORESET (1 << 16)
#define S3C2440_SDIFSTA_FIFOFAIL (3 << 14)
#define S3C2410_SDIFSTA_TFDET (1 << 13)
#define S3C2410_SDIFSTA_RFDET (1 << 12)
#define S3C2410_SDIFSTA_COUNTMASK (0x7f)
#define S3C2410_SDIIMSK_RESPONSECRC (1 << 17)
#define S3C2410_SDIIMSK_CMDSENT (1 << 16)
#define S3C2410_SDIIMSK_CMDTIMEOUT (1 << 15)
#define S3C2410_SDIIMSK_RESPONSEND (1 << 14)
#define S3C2410_SDIIMSK_SDIOIRQ (1 << 12)
#define S3C2410_SDIIMSK_FIFOFAIL (1 << 11)
#define S3C2410_SDIIMSK_CRCSTATUS (1 << 10)
#define S3C2410_SDIIMSK_DATACRC (1 << 9)
#define S3C2410_SDIIMSK_DATATIMEOUT (1 << 8)
#define S3C2410_SDIIMSK_DATAFINISH (1 << 7)
#define S3C2410_SDIIMSK_TXFIFOHALF (1 << 4)
#define S3C2410_SDIIMSK_RXFIFOLAST (1 << 2)
#define S3C2410_SDIIMSK_RXFIFOHALF (1 << 0)
enum dbg_channels {
dbg_err = (1 << 0),
dbg_debug = (1 << 1),
dbg_info = (1 << 2),
dbg_irq = (1 << 3),
dbg_sg = (1 << 4),
dbg_dma = (1 << 5),
dbg_pio = (1 << 6),
dbg_fail = (1 << 7),
dbg_conf = (1 << 8),
};
static const int dbgmap_err = dbg_fail;
static const int dbgmap_info = dbg_info | dbg_conf;
static const int dbgmap_debug = dbg_err | dbg_debug;
#define dbg(host, channels, args...) \
do { \
if (dbgmap_err & channels) \
dev_err(&host->pdev->dev, args); \
else if (dbgmap_info & channels) \
dev_info(&host->pdev->dev, args); \
else if (dbgmap_debug & channels) \
dev_dbg(&host->pdev->dev, args); \
} while (0)
static void finalize_request(struct s3cmci_host *host);
static void s3cmci_send_request(struct mmc_host *mmc);
static void s3cmci_reset(struct s3cmci_host *host);
#ifdef CONFIG_MMC_DEBUG
/* Snapshot every SDI controller register and print the lot at
 * dbg_debug level, prefixed with @prefix (debug builds only). */
static void dbg_dumpregs(struct s3cmci_host *host, char *prefix)
{
	u32 con, pre, cmdarg, cmdcon, cmdsta, r0, r1, r2, r3, timer, bsize;
	u32 datcon, datcnt, datsta, fsta, imask;

	con 	= readl(host->base + S3C2410_SDICON);
	pre 	= readl(host->base + S3C2410_SDIPRE);
	cmdarg 	= readl(host->base + S3C2410_SDICMDARG);
	cmdcon 	= readl(host->base + S3C2410_SDICMDCON);
	cmdsta 	= readl(host->base + S3C2410_SDICMDSTAT);
	r0 	= readl(host->base + S3C2410_SDIRSP0);
	r1 	= readl(host->base + S3C2410_SDIRSP1);
	r2 	= readl(host->base + S3C2410_SDIRSP2);
	r3 	= readl(host->base + S3C2410_SDIRSP3);
	timer 	= readl(host->base + S3C2410_SDITIMER);
	bsize 	= readl(host->base + S3C2410_SDIBSIZE);
	datcon 	= readl(host->base + S3C2410_SDIDCON);
	datcnt 	= readl(host->base + S3C2410_SDIDCNT);
	datsta 	= readl(host->base + S3C2410_SDIDSTA);
	fsta 	= readl(host->base + S3C2410_SDIFSTA);
	/* The interrupt mask register offset differs per SoC, hence the
	 * indirection through host->sdiimsk. */
	imask   = readl(host->base + host->sdiimsk);

	dbg(host, dbg_debug, "%s  CON:[%08x]  PRE:[%08x]  TMR:[%08x]\n",
	    prefix, con, pre, timer);

	dbg(host, dbg_debug, "%s CCON:[%08x] CARG:[%08x] CSTA:[%08x]\n",
	    prefix, cmdcon, cmdarg, cmdsta);

	dbg(host, dbg_debug, "%s DCON:[%08x] FSTA:[%08x]"
	    " DSTA:[%08x] DCNT:[%08x]\n",
	    prefix, datcon, fsta, datsta, datcnt);

	dbg(host, dbg_debug, "%s   R0:[%08x]   R1:[%08x]"
	    "   R2:[%08x]   R3:[%08x]\n",
	    prefix, r0, r1, r2, r3);
}
/*
 * prepare_dbgmsg - pre-format the debug strings describing a command
 * @host: The host state.
 * @cmd: The MMC command about to be issued.
 * @stop: Non-zero if this is the request's STOP command.
 *
 * Fills host->dbgmsg_cmd (and host->dbgmsg_dat when the command
 * carries data) so the completion path can print them later via
 * dbg_dumpcmd().  Compiled only with CONFIG_MMC_DEBUG.
 */
static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd,
			   int stop)
{
	/* Fix: the format used to read "flags:0x08%x", which printed a
	 * literal "08" followed by an unpadded value; use a proper
	 * zero-padded 8-digit hex field instead. */
	snprintf(host->dbgmsg_cmd, 300,
		 "#%u%s op:%i arg:0x%08x flags:0x%08x retries:%u",
		 host->ccnt, (stop ? " (STOP)" : ""),
		 cmd->opcode, cmd->arg, cmd->flags, cmd->retries);

	if (cmd->data) {
		snprintf(host->dbgmsg_dat, 300,
			 "#%u bsize:%u blocks:%u bytes:%u",
			 host->dcnt, cmd->data->blksz,
			 cmd->data->blocks,
			 cmd->data->blocks * cmd->data->blksz);
	} else {
		host->dbgmsg_dat[0] = '\0';
	}
}
/* Print the outcome of a finished command (and its data phase, if any)
 * using the strings pre-formatted by prepare_dbgmsg().  Failures are
 * logged at dbg_fail level, successes at dbg_debug. */
static void dbg_dumpcmd(struct s3cmci_host *host, struct mmc_command *cmd,
			int fail)
{
	unsigned int dbglvl = fail ? dbg_fail : dbg_debug;

	if (!cmd)
		return;

	if (cmd->error == 0) {
		dbg(host, dbglvl, "CMD[OK] %s R0:0x%08x\n",
		    host->dbgmsg_cmd, cmd->resp[0]);
	} else {
		dbg(host, dbglvl, "CMD[ERR %i] %s Status:%s\n",
		    cmd->error, host->dbgmsg_cmd, host->status);
	}

	if (!cmd->data)
		return;

	if (cmd->data->error == 0) {
		dbg(host, dbglvl, "DAT[OK] %s\n", host->dbgmsg_dat);
	} else {
		dbg(host, dbglvl, "DAT[ERR %i] %s DCNT:0x%08x\n",
		    cmd->data->error, host->dbgmsg_dat,
		    readl(host->base + S3C2410_SDIDCNT));
	}
}
#else
static void dbg_dumpcmd(struct s3cmci_host *host,
struct mmc_command *cmd, int fail) { }
static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd,
int stop) { }
static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { }
#endif /* CONFIG_MMC_DEBUG */
/**
* s3cmci_host_usedma - return whether the host is using dma or pio
* @host: The host state
*
* Return true if the host is using DMA to transfer data, else false
* to use PIO mode. Will return static data depending on the driver
* configuration.
*/
static inline bool s3cmci_host_usedma(struct s3cmci_host *host)
{
#ifdef CONFIG_MMC_S3C_PIO
	return false;
#else /* CONFIG_MMC_S3C_DMA */
	return true;
#endif
}

/* Set additional bits in the SDI interrupt mask register and return
 * the resulting mask.  Callers run with the host IRQ managed via
 * s3cmci_enable_irq()/s3cmci_disable_irq(). */
static inline u32 enable_imask(struct s3cmci_host *host, u32 imask)
{
	u32 newmask;

	newmask = readl(host->base + host->sdiimsk);
	newmask |= imask;

	writel(newmask, host->base + host->sdiimsk);

	return newmask;
}

/* Clear bits in the SDI interrupt mask register; returns the new mask. */
static inline u32 disable_imask(struct s3cmci_host *host, u32 imask)
{
	u32 newmask;

	newmask = readl(host->base + host->sdiimsk);
	newmask &= ~imask;

	writel(newmask, host->base + host->sdiimsk);

	return newmask;
}

/* Mask out all interrupt sources except the SDIO card interrupt. */
static inline void clear_imask(struct s3cmci_host *host)
{
	u32 mask = readl(host->base + host->sdiimsk);

	/* preserve the SDIO IRQ mask state */
	mask &= S3C2410_SDIIMSK_SDIOIRQ;
	writel(mask, host->base + host->sdiimsk);
}
/**
* s3cmci_check_sdio_irq - test whether the SDIO IRQ is being signalled
* @host: The host to check.
*
* Test to see if the SDIO interrupt is being signalled in case the
* controller has failed to re-detect a card interrupt. Read GPE8 and
* see if it is low and if so, signal a SDIO interrupt.
*
* This is currently called if a request is finished (we assume that the
* bus is now idle) and when the SDIO IRQ is enabled in case the IRQ is
* already being indicated.
*/
static void s3cmci_check_sdio_irq(struct s3cmci_host *host)
{
	if (host->sdio_irqen) {
		/* GPE8 carries the SDIO interrupt line, active low (see
		 * the kernel-doc above). */
		if (gpio_get_value(S3C2410_GPE(8)) == 0) {
			pr_debug("%s: signalling irq\n", __func__);
			mmc_signal_sdio_irq(host->mmc);
		}
	}
}
/* Advance to the next scatterlist segment of the current request and
 * return its length and kernel-virtual address for PIO transfer.
 * Returns -EINVAL if no PIO transfer or request is active, -EBUSY once
 * all segments have been consumed. */
static inline int get_data_buffer(struct s3cmci_host *host,
				  u32 *bytes, u32 **pointer)
{
	struct scatterlist *sg;

	if (host->pio_active == XFER_NONE)
		return -EINVAL;

	if ((!host->mrq) || (!host->mrq->data))
		return -EINVAL;

	if (host->pio_sgptr >= host->mrq->data->sg_len) {
		dbg(host, dbg_debug, "no more buffers (%i/%i)\n",
		    host->pio_sgptr, host->mrq->data->sg_len);
		return -EBUSY;
	}
	sg = &host->mrq->data->sg[host->pio_sgptr];

	*bytes = sg->length;
	*pointer = sg_virt(sg);

	host->pio_sgptr++;

	dbg(host, dbg_sg, "new buffer (%i/%i)\n",
	    host->pio_sgptr, host->mrq->data->sg_len);

	return 0;
}
/* Number of bytes currently held in the SDI data FIFO. */
static inline u32 fifo_count(struct s3cmci_host *host)
{
	u32 fifostat = readl(host->base + S3C2410_SDIFSTA);

	fifostat &= S3C2410_SDIFSTA_COUNTMASK;
	return fifostat;
}

/* Free space left in the SDI data FIFO, computed against a usable
 * depth of 63 bytes (presumably the 64-byte FIFO minus one slot —
 * TODO confirm against the S3C24xx manual). */
static inline u32 fifo_free(struct s3cmci_host *host)
{
	u32 fifostat = readl(host->base + S3C2410_SDIFSTA);

	fifostat &= S3C2410_SDIFSTA_COUNTMASK;
	return 63 - fifostat;
}
/**
* s3cmci_enable_irq - enable IRQ, after having disabled it.
* @host: The device state.
* @more: True if more IRQs are expected from transfer.
*
* Enable the main IRQ if needed after it has been disabled.
*
* The IRQ can be one of the following states:
* - disabled during IDLE
* - disabled whilst processing data
* - enabled during transfer
* - enabled whilst awaiting SDIO interrupt detection
*/
static void s3cmci_enable_irq(struct s3cmci_host *host, bool more)
{
	unsigned long flags;
	bool enable = false;

	local_irq_save(flags);

	host->irq_enabled = more;
	host->irq_disabled = false;

	/* The IRQ must be up if more transfer IRQs are expected or the
	 * SDIO card interrupt is currently enabled. */
	enable = more | host->sdio_irqen;

	/* Only toggle when the cached state differs, keeping the
	 * enable_irq()/disable_irq() nesting balanced. */
	if (host->irq_state != enable) {
		host->irq_state = enable;

		if (enable)
			enable_irq(host->irq);
		else
			disable_irq(host->irq);
	}

	local_irq_restore(flags);
}

/**
 * s3cmci_disable_irq - disable the host IRQ whilst processing data
 * @host: The device state.
 * @transfer: True to disable the IRQ (data processing in progress),
 *	false to merely record it as no longer disabled.
 *
 * Counterpart to s3cmci_enable_irq(): when @transfer is set and the IRQ
 * is currently enabled, it is turned off and the cached state updated.
 */
static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer)
{
	unsigned long flags;

	local_irq_save(flags);

	/* pr_debug("%s: transfer %d\n", __func__, transfer); */

	host->irq_disabled = transfer;

	if (transfer && host->irq_state) {
		host->irq_state = false;
		disable_irq(host->irq);
	}

	local_irq_restore(flags);
}
/* PIO receive path: drain the SDI FIFO into the request's scatterlist
 * buffers, fetching a new segment via get_data_buffer() whenever the
 * current one is exhausted.  When no buffers remain the transfer is
 * marked for finalization; otherwise the RX FIFO interrupts are
 * re-armed before returning. */
static void do_pio_read(struct s3cmci_host *host)
{
	int res;
	u32 fifo;
	u32 *ptr;
	u32 fifo_words;
	void __iomem *from_ptr;

	/* write real prescaler to host, it might be set slow to fix */
	writel(host->prescaler, host->base + S3C2410_SDIPRE);

	from_ptr = host->base + host->sdidata;

	while ((fifo = fifo_count(host))) {
		if (!host->pio_bytes) {
			res = get_data_buffer(host, &host->pio_bytes,
					      &host->pio_ptr);
			if (res) {
				host->pio_active = XFER_NONE;
				host->complete_what = COMPLETION_FINALIZE;

				dbg(host, dbg_pio, "pio_read(): "
				    "complete (no more data).\n");
				return;
			}

			dbg(host, dbg_pio,
			    "pio_read(): new target: [%i]@[%p]\n",
			    host->pio_bytes, host->pio_ptr);
		}

		dbg(host, dbg_pio,
		    "pio_read(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n",
		    fifo, host->pio_bytes,
		    readl(host->base + S3C2410_SDIDCNT));

		/* If we have reached the end of the block, we can
		 * read a word and get 1 to 3 bytes. If we in the
		 * middle of the block, we have to read full words,
		 * otherwise we will write garbage, so round down to
		 * an even multiple of 4. */
		if (fifo >= host->pio_bytes)
			fifo = host->pio_bytes;
		else
			fifo -= fifo & 3;

		host->pio_bytes -= fifo;
		host->pio_count += fifo;

		/* Bulk copy whole 32-bit words first... */
		fifo_words = fifo >> 2;
		ptr = host->pio_ptr;
		while (fifo_words--)
			*ptr++ = readl(from_ptr);
		host->pio_ptr = ptr;

		/* ...then unpack a final partial word byte by byte
		 * (little-endian order: lowest byte first). */
		if (fifo & 3) {
			u32 n = fifo & 3;
			u32 data = readl(from_ptr);
			u8 *p = (u8 *)host->pio_ptr;

			while (n--) {
				*p++ = data;
				data >>= 8;
			}
		}
	}

	if (!host->pio_bytes) {
		res = get_data_buffer(host, &host->pio_bytes, &host->pio_ptr);
		if (res) {
			dbg(host, dbg_pio,
			    "pio_read(): complete (no more buffers).\n");
			host->pio_active = XFER_NONE;
			host->complete_what = COMPLETION_FINALIZE;

			return;
		}
	}

	enable_imask(host,
		     S3C2410_SDIIMSK_RXFIFOHALF | S3C2410_SDIIMSK_RXFIFOLAST);
}
/* Fill the controller FIFO from the current scatterlist buffer.
 * Runs from the PIO tasklet; only writes while at least one whole
 * word (> 3 bytes) of FIFO space is available. */
static void do_pio_write(struct s3cmci_host *host)
{
	void __iomem *to_ptr;
	int res;
	u32 fifo;
	u32 *ptr;

	to_ptr = host->base + host->sdidata;

	while ((fifo = fifo_free(host)) > 3) {
		if (!host->pio_bytes) {
			res = get_data_buffer(host, &host->pio_bytes,
					      &host->pio_ptr);
			if (res) {
				dbg(host, dbg_pio,
				    "pio_write(): complete (no more data).\n");
				host->pio_active = XFER_NONE;

				return;
			}

			dbg(host, dbg_pio,
			    "pio_write(): new source: [%i]@[%p]\n",
			    host->pio_bytes, host->pio_ptr);
		}

		/* If we have reached the end of the block, we have to
		 * write exactly the remaining number of bytes.  If we
		 * in the middle of the block, we have to write full
		 * words, so round down to an even multiple of 4. */
		if (fifo >= host->pio_bytes)
			fifo = host->pio_bytes;
		else
			fifo -= fifo & 3;

		host->pio_bytes -= fifo;
		host->pio_count += fifo;

		/* Round the byte count up to whole words; a partial
		 * trailing word is written in full (presumably the
		 * controller discards the excess bytes - confirm
		 * against the SDI documentation). */
		fifo = (fifo + 3) >> 2;
		ptr = host->pio_ptr;
		while (fifo--)
			writel(*ptr++, to_ptr);
		host->pio_ptr = ptr;
	}

	enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
}
/* Bottom half for PIO transfers: service the FIFO in whichever
 * direction is active, then finalize the request if the ISR marked
 * it complete. */
static void pio_tasklet(unsigned long data)
{
	struct s3cmci_host *host = (struct s3cmci_host *) data;

	s3cmci_disable_irq(host, true);

	switch (host->pio_active) {
	case XFER_WRITE:
		do_pio_write(host);
		break;
	case XFER_READ:
		do_pio_read(host);
		break;
	default:
		break;
	}

	if (host->complete_what != COMPLETION_FINALIZE) {
		s3cmci_enable_irq(host, true);
		return;
	}

	clear_imask(host);

	if (host->pio_active != XFER_NONE) {
		dbg(host, dbg_err, "unfinished %s "
		    "- pio_count:[%u] pio_bytes:[%u]\n",
		    (host->pio_active == XFER_READ) ? "read" : "write",
		    host->pio_count, host->pio_bytes);

		if (host->mrq->data)
			host->mrq->data->error = -EINVAL;
	}

	s3cmci_enable_irq(host, false);
	finalize_request(host);
}
/*
 * ISR for SDI Interface IRQ
 * Communication between driver and ISR works as follows:
 *   host->mrq                  points to current request
 *   host->complete_what        indicates when the request is considered done
 *     COMPLETION_CMDSENT         when the command was sent
 *     COMPLETION_RSPFIN          when a response was received
 *     COMPLETION_XFERFINISH      when the data transfer is finished
 *     COMPLETION_XFERFINISH_RSPFIN  both of the above.
 *   host->complete_request     is the completion-object the driver waits for
 *
 * 1) Driver sets up host->mrq and host->complete_what
 * 2) Driver prepares the transfer
 * 3) Driver enables interrupts
 * 4) Driver starts transfer
 * 5) Driver waits for host->complete_request
 * 6) ISR checks for request status (errors and success)
 * 7) ISR sets host->mrq->cmd->error and host->mrq->data->error
 * 8) ISR completes host->complete_request
 * 9) ISR disables interrupts
 * 10) Driver wakes up and takes care of the request
 *
 * Note: "->error"-fields are expected to be set to 0 before the request
 *       was issued by mmc.c - therefore they are only set, when an error
 *       condition comes up
 */
/* Main SDI interrupt handler.  Classifies the command/data status
 * registers, records errors in the current mmc_command/mmc_data and
 * hands completed requests to the PIO tasklet for finalization. */
static irqreturn_t s3cmci_irq(int irq, void *dev_id)
{
	struct s3cmci_host *host = dev_id;
	struct mmc_command *cmd;
	u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk;
	u32 mci_cclear = 0, mci_dclear;
	unsigned long iflags;

	mci_dsta = readl(host->base + S3C2410_SDIDSTA);
	mci_imsk = readl(host->base + host->sdiimsk);

	/* SDIO card interrupt: unrelated to the current request, so
	 * signal the MMC core without taking the completion lock. */
	if (mci_dsta & S3C2410_SDIDSTA_SDIOIRQDETECT) {
		if (mci_imsk & S3C2410_SDIIMSK_SDIOIRQ) {
			mci_dclear = S3C2410_SDIDSTA_SDIOIRQDETECT;
			writel(mci_dclear, host->base + S3C2410_SDIDSTA);

			mmc_signal_sdio_irq(host->mmc);
			return IRQ_HANDLED;
		}
	}

	spin_lock_irqsave(&host->complete_lock, iflags);

	mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
	mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
	mci_fsta = readl(host->base + S3C2410_SDIFSTA);
	mci_dclear = 0;

	/* Nothing in flight (or already being finalized): mask
	 * everything and leave. */
	if ((host->complete_what == COMPLETION_NONE) ||
	    (host->complete_what == COMPLETION_FINALIZE)) {
		host->status = "nothing to complete";
		clear_imask(host);
		goto irq_out;
	}

	if (!host->mrq) {
		host->status = "no active mrq";
		clear_imask(host);
		goto irq_out;
	}

	cmd = host->cmd_is_stop ? host->mrq->stop : host->mrq->cmd;

	if (!cmd) {
		host->status = "no active cmd";
		clear_imask(host);
		goto irq_out;
	}

	/* PIO mode: mask the FIFO interrupts and defer servicing to
	 * the tasklet, which re-enables them when done. */
	if (!s3cmci_host_usedma(host)) {
		if ((host->pio_active == XFER_WRITE) &&
		    (mci_fsta & S3C2410_SDIFSTA_TFDET)) {

			disable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
			tasklet_schedule(&host->pio_tasklet);
			host->status = "pio tx";
		}

		if ((host->pio_active == XFER_READ) &&
		    (mci_fsta & S3C2410_SDIFSTA_RFDET)) {

			disable_imask(host,
				      S3C2410_SDIIMSK_RXFIFOHALF |
				      S3C2410_SDIIMSK_RXFIFOLAST);

			tasklet_schedule(&host->pio_tasklet);
			host->status = "pio rx";
		}
	}

	if (mci_csta & S3C2410_SDICMDSTAT_CMDTIMEOUT) {
		dbg(host, dbg_err, "CMDSTAT: error CMDTIMEOUT\n");
		cmd->error = -ETIMEDOUT;
		host->status = "error: command timeout";
		goto fail_transfer;
	}

	if (mci_csta & S3C2410_SDICMDSTAT_CMDSENT) {
		if (host->complete_what == COMPLETION_CMDSENT) {
			host->status = "ok: command sent";
			goto close_transfer;
		}

		mci_cclear |= S3C2410_SDICMDSTAT_CMDSENT;
	}

	if (mci_csta & S3C2410_SDICMDSTAT_CRCFAIL) {
		if (cmd->flags & MMC_RSP_CRC) {
			if (host->mrq->cmd->flags & MMC_RSP_136) {
				dbg(host, dbg_irq,
				    "fixup: ignore CRC fail with long rsp\n");
			} else {
				/* note, we used to fail the transfer
				 * here, but it seems that this is just
				 * the hardware getting it wrong.
				 *
				 * cmd->error = -EILSEQ;
				 * host->status = "error: bad command crc";
				 * goto fail_transfer;
				 */
			}
		}

		mci_cclear |= S3C2410_SDICMDSTAT_CRCFAIL;
	}

	if (mci_csta & S3C2410_SDICMDSTAT_RSPFIN) {
		if (host->complete_what == COMPLETION_RSPFIN) {
			host->status = "ok: command response received";
			goto close_transfer;
		}

		if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
			host->complete_what = COMPLETION_XFERFINISH;

		mci_cclear |= S3C2410_SDICMDSTAT_RSPFIN;
	}

	/* errors handled after this point are only relevant
	   when a data transfer is in progress */

	if (!cmd->data)
		goto clear_status_bits;

	/* Check for FIFO failure */
	if (host->is2440) {
		if (mci_fsta & S3C2440_SDIFSTA_FIFOFAIL) {
			dbg(host, dbg_err, "FIFO failure\n");
			host->mrq->data->error = -EILSEQ;
			host->status = "error: 2440 fifo failure";
			goto fail_transfer;
		}
	} else {
		if (mci_dsta & S3C2410_SDIDSTA_FIFOFAIL) {
			dbg(host, dbg_err, "FIFO failure\n");
			cmd->data->error = -EILSEQ;
			host->status = "error: fifo failure";
			goto fail_transfer;
		}
	}

	if (mci_dsta & S3C2410_SDIDSTA_RXCRCFAIL) {
		dbg(host, dbg_err, "bad data crc (outgoing)\n");
		cmd->data->error = -EILSEQ;
		host->status = "error: bad data crc (outgoing)";
		goto fail_transfer;
	}

	if (mci_dsta & S3C2410_SDIDSTA_CRCFAIL) {
		dbg(host, dbg_err, "bad data crc (incoming)\n");
		cmd->data->error = -EILSEQ;
		host->status = "error: bad data crc (incoming)";
		goto fail_transfer;
	}

	if (mci_dsta & S3C2410_SDIDSTA_DATATIMEOUT) {
		dbg(host, dbg_err, "data timeout\n");
		cmd->data->error = -ETIMEDOUT;
		host->status = "error: data timeout";
		goto fail_transfer;
	}

	if (mci_dsta & S3C2410_SDIDSTA_XFERFINISH) {
		if (host->complete_what == COMPLETION_XFERFINISH) {
			host->status = "ok: data transfer completed";
			goto close_transfer;
		}

		if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
			host->complete_what = COMPLETION_RSPFIN;

		mci_dclear |= S3C2410_SDIDSTA_XFERFINISH;
	}

clear_status_bits:
	/* Acknowledge only the bits we have consumed. */
	writel(mci_cclear, host->base + S3C2410_SDICMDSTAT);
	writel(mci_dclear, host->base + S3C2410_SDIDSTA);

	goto irq_out;

fail_transfer:
	host->pio_active = XFER_NONE;

close_transfer:
	host->complete_what = COMPLETION_FINALIZE;

	clear_imask(host);
	tasklet_schedule(&host->pio_tasklet);

	goto irq_out;

irq_out:
	dbg(host, dbg_irq,
	    "csta:0x%08x dsta:0x%08x fsta:0x%08x dcnt:0x%08x status:%s.\n",
	    mci_csta, mci_dsta, mci_fsta, mci_dcnt, host->status);

	spin_unlock_irqrestore(&host->complete_lock, iflags);
	return IRQ_HANDLED;
}
/*
* ISR for the CardDetect Pin
*/
static irqreturn_t s3cmci_irq_cd(int irq, void *dev_id)
{
	struct s3cmci_host *host = dev_id;

	dbg(host, dbg_irq, "card detect\n");

	/* Give the card 500ms to settle before the core rescans. */
	mmc_detect_change(host->mmc, msecs_to_jiffies(500));

	return IRQ_HANDLED;
}
/* dmaengine completion callback: mark the data phase finished and kick
 * the tasklet so the request gets finalized. */
static void s3cmci_dma_done_callback(void *arg)
{
	struct s3cmci_host *host = arg;
	unsigned long flags;

	BUG_ON(!host->mrq);
	BUG_ON(!host->mrq->data);

	spin_lock_irqsave(&host->complete_lock, flags);

	dbg(host, dbg_dma, "DMA FINISHED\n");

	host->dma_complete = 1;
	host->complete_what = COMPLETION_FINALIZE;

	tasklet_schedule(&host->pio_tasklet);

	spin_unlock_irqrestore(&host->complete_lock, flags);
}
/* Complete the current mmc_request: read the response registers, clean
 * up the controller, issue a pending stop command if needed, and hand
 * the request back to the MMC core.  Called from the PIO tasklet once
 * complete_what == COMPLETION_FINALIZE. */
static void finalize_request(struct s3cmci_host *host)
{
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd;
	int debug_as_failure = 0;

	if (host->complete_what != COMPLETION_FINALIZE)
		return;

	if (!mrq)
		return;

	cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;

	if (cmd->data && (cmd->error == 0) &&
	    (cmd->data->error == 0)) {
		/* Data phase OK but the DMA completion callback has not
		 * fired yet - wait for it to finalize. */
		if (s3cmci_host_usedma(host) && (!host->dma_complete)) {
			dbg(host, dbg_dma, "DMA Missing (%d)!\n",
			    host->dma_complete);
			return;
		}
	}

	/* Read response from controller. */
	cmd->resp[0] = readl(host->base + S3C2410_SDIRSP0);
	cmd->resp[1] = readl(host->base + S3C2410_SDIRSP1);
	cmd->resp[2] = readl(host->base + S3C2410_SDIRSP2);
	cmd->resp[3] = readl(host->base + S3C2410_SDIRSP3);

	/* Restore the real prescaler (may have been slowed for reads). */
	writel(host->prescaler, host->base + S3C2410_SDIPRE);

	if (cmd->error)
		debug_as_failure = 1;

	if (cmd->data && cmd->data->error)
		debug_as_failure = 1;

	dbg_dumpcmd(host, cmd, debug_as_failure);

	/* Cleanup controller */
	writel(0, host->base + S3C2410_SDICMDARG);
	writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
	writel(0, host->base + S3C2410_SDICMDCON);
	clear_imask(host);

	if (cmd->data && cmd->error)
		cmd->data->error = cmd->error;

	/* A stop command is pending: send it now; finalize_request()
	 * runs again when it completes. */
	if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) {
		host->cmd_is_stop = 1;
		s3cmci_send_request(host->mmc);
		return;
	}

	/* If we have no data transfer we are finished here */
	if (!mrq->data)
		goto request_done;

	/* Calculate the amount of bytes transferred if there was no error */
	if (mrq->data->error == 0) {
		mrq->data->bytes_xfered =
			(mrq->data->blocks * mrq->data->blksz);
	} else {
		mrq->data->bytes_xfered = 0;
	}

	/* If we had an error while transferring data we flush the
	 * DMA channel and the fifo to clear out any garbage. */
	if (mrq->data->error != 0) {
		if (s3cmci_host_usedma(host))
			dmaengine_terminate_all(host->dma);

		if (host->is2440) {
			/* Clear failure register and reset fifo. */
			writel(S3C2440_SDIFSTA_FIFORESET |
			       S3C2440_SDIFSTA_FIFOFAIL,
			       host->base + S3C2410_SDIFSTA);
		} else {
			u32 mci_con;

			/* reset fifo */
			mci_con = readl(host->base + S3C2410_SDICON);
			mci_con |= S3C2410_SDICON_FIFORESET;

			writel(mci_con, host->base + S3C2410_SDICON);
		}
	}

request_done:
	host->complete_what = COMPLETION_NONE;
	host->mrq = NULL;

	s3cmci_check_sdio_irq(host);
	mmc_request_done(host->mmc, mrq);
}
/* Program the command registers and start @cmd.  Also records in
 * host->complete_what which event marks the request as finished. */
static void s3cmci_send_command(struct s3cmci_host *host,
				struct mmc_command *cmd)
{
	u32 ccon, imsk;

	/* Unmask all command-phase interrupt sources. */
	imsk = S3C2410_SDIIMSK_CRCSTATUS | S3C2410_SDIIMSK_CMDTIMEOUT |
	       S3C2410_SDIIMSK_RESPONSEND | S3C2410_SDIIMSK_CMDSENT |
	       S3C2410_SDIIMSK_RESPONSECRC;

	enable_imask(host, imsk);

	/* Decide which completion event ends this request. */
	if (cmd->data)
		host->complete_what = COMPLETION_XFERFINISH_RSPFIN;
	else if (cmd->flags & MMC_RSP_PRESENT)
		host->complete_what = COMPLETION_RSPFIN;
	else
		host->complete_what = COMPLETION_CMDSENT;

	writel(cmd->arg, host->base + S3C2410_SDICMDARG);

	ccon = cmd->opcode & S3C2410_SDICMDCON_INDEX;
	ccon |= S3C2410_SDICMDCON_SENDERHOST | S3C2410_SDICMDCON_CMDSTART;

	if (cmd->flags & MMC_RSP_PRESENT)
		ccon |= S3C2410_SDICMDCON_WAITRSP;

	if (cmd->flags & MMC_RSP_136)
		ccon |= S3C2410_SDICMDCON_LONGRSP;

	/* Writing CMDSTART kicks the command off. */
	writel(ccon, host->base + S3C2410_SDICMDCON);
}
/* Program the data-transfer registers (DCON, BSIZE, IMASK, TIMER) for
 * @data.  A NULL @data clears the data controller.  Returns 0 on
 * success, -EINVAL for unsupported block sizes or if a previous
 * transfer cannot be stopped.
 *
 * Fix: the debug message read "transfer stillin progress" - corrected
 * to "still in". */
static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
{
	u32 dcon, imsk, stoptries = 3;

	/* write DCON register */

	if (!data) {
		writel(0, host->base + S3C2410_SDIDCON);
		return 0;
	}

	if ((data->blksz & 3) != 0) {
		/* We cannot deal with unaligned blocks with more than
		 * one block being transferred. */

		if (data->blocks > 1) {
			pr_warn("%s: can't do non-word sized block transfers (blksz %d)\n",
				__func__, data->blksz);
			return -EINVAL;
		}
	}

	/* A previous transfer may still be active: try to stop it,
	 * resetting the controller each time, up to three attempts. */
	while (readl(host->base + S3C2410_SDIDSTA) &
	       (S3C2410_SDIDSTA_TXDATAON | S3C2410_SDIDSTA_RXDATAON)) {

		dbg(host, dbg_err,
		    "mci_setup_data() transfer still in progress.\n");

		writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
		s3cmci_reset(host);

		if ((stoptries--) == 0) {
			dbg_dumpregs(host, "DRF");
			return -EINVAL;
		}
	}

	dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK;

	if (s3cmci_host_usedma(host))
		dcon |= S3C2410_SDIDCON_DMAEN;

	if (host->bus_width == MMC_BUS_WIDTH_4)
		dcon |= S3C2410_SDIDCON_WIDEBUS;

	if (!(data->flags & MMC_DATA_STREAM))
		dcon |= S3C2410_SDIDCON_BLOCKMODE;

	if (data->flags & MMC_DATA_WRITE) {
		dcon |= S3C2410_SDIDCON_TXAFTERRESP;
		dcon |= S3C2410_SDIDCON_XFER_TXSTART;
	}

	if (data->flags & MMC_DATA_READ) {
		dcon |= S3C2410_SDIDCON_RXAFTERCMD;
		dcon |= S3C2410_SDIDCON_XFER_RXSTART;
	}

	if (host->is2440) {
		dcon |= S3C2440_SDIDCON_DS_WORD;
		dcon |= S3C2440_SDIDCON_DATSTART;
	}

	writel(dcon, host->base + S3C2410_SDIDCON);

	/* write BSIZE register */

	writel(data->blksz, host->base + S3C2410_SDIBSIZE);

	/* add to IMASK register */
	imsk = S3C2410_SDIIMSK_FIFOFAIL | S3C2410_SDIIMSK_DATACRC |
	       S3C2410_SDIIMSK_DATATIMEOUT | S3C2410_SDIIMSK_DATAFINISH;

	enable_imask(host, imsk);

	/* write TIMER register */

	if (host->is2440) {
		writel(0x007FFFFF, host->base + S3C2410_SDITIMER);
	} else {
		writel(0x0000FFFF, host->base + S3C2410_SDITIMER);

		/* FIX: set slow clock to prevent timeouts on read */
		if (data->flags & MMC_DATA_READ)
			writel(0xFF, host->base + S3C2410_SDIPRE);
	}

	return 0;
}
#define BOTH_DIR (MMC_DATA_WRITE | MMC_DATA_READ)
static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data)
{
int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
host->pio_sgptr = 0;
host->pio_bytes = 0;
host->pio_count = 0;
host->pio_active = rw ? XFER_WRITE : XFER_READ;
if (rw) {
do_pio_write(host);
enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
} else {
enable_imask(host, S3C2410_SDIIMSK_RXFIFOHALF
| S3C2410_SDIIMSK_RXFIFOLAST);
}
return 0;
}
/* Map @data's scatterlist, configure the slave channel and submit a
 * slave-sg descriptor.  Returns 0 on success or -ENOMEM if no
 * descriptor could be prepared (scatterlist is unmapped again). */
static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
{
	int rw = data->flags & MMC_DATA_WRITE;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		/* Both directions target the SDI data register; only
		 * the relevant side is used by the dmaengine driver. */
		.src_addr = host->mem->start + host->sdidata,
		.dst_addr = host->mem->start + host->sdidata,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};

	BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);

	/* Restore prescaler value */
	writel(host->prescaler, host->base + S3C2410_SDIPRE);

	if (!rw)
		conf.direction = DMA_DEV_TO_MEM;
	else
		conf.direction = DMA_MEM_TO_DEV;

	dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		   rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

	dmaengine_slave_config(host->dma, &conf);
	desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len,
				       conf.direction,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc)
		goto unmap_exit;

	/* Completion is signalled through s3cmci_dma_done_callback(). */
	desc->callback = s3cmci_dma_done_callback;
	desc->callback_param = host;
	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma);

	return 0;

unmap_exit:
	dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
		     rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	return -ENOMEM;
}
/* Start the current request (host->mrq) - or its stop command when
 * host->cmd_is_stop is set.  Prepares the data phase (DMA or PIO)
 * before issuing the command, and completes the request immediately
 * on setup failure. */
static void s3cmci_send_request(struct mmc_host *mmc)
{
	struct s3cmci_host *host = mmc_priv(mmc);
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;

	host->ccnt++;
	prepare_dbgmsg(host, cmd, host->cmd_is_stop);

	/* Clear command, data and fifo status registers
	   Fifo clear only necessary on 2440, but doesn't hurt on 2410
	*/
	writel(0xFFFFFFFF, host->base + S3C2410_SDICMDSTAT);
	writel(0xFFFFFFFF, host->base + S3C2410_SDIDSTA);
	writel(0xFFFFFFFF, host->base + S3C2410_SDIFSTA);

	if (cmd->data) {
		int res = s3cmci_setup_data(host, cmd->data);

		host->dcnt++;

		if (res) {
			dbg(host, dbg_err, "setup data error %d\n", res);
			cmd->error = res;
			cmd->data->error = res;

			mmc_request_done(mmc, mrq);
			return;
		}

		if (s3cmci_host_usedma(host))
			res = s3cmci_prepare_dma(host, cmd->data);
		else
			res = s3cmci_prepare_pio(host, cmd->data);

		if (res) {
			dbg(host, dbg_err, "data prepare error %d\n", res);
			cmd->error = res;
			cmd->data->error = res;

			mmc_request_done(mmc, mrq);
			return;
		}
	}

	/* Send command */
	s3cmci_send_command(host, cmd);

	/* Enable Interrupt */
	s3cmci_enable_irq(host, true);
}
/* .get_cd callback: 1 = card present, 0 = absent, -ENOSYS if the board
 * has no detect line wired up. */
static int s3cmci_card_present(struct mmc_host *mmc)
{
	struct s3cmci_host *host = mmc_priv(mmc);
	struct s3c24xx_mci_pdata *pdata = host->pdata;
	int present;

	if (pdata->no_detect)
		return -ENOSYS;

	/* Detect line is treated as active-low unless detect_invert
	 * flips the sense. */
	present = gpio_get_value(pdata->gpio_detect) ? 0 : 1;
	return present ^ pdata->detect_invert;
}
/* .request callback: start @mrq, or fail it straight away with
 * -ENOMEDIUM when no card is present. */
static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct s3cmci_host *host = mmc_priv(mmc);

	host->status = "mmc request";
	host->cmd_is_stop = 0;
	host->mrq = mrq;

	/* Non-zero includes -ENOSYS (no detect line): assume present. */
	if (s3cmci_card_present(mmc) != 0) {
		s3cmci_send_request(mmc);
		return;
	}

	dbg(host, dbg_err, "%s: no medium present\n", __func__);
	host->mrq->cmd->error = -ENOMEDIUM;
	mmc_request_done(mmc, mrq);
}
/* Choose and program the SDI prescaler so the card clock does not
 * exceed ios->clock, and record the resulting rate in host->real_rate.
 *
 * Fix: removed the unreachable "if (mci_psc > 255) mci_psc = 255;"
 * clamp - the loop condition (mci_psc < 255) means mci_psc can never
 * leave the valid 0..255 register range. */
static void s3cmci_set_clk(struct s3cmci_host *host, struct mmc_ios *ios)
{
	u32 mci_psc;

	/* Set clock: smallest prescaler whose rate fits the request. */
	for (mci_psc = 0; mci_psc < 255; mci_psc++) {
		host->real_rate = host->clk_rate / (host->clk_div*(mci_psc+1));

		if (host->real_rate <= ios->clock)
			break;
	}

	host->prescaler = mci_psc;
	writel(host->prescaler, host->base + S3C2410_SDIPRE);

	/* If requested clock is 0, real_rate will be 0, too */
	if (ios->clock == 0)
		host->real_rate = 0;
}
/* .set_ios callback: apply power state, pin configuration, clock rate
 * and bus width requested by the MMC core. */
static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct s3cmci_host *host = mmc_priv(mmc);
	u32 mci_con;

	/* Set the power state */

	mci_con = readl(host->base + S3C2410_SDICON);

	switch (ios->power_mode) {
	case MMC_POWER_ON:
	case MMC_POWER_UP:
		/* Configure GPE5...GPE10 pins in SD mode */
		s3c_gpio_cfgall_range(S3C2410_GPE(5), 6, S3C_GPIO_SFN(2),
				      S3C_GPIO_PULL_NONE);

		if (host->pdata->set_power)
			host->pdata->set_power(ios->power_mode, ios->vdd);

		if (!host->is2440)
			mci_con |= S3C2410_SDICON_FIFORESET;

		break;

	case MMC_POWER_OFF:
	default:
		/* Drive the clock pin low while powered down. */
		gpio_direction_output(S3C2410_GPE(5), 0);

		if (host->is2440)
			mci_con |= S3C2440_SDICON_SDRESET;

		if (host->pdata->set_power)
			host->pdata->set_power(ios->power_mode, ios->vdd);

		break;
	}

	s3cmci_set_clk(host, ios);

	/* Set CLOCK_ENABLE */
	if (ios->clock)
		mci_con |= S3C2410_SDICON_CLOCKTYPE;
	else
		mci_con &= ~S3C2410_SDICON_CLOCKTYPE;

	writel(mci_con, host->base + S3C2410_SDICON);

	if ((ios->power_mode == MMC_POWER_ON) ||
	    (ios->power_mode == MMC_POWER_UP)) {
		dbg(host, dbg_conf, "running at %lukHz (requested: %ukHz).\n",
		    host->real_rate/1000, ios->clock/1000);
	} else {
		dbg(host, dbg_conf, "powered down.\n");
	}

	host->bus_width = ios->bus_width;
}
/* Trigger a software reset of the SDI block via the SDICON register. */
static void s3cmci_reset(struct s3cmci_host *host)
{
	writel(readl(host->base + S3C2410_SDICON) | S3C2440_SDICON_SDRESET,
	       host->base + S3C2410_SDICON);
}
/* .get_ro callback: 1 = write-protected, 0 = writable.  Boards without
 * a write-protect line always report writable. */
static int s3cmci_get_ro(struct mmc_host *mmc)
{
	struct s3cmci_host *host = mmc_priv(mmc);
	struct s3c24xx_mci_pdata *pdata = host->pdata;

	if (pdata->no_wprotect)
		return 0;

	return (gpio_get_value(pdata->gpio_wprotect) ? 1 : 0) ^
		pdata->wprotect_invert;
}
/* .enable_sdio_irq callback: turn hardware SDIO interrupt detection on
 * or off.
 *
 * Fix: the original assigned host->sdio_irqen = enable *before*
 * comparing "enable == host->sdio_irqen", so the comparison was always
 * true and the whole enable/disable path below was dead code.  The
 * comparison now happens first. */
static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct s3cmci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 con;

	local_irq_save(flags);

	con = readl(host->base + S3C2410_SDICON);

	/* Nothing to do if the requested state is already set. */
	if (enable == host->sdio_irqen)
		goto same_state;

	host->sdio_irqen = enable;

	if (enable) {
		con |= S3C2410_SDICON_SDIOIRQ;
		enable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);

		/* Make sure the host IRQ line is live so the SDIO
		 * interrupt can actually be delivered. */
		if (!host->irq_state && !host->irq_disabled) {
			host->irq_state = true;
			enable_irq(host->irq);
		}
	} else {
		disable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
		con &= ~S3C2410_SDICON_SDIOIRQ;

		if (!host->irq_enabled && host->irq_state) {
			disable_irq_nosync(host->irq);
			host->irq_state = false;
		}
	}

	writel(con, host->base + S3C2410_SDICON);

same_state:
	local_irq_restore(flags);

	s3cmci_check_sdio_irq(host);
}
/* Host controller operations handed to the MMC core. */
static struct mmc_host_ops s3cmci_ops = {
	.request	= s3cmci_request,
	.set_ios	= s3cmci_set_ios,
	.get_ro		= s3cmci_get_ro,
	.get_cd		= s3cmci_card_present,
	.enable_sdio_irq = s3cmci_enable_sdio_irq,
};
/* Fallback platform data used when the board supplies none; avoids
 * repeated "if (host->pdata)" checks.  All-zero fields give reasonable
 * defaults; detect/write-protect lines are marked as absent. */
static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
	.no_wprotect = 1,
	.no_detect = 1,
};
#ifdef CONFIG_CPU_FREQ
/* cpufreq transition notifier: the SDI source clock scales with the
 * CPU clock, so re-program the prescaler whenever the rate changes.
 * Acting before a speed-up and after a slow-down ensures the card
 * clock never transiently exceeds the requested rate. */
static int s3cmci_cpufreq_transition(struct notifier_block *nb,
				     unsigned long val, void *data)
{
	struct s3cmci_host *host;
	struct mmc_host *mmc;
	unsigned long newclk;
	unsigned long flags;

	host = container_of(nb, struct s3cmci_host, freq_transition);
	newclk = clk_get_rate(host->clk);
	mmc = host->mmc;

	if ((val == CPUFREQ_PRECHANGE && newclk > host->clk_rate) ||
	    (val == CPUFREQ_POSTCHANGE && newclk < host->clk_rate)) {
		spin_lock_irqsave(&mmc->lock, flags);

		host->clk_rate = newclk;

		if (mmc->ios.power_mode != MMC_POWER_OFF &&
		    mmc->ios.clock != 0)
			s3cmci_set_clk(host, &mmc->ios);

		spin_unlock_irqrestore(&mmc->lock, flags);
	}

	return 0;
}
/* Register for cpufreq transition notifications (CONFIG_CPU_FREQ). */
static inline int s3cmci_cpufreq_register(struct s3cmci_host *host)
{
	host->freq_transition.notifier_call = s3cmci_cpufreq_transition;

	return cpufreq_register_notifier(&host->freq_transition,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
/* Undo s3cmci_cpufreq_register(). */
static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
{
	cpufreq_unregister_notifier(&host->freq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);
}
#else
/* No-op stub when CONFIG_CPU_FREQ is disabled. */
static inline int s3cmci_cpufreq_register(struct s3cmci_host *host)
{
	return 0;
}
/* No-op stub when CONFIG_CPU_FREQ is disabled. */
static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
{
}
#endif
#ifdef CONFIG_DEBUG_FS
/* debugfs "state" file: dump the driver's bookkeeping state. */
static int s3cmci_state_show(struct seq_file *seq, void *v)
{
	struct s3cmci_host *host = seq->private;

	seq_printf(seq, "Register base = 0x%08x\n", (u32)host->base);
	seq_printf(seq, "Clock rate = %ld\n", host->clk_rate);
	seq_printf(seq, "Prescale = %d\n", host->prescaler);
	seq_printf(seq, "is2440 = %d\n", host->is2440);
	seq_printf(seq, "IRQ = %d\n", host->irq);
	seq_printf(seq, "IRQ enabled = %d\n", host->irq_enabled);
	seq_printf(seq, "IRQ disabled = %d\n", host->irq_disabled);
	seq_printf(seq, "IRQ state = %d\n", host->irq_state);
	seq_printf(seq, "CD IRQ = %d\n", host->irq_cd);
	seq_printf(seq, "Do DMA = %d\n", s3cmci_host_usedma(host));
	seq_printf(seq, "SDIIMSK at %d\n", host->sdiimsk);
	seq_printf(seq, "SDIDATA at %d\n", host->sdidata);

	return 0;
}
/* open() hook wiring the "state" file to s3cmci_state_show(). */
static int s3cmci_state_open(struct inode *inode, struct file *file)
{
	return single_open(file, s3cmci_state_show, inode->i_private);
}
/* File operations for the debugfs "state" file (single-shot seq_file). */
static const struct file_operations s3cmci_fops_state = {
	.owner		= THIS_MODULE,
	.open		= s3cmci_state_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Table of SDI registers dumped by the debugfs "regs" file; the macro
 * pairs each register offset with its printable name. */
#define DBG_REG(_r) { .addr = S3C2410_SDI##_r, .name = #_r }

struct s3cmci_reg {
	unsigned short	addr;
	unsigned char	*name;
} debug_regs[] = {
	DBG_REG(CON),
	DBG_REG(PRE),
	DBG_REG(CMDARG),
	DBG_REG(CMDCON),
	DBG_REG(CMDSTAT),
	DBG_REG(RSP0),
	DBG_REG(RSP1),
	DBG_REG(RSP2),
	DBG_REG(RSP3),
	DBG_REG(TIMER),
	DBG_REG(BSIZE),
	DBG_REG(DCON),
	DBG_REG(DCNT),
	DBG_REG(DSTA),
	DBG_REG(FSTA),
	{}	/* sentinel terminates the iteration in s3cmci_regs_show() */
};
/* debugfs "regs" file: dump all SDI registers plus the variant-specific
 * interrupt-mask register. */
static int s3cmci_regs_show(struct seq_file *seq, void *v)
{
	struct s3cmci_host *host = seq->private;
	struct s3cmci_reg *rptr = debug_regs;

	for (; rptr->name; rptr++)
		seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
			   readl(host->base + rptr->addr));

	seq_printf(seq, "SDIIMSK\t=0x%08x\n", readl(host->base + host->sdiimsk));

	return 0;
}
/* open() hook wiring the "regs" file to s3cmci_regs_show(). */
static int s3cmci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, s3cmci_regs_show, inode->i_private);
}
/* File operations for the debugfs "regs" file (single-shot seq_file). */
static const struct file_operations s3cmci_fops_regs = {
	.owner		= THIS_MODULE,
	.open		= s3cmci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/* Create the per-device debugfs directory with "state" and "regs"
 * files.  Failures are logged but not fatal.
 * NOTE(review): debugfs_create_* return NULL (not an ERR_PTR) on
 * plain allocation failure in this kernel, so IS_ERR() does not catch
 * every failure mode - confirm against the debugfs API. */
static void s3cmci_debugfs_attach(struct s3cmci_host *host)
{
	struct device *dev = &host->pdev->dev;

	host->debug_root = debugfs_create_dir(dev_name(dev), NULL);
	if (IS_ERR(host->debug_root)) {
		dev_err(dev, "failed to create debugfs root\n");
		return;
	}

	host->debug_state = debugfs_create_file("state", 0444,
						host->debug_root, host,
						&s3cmci_fops_state);

	if (IS_ERR(host->debug_state))
		dev_err(dev, "failed to create debug state file\n");

	host->debug_regs = debugfs_create_file("regs", 0444,
					       host->debug_root, host,
					       &s3cmci_fops_regs);

	if (IS_ERR(host->debug_regs))
		dev_err(dev, "failed to create debug regs file\n");
}
/* Tear down the debugfs files created by s3cmci_debugfs_attach(). */
static void s3cmci_debugfs_remove(struct s3cmci_host *host)
{
	debugfs_remove(host->debug_regs);
	debugfs_remove(host->debug_state);
	debugfs_remove(host->debug_root);
}
#else
static inline void s3cmci_debugfs_attach(struct s3cmci_host *host) { }
static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { }
#endif /* CONFIG_DEBUG_FS */
/* Probe routine: allocate the mmc_host, claim GPIOs, MMIO, IRQs, the
 * optional DMA channel and clock, then register with the MMC core.
 * Resources are unwound in reverse order through the goto ladder. */
static int s3cmci_probe(struct platform_device *pdev)
{
	struct s3cmci_host *host;
	struct mmc_host	*mmc;
	int ret;
	int is2440;
	int i;

	/* driver_data distinguishes the 2410 from the 2412/2440 variant. */
	is2440 = platform_get_device_id(pdev)->driver_data;

	mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto probe_out;
	}

	/* Claim the six SD interface pins (CLK, CMD, DAT0-3). */
	for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) {
		ret = gpio_request(i, dev_name(&pdev->dev));
		if (ret) {
			dev_err(&pdev->dev, "failed to get gpio %d\n", i);

			for (i--; i >= S3C2410_GPE(5); i--)
				gpio_free(i);

			goto probe_free_host;
		}
	}

	host = mmc_priv(mmc);
	host->mmc 	= mmc;
	host->pdev	= pdev;
	host->is2440	= is2440;

	host->pdata = pdev->dev.platform_data;
	if (!host->pdata) {
		pdev->dev.platform_data = &s3cmci_def_pdata;
		host->pdata = &s3cmci_def_pdata;
	}

	spin_lock_init(&host->complete_lock);
	tasklet_init(&host->pio_tasklet, pio_tasklet, (unsigned long) host);

	/* Variant-specific register offsets and base clock divider. */
	if (is2440) {
		host->sdiimsk	= S3C2440_SDIIMSK;
		host->sdidata	= S3C2440_SDIDATA;
		host->clk_div	= 1;
	} else {
		host->sdiimsk	= S3C2410_SDIIMSK;
		host->sdidata	= S3C2410_SDIDATA;
		host->clk_div	= 2;
	}

	host->complete_what 	= COMPLETION_NONE;
	host->pio_active 	= XFER_NONE;

	host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!host->mem) {
		dev_err(&pdev->dev,
			"failed to get io memory region resource.\n");

		ret = -ENOENT;
		goto probe_free_gpio;
	}

	host->mem = request_mem_region(host->mem->start,
				       resource_size(host->mem), pdev->name);

	if (!host->mem) {
		dev_err(&pdev->dev, "failed to request io memory region.\n");
		ret = -ENOENT;
		goto probe_free_gpio;
	}

	host->base = ioremap(host->mem->start, resource_size(host->mem));
	if (!host->base) {
		dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
		ret = -EINVAL;
		goto probe_iounmap;
	}

	/* NOTE(review): platform_get_irq() returns a negative errno on
	 * failure; the == 0 check may miss that case - confirm. */
	host->irq = platform_get_irq(pdev, 0);
	if (host->irq == 0) {
		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
		ret = -EINVAL;
		goto probe_iounmap;
	}

	if (request_irq(host->irq, s3cmci_irq, 0, DRIVER_NAME, host)) {
		dev_err(&pdev->dev, "failed to request mci interrupt.\n");
		ret = -ENOENT;
		goto probe_iounmap;
	}

	/* We get spurious interrupts even when we have set the IMSK
	 * register to ignore everything, so use disable_irq() to make
	 * ensure we don't lock the system with un-serviceable requests. */

	disable_irq(host->irq);
	host->irq_state = false;

	if (!host->pdata->no_detect) {
		ret = gpio_request(host->pdata->gpio_detect, "s3cmci detect");
		if (ret) {
			dev_err(&pdev->dev, "failed to get detect gpio\n");
			goto probe_free_irq;
		}

		host->irq_cd = gpio_to_irq(host->pdata->gpio_detect);

		if (host->irq_cd >= 0) {
			if (request_irq(host->irq_cd, s3cmci_irq_cd,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING,
					DRIVER_NAME, host)) {
				dev_err(&pdev->dev,
					"can't get card detect irq.\n");
				ret = -ENOENT;
				/* NOTE(review): this falls through
				 * probe_free_gpio_cd into
				 * probe_free_irq_cd, which calls
				 * free_irq() on an IRQ that was never
				 * successfully requested - confirm. */
				goto probe_free_gpio_cd;
			}
		} else {
			dev_warn(&pdev->dev,
				 "host detect has no irq available\n");
			gpio_direction_input(host->pdata->gpio_detect);
		}
	} else
		host->irq_cd = -1;

	if (!host->pdata->no_wprotect) {
		ret = gpio_request(host->pdata->gpio_wprotect, "s3cmci wp");
		if (ret) {
			dev_err(&pdev->dev, "failed to get writeprotect\n");
			goto probe_free_irq_cd;
		}

		gpio_direction_input(host->pdata->gpio_wprotect);
	}

	/* depending on the dma state, get a dma channel to use. */

	if (s3cmci_host_usedma(host)) {
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		host->dma = dma_request_slave_channel_compat(mask,
			s3c24xx_dma_filter, (void *)DMACH_SDI, &pdev->dev, "rx-tx");
		if (!host->dma) {
			dev_err(&pdev->dev, "cannot get DMA channel.\n");
			ret = -EBUSY;
			goto probe_free_gpio_wp;
		}
	}

	host->clk = clk_get(&pdev->dev, "sdi");
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to find clock source.\n");
		ret = PTR_ERR(host->clk);
		host->clk = NULL;
		goto probe_free_dma;
	}

	ret = clk_prepare_enable(host->clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable clock source.\n");
		goto clk_free;
	}

	host->clk_rate = clk_get_rate(host->clk);

	mmc->ops 	= &s3cmci_ops;
	mmc->ocr_avail	= MMC_VDD_32_33 | MMC_VDD_33_34;
#ifdef CONFIG_MMC_S3C_HW_SDIO_IRQ
	mmc->caps	= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
#else
	mmc->caps	= MMC_CAP_4_BIT_DATA;
#endif
	mmc->f_min 	= host->clk_rate / (host->clk_div * 256);
	mmc->f_max 	= host->clk_rate / host->clk_div;

	if (host->pdata->ocr_avail)
		mmc->ocr_avail = host->pdata->ocr_avail;

	/* Controller limits: 12-bit block count/size fields. */
	mmc->max_blk_count	= 4095;
	mmc->max_blk_size	= 4095;
	mmc->max_req_size	= 4095 * 512;
	mmc->max_seg_size	= mmc->max_req_size;

	mmc->max_segs		= 128;

	dbg(host, dbg_debug,
	    "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%p.\n",
	    (host->is2440?"2440":""),
	    host->base, host->irq, host->irq_cd, host->dma);

	ret = s3cmci_cpufreq_register(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register cpufreq\n");
		goto free_dmabuf;
	}

	ret = mmc_add_host(mmc);
	if (ret) {
		dev_err(&pdev->dev, "failed to add mmc host.\n");
		goto free_cpufreq;
	}

	s3cmci_debugfs_attach(host);

	platform_set_drvdata(pdev, mmc);
	dev_info(&pdev->dev, "%s - using %s, %s SDIO IRQ\n", mmc_hostname(mmc),
		 s3cmci_host_usedma(host) ? "dma" : "pio",
		 mmc->caps & MMC_CAP_SDIO_IRQ ? "hw" : "sw");

	return 0;

 free_cpufreq:
	s3cmci_cpufreq_deregister(host);

 free_dmabuf:
	clk_disable_unprepare(host->clk);

 clk_free:
	clk_put(host->clk);

 probe_free_dma:
	if (s3cmci_host_usedma(host))
		dma_release_channel(host->dma);

 probe_free_gpio_wp:
	if (!host->pdata->no_wprotect)
		gpio_free(host->pdata->gpio_wprotect);

 probe_free_gpio_cd:
	if (!host->pdata->no_detect)
		gpio_free(host->pdata->gpio_detect);

 probe_free_irq_cd:
	if (host->irq_cd >= 0)
		free_irq(host->irq_cd, host);

 probe_free_irq:
	free_irq(host->irq, host);

 probe_iounmap:
	iounmap(host->base);

 probe_free_mem_region:
	release_mem_region(host->mem->start, resource_size(host->mem));

 probe_free_gpio:
	for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
		gpio_free(i);

 probe_free_host:
	mmc_free_host(mmc);

 probe_out:
	return ret;
}
/* Shutdown hook: quiesce the host - free the card-detect IRQ, remove
 * debugfs/cpufreq hooks, detach from the MMC core and gate the clock.
 * Also called from s3cmci_remove(). */
static void s3cmci_shutdown(struct platform_device *pdev)
{
	struct mmc_host	*mmc = platform_get_drvdata(pdev);
	struct s3cmci_host *host = mmc_priv(mmc);

	if (host->irq_cd >= 0)
		free_irq(host->irq_cd, host);

	s3cmci_debugfs_remove(host);
	s3cmci_cpufreq_deregister(host);
	mmc_remove_host(mmc);
	clk_disable_unprepare(host->clk);
}
/* Remove hook: full teardown.  Reuses s3cmci_shutdown() for the
 * quiesce phase, then releases all remaining resources in reverse
 * order of acquisition in probe. */
static int s3cmci_remove(struct platform_device *pdev)
{
	struct mmc_host		*mmc  = platform_get_drvdata(pdev);
	struct s3cmci_host	*host = mmc_priv(mmc);
	struct s3c24xx_mci_pdata *pd = host->pdata;
	int i;

	s3cmci_shutdown(pdev);

	clk_put(host->clk);

	tasklet_disable(&host->pio_tasklet);

	if (s3cmci_host_usedma(host))
		dma_release_channel(host->dma);

	free_irq(host->irq, host);

	if (!pd->no_wprotect)
		gpio_free(pd->gpio_wprotect);

	if (!pd->no_detect)
		gpio_free(pd->gpio_detect);

	/* Release the six SD interface pins claimed in probe. */
	for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
		gpio_free(i);

	iounmap(host->base);
	release_mem_region(host->mem->start, resource_size(host->mem));

	mmc_free_host(mmc);
	return 0;
}
/*
 * Platform device IDs.  driver_data selects the controller variant:
 * 0 for the original S3C2410 block, 1 for the S3C2412/S3C2440 flavour
 * (presumably the "is2440" register layout used elsewhere in this
 * driver — confirm against the probe path).
 */
static struct platform_device_id s3cmci_driver_ids[] = {
	{
		.name	= "s3c2410-sdi",
		.driver_data	= 0,
	}, {
		.name	= "s3c2412-sdi",
		.driver_data	= 1,
	}, {
		.name	= "s3c2440-sdi",
		.driver_data	= 1,
	},
	{ }
};

MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
/* Platform driver glue: probe/remove/shutdown entry points. */
static struct platform_driver s3cmci_driver = {
	.driver	= {
		.name	= "s3c-sdi",
	},
	.id_table	= s3cmci_driver_ids,
	.probe		= s3cmci_probe,
	.remove		= s3cmci_remove,
	.shutdown	= s3cmci_shutdown,
};

module_platform_driver(s3cmci_driver);

MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>");
| gpl-2.0 |
GameTheory-/android_kernel_lge_l1m | drivers/net/wireless/ath/ath9k/calib.c | 1248 | 11530 | /*
* Copyright (c) 2008-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "hw.h"
#include "hw-ops.h"
/* Common calibration code */
#define ATH9K_NF_TOO_HIGH -60
/*
 * Return the median of the ATH9K_NF_CAL_HIST_MAX most recent noise
 * floor readings.  The caller's buffer is left untouched: a local copy
 * is sorted in descending order and the middle element is returned.
 */
static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
{
	int16_t sorted[ATH9K_NF_CAL_HIST_MAX];
	int i, j;

	/* Insertion sort the samples, largest value first. */
	for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++) {
		int16_t cur = nfCalBuffer[i];

		for (j = i; j > 0 && sorted[j - 1] < cur; j--)
			sorted[j] = sorted[j - 1];
		sorted[j] = cur;
	}

	return sorted[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
}
/*
 * Pick the noise floor limit set for a channel's band; a NULL channel
 * (or any 2 GHz channel) selects the 2 GHz limits, otherwise 5 GHz.
 */
static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
						    struct ath9k_channel *chan)
{
	return (!chan || IS_CHAN_2GHZ(chan)) ? &ah->nf_2g : &ah->nf_5g;
}
static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
struct ath9k_channel *chan)
{
return ath9k_hw_get_nf_limits(ah, chan)->nominal;
}
/*
 * Fold a fresh set of per-chain NF readings into the calibration
 * history and recompute each chain's filtered (median) NF value.
 * Also tracks whether any chain's filtered NF exceeds the band
 * maximum, which is used as an interference indicator.
 */
static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
					      struct ath9k_hw_cal_data *cal,
					      int16_t *nfarray)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &common->hw->conf;
	struct ath_nf_limits *limit;
	struct ath9k_nfcal_hist *h;
	bool high_nf_mid = false;
	u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
	int i;

	h = cal->nfCalHist;
	limit = ath9k_hw_get_nf_limits(ah, ah->curchan);

	for (i = 0; i < NUM_NF_READINGS; i++) {
		/* Skip absent chains; ext readings only matter in HT40 */
		if (!(chainmask & (1 << i)) ||
		    ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf)))
			continue;

		/* Insert the new sample into the circular history buffer */
		h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];

		if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
			h[i].currIndex = 0;

		if (h[i].invalidNFcount > 0) {
			/* History still warming up: track the raw reading */
			h[i].invalidNFcount--;
			h[i].privNF = nfarray[i];
		} else {
			/* Otherwise use the median of the history window */
			h[i].privNF =
				ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
		}

		if (!h[i].privNF)
			continue;

		if (h[i].privNF > limit->max) {
			high_nf_mid = true;

			ath_dbg(common, ATH_DBG_CALIBRATE,
				"NFmid[%d] (%d) > MAX (%d), %s\n",
				i, h[i].privNF, limit->max,
				(cal->nfcal_interference ?
				 "not corrected (due to interference)" :
				 "correcting to MAX"));

			/*
			 * Normally we limit the average noise floor by the
			 * hardware specific maximum here. However if we have
			 * encountered stuck beacons because of interference,
			 * we bypass this limit here in order to better deal
			 * with our environment.
			 */
			if (!cal->nfcal_interference)
				h[i].privNF = limit->max;
		}
	}

	/*
	 * If the noise floor seems normal for all chains, assume that
	 * there is no significant interference in the environment anymore.
	 * Re-enable the enforcement of the NF maximum again.
	 */
	if (!high_nf_mid)
		cal->nfcal_interference = false;
}
/*
 * Fetch the EEPROM noise floor threshold for the given band into *nft.
 * Returns true on success; any other band value is a driver bug.
 */
static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
				   enum ieee80211_band band,
				   int16_t *nft)
{
	if (band == IEEE80211_BAND_5GHZ) {
		*nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5);
	} else if (band == IEEE80211_BAND_2GHZ) {
		*nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2);
	} else {
		BUG_ON(1);
		return false;
	}

	return true;
}
/*
 * (Re)start the given calibration: program the hardware for it, mark
 * it running, and clear the accumulated per-chain measurement state.
 */
void ath9k_hw_reset_calibration(struct ath_hw *ah,
				struct ath9k_cal_list *currCal)
{
	int i;

	ath9k_hw_setup_calibration(ah, currCal);

	currCal->calState = CAL_RUNNING;

	/* Reset the measurement sign accumulators for every chain */
	for (i = 0; i < AR5416_MAX_CHAINS; i++) {
		ah->meas0.sign[i] = 0;
		ah->meas1.sign[i] = 0;
		ah->meas2.sign[i] = 0;
		ah->meas3.sign[i] = 0;
	}

	ah->cal_samples = 0;
}
/* This is done for the currently configured channel */
/*
 * Invalidate the current calibration (if applicable) so it runs again.
 * Returns true when nothing was reset (no caldata, unsupported chip,
 * no current cal, cal not finished, or cal type not supported);
 * returns false after marking the current cal invalid and re-queueing it.
 */
bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &common->hw->conf;
	struct ath9k_cal_list *currCal = ah->cal_list_curr;

	if (!ah->caldata)
		return true;

	/* Only AR9100 and AR9160+ use this calibration scheme */
	if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
		return true;

	if (currCal == NULL)
		return true;

	if (currCal->calState != CAL_DONE) {
		ath_dbg(common, ATH_DBG_CALIBRATE,
			"Calibration state incorrect, %d\n",
			currCal->calState);
		return true;
	}

	if (!(ah->supp_cals & currCal->calData->calType))
		return true;

	ath_dbg(common, ATH_DBG_CALIBRATE,
		"Resetting Cal %d state for channel %u\n",
		currCal->calData->calType, conf->channel->center_freq);

	/* Mark the current cal invalid and queue it to run again */
	ah->caldata->CalValid &= ~currCal->calData->calType;
	currCal->calState = CAL_WAITING;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
/*
 * Kick off a hardware noise floor calibration.  When @update is true
 * the baseband may update its internal NF value from the result;
 * otherwise the NO_UPDATE bit keeps the internal value untouched.
 */
void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
{
	if (ah->caldata)
		ah->caldata->nfcal_pending = true;

	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_ENABLE_NF);

	if (update)
		REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
			    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
	else
		REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
			    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);

	/* Writing the NF bit starts the calibration */
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
}
/*
 * Load the software-filtered noise floor values into the baseband.
 *
 * Writes the per-chain NF history medians (or the band default when no
 * caldata is attached) into the NF registers, triggers the hardware
 * load cycle, waits for it to complete, then re-programs -50 into the
 * registers so the next hardware NF cal is not capped by the value
 * just loaded.
 */
void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath9k_nfcal_hist *h = NULL;
	unsigned i, j;
	int32_t val;
	u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &common->hw->conf;
	s16 default_nf = ath9k_hw_get_default_nf(ah, chan);

	if (ah->caldata)
		h = ah->caldata->nfCalHist;

	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			s16 nfval;

			/* ext-chain readings are only meaningful in HT40 */
			if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
				continue;

			if (h)
				nfval = h[i].privNF;
			else
				nfval = default_nf;

			/* NF field occupies bits 8:1 of the register */
			val = REG_READ(ah, ah->nf_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) nfval << 1) & 0x1ff);
			REG_WRITE(ah, ah->nf_regs[i], val);
		}
	}

	/*
	 * Load software filtered NF value into baseband internal minCCApwr
	 * variable.
	 */
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_ENABLE_NF);
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);

	/*
	 * Wait for load to complete, should be fast, a few 10s of us.
	 * The max delay was changed from an original 250us to 10000us
	 * since 250us often results in NF load timeout and causes deaf
	 * condition during stress testing 12/12/2009
	 */
	for (j = 0; j < 10000; j++) {
		if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
		     AR_PHY_AGC_CONTROL_NF) == 0)
			break;
		udelay(10);
	}

	/*
	 * We timed out waiting for the noisefloor to load, probably due to an
	 * in-progress rx. Simply return here and allow the load plenty of time
	 * to complete before the next calibration interval. We need to avoid
	 * trying to load -50 (which happens below) while the previous load is
	 * still in progress as this can cause rx deafness. Instead by returning
	 * here, the baseband nf cal will just be capped by our present
	 * noisefloor until the next calibration timer.
	 */
	if (j == 10000) {
		ath_dbg(common, ATH_DBG_ANY,
			"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
			REG_READ(ah, AR_PHY_AGC_CONTROL));
		return;
	}

	/*
	 * Restore maxCCAPower register parameter again so that we're not capped
	 * by the median we just loaded. This will be initial (and max) value
	 * of next noise floor calibration the baseband does.
	 */
	ENABLE_REGWRITE_BUFFER(ah);

	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
				continue;

			val = REG_READ(ah, ah->nf_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) (-50) << 1) & 0x1ff);
			REG_WRITE(ah, ah->nf_regs[i], val);
		}
	}

	REGWRITE_BUFFER_FLUSH(ah);
}
/*
 * Clamp implausible NF readings before they enter the history:
 * anything above ATH9K_NF_TOO_HIGH is capped to the band maximum,
 * anything below the band minimum is reset to the nominal value.
 * Zero entries (unread slots) are skipped.
 */
static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_nf_limits *limit;
	int i;

	/*
	 * Use the shared band-limit lookup instead of open-coding the
	 * 2G/5G selection; this also tolerates a NULL curchan, which
	 * the previous direct IS_CHAN_2GHZ(ah->curchan) test did not.
	 */
	limit = ath9k_hw_get_nf_limits(ah, ah->curchan);

	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (!nf[i])
			continue;

		ath_dbg(common, ATH_DBG_CALIBRATE,
			"NF calibrated [%s] [chain %d] is %d\n",
			(i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);

		if (nf[i] > ATH9K_NF_TOO_HIGH) {
			ath_dbg(common, ATH_DBG_CALIBRATE,
				"NF[%d] (%d) > MAX (%d), correcting to MAX\n",
				i, nf[i], ATH9K_NF_TOO_HIGH);
			nf[i] = limit->max;
		} else if (nf[i] < limit->min) {
			ath_dbg(common, ATH_DBG_CALIBRATE,
				"NF[%d] (%d) < MIN (%d), correcting to NOM\n",
				i, nf[i], limit->min);
			nf[i] = limit->nominal;
		}
	}
}
/*
 * Read back the hardware NF calibration results for @chan.
 *
 * Returns false when the hardware cal has not finished yet, or when no
 * caldata is attached (in which case chan->noisefloor gets the raw
 * chain-0 reading).  Returns true after the readings have been folded
 * into the calibration history and chan->noisefloor updated from it.
 */
bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath_common *common = ath9k_hw_common(ah);
	int16_t nf, nfThresh;
	int16_t nfarray[NUM_NF_READINGS] = { 0 };
	struct ath9k_nfcal_hist *h;
	struct ieee80211_channel *c = chan->chan;
	struct ath9k_hw_cal_data *caldata = ah->caldata;

	chan->channelFlags &= (~CHANNEL_CW_INT);
	/* NF bit still set means the hardware cal is still running */
	if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
		ath_dbg(common, ATH_DBG_CALIBRATE,
			"NF did not complete in calibration window\n");
		return false;
	}

	ath9k_hw_do_getnf(ah, nfarray);
	ath9k_hw_nf_sanitize(ah, nfarray);
	nf = nfarray[0];
	/* Flag continuous-wave interference if above the EEPROM threshold */
	if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
	    && nf > nfThresh) {
		ath_dbg(common, ATH_DBG_CALIBRATE,
			"noise floor failed detected; detected %d, threshold %d\n",
			nf, nfThresh);
		chan->channelFlags |= CHANNEL_CW_INT;
	}

	if (!caldata) {
		chan->noisefloor = nf;
		return false;
	}

	h = caldata->nfCalHist;
	caldata->nfcal_pending = false;
	ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
	chan->noisefloor = h[0].privNF;
	return true;
}
/*
 * Seed the NF calibration history for a fresh channel with the band's
 * default noise floor.
 *
 * NOTE(review): ah->caldata is dereferenced unconditionally here —
 * callers presumably guarantee it is non-NULL; confirm before reuse.
 */
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
				  struct ath9k_channel *chan)
{
	struct ath9k_nfcal_hist *h;
	s16 default_nf;
	int i, j;

	ah->caldata->channel = chan->channel;
	ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
	h = ah->caldata->nfCalHist;
	default_nf = ath9k_hw_get_default_nf(ah, chan);
	/* Fill every chain's history window with the default value */
	for (i = 0; i < NUM_NF_READINGS; i++) {
		h[i].currIndex = 0;
		h[i].privNF = default_nf;
		h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH;
		for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
			h[i].nfCalBuffer[j] = default_nf;
		}
	}
}
/*
 * React to stuck beacons: start (or finish) a noise floor calibration
 * and mark the environment as interference-afflicted so the NF maximum
 * clamp is bypassed in the history update.
 */
void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
{
	struct ath9k_hw_cal_data *caldata = ah->caldata;

	if (unlikely(!caldata))
		return;

	/*
	 * If beacons are stuck, the most likely cause is interference.
	 * Triggering a noise floor calibration at this point helps the
	 * hardware adapt to a noisy environment much faster.
	 * To ensure that we recover from stuck beacons quickly, let
	 * the baseband update the internal NF value itself, similar to
	 * what is being done after a full reset.
	 */
	if (!caldata->nfcal_pending)
		ath9k_hw_start_nfcal(ah, true);
	else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
		ath9k_hw_getnf(ah, ah->curchan);

	caldata->nfcal_interference = true;
}
EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
| gpl-2.0 |
TI-OpenLink/wl12xx_soldel_maintenance | drivers/mtd/maps/intel_vr_nor.c | 1248 | 7852 | /*
* drivers/mtd/maps/intel_vr_nor.c
*
* An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel
* Vermilion Range chipset.
*
* The Vermilion Range Expansion Bus supports four chip selects, each of which
* has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device
* is a 256MiB memory region containing the address spaces for all four of the
* chip selects, with start addresses hardcoded on 64MiB boundaries.
*
* This map driver only supports NOR flash on chip select 0. The buswidth
* (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing
* and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does
* not modify the value in the EXP_TIMING_CS0 register except to enable writing
* and disable boot acceleration. The timing parameters in the register are
* assumed to have been properly initialized by the BIOS. The reset default
* timing parameters are maximally conservative (slow), so access to the flash
* will be slower than it should be if the BIOS has not initialized the timing
* parameters.
*
* Author: Andy Lowe <alowe@mvista.com>
*
* 2006 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/flashchip.h>
#define DRV_NAME "vr_nor"
/* Per-device driver state, allocated in vr_nor_pci_probe(). */
struct vr_nor_mtd {
	void __iomem *csr_base;	/* mapped Expansion Bus CSR window (BAR 0) */
	struct map_info map;	/* MTD map of the CS0 flash window (BAR 1) */
	struct mtd_info *info;	/* chip info returned by do_map_probe() */
	int nr_parts;		/* parsed partition count; <= 0 = whole device */
	struct pci_dev *dev;
};
/* Expansion Bus Configuration and Status Registers are in BAR 0 */
#define EXP_CSR_MBAR 0
/* Expansion Bus Memory Window is BAR 1 */
#define EXP_WIN_MBAR 1
/* Maximum address space for Chip Select 0 is 64MiB */
#define CS0_SIZE 0x04000000
/* Chip Select 0 is at offset 0 in the Memory Window */
#define CS0_START 0x0
/* Chip Select 0 Timing Register is at offset 0 in CSR */
#define EXP_TIMING_CS0 0x00
#define TIMING_CS_EN (1 << 31) /* Chip Select Enable */
#define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */
#define TIMING_WR_EN (1 << 1) /* Write Enable */
#define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */
#define TIMING_MASK 0x3FFF0000
/*
 * Undo vr_nor_init_partitions(): remove the partitions if any were
 * registered, otherwise remove the whole-device MTD.  The partition
 * branch only exists when MTD partition support is compiled in,
 * mirroring the conditional registration in init.
 */
static void __devexit vr_nor_destroy_partitions(struct vr_nor_mtd *p)
{
	if (p->nr_parts > 0) {
#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
		del_mtd_partitions(p->info);
#endif
	} else
		del_mtd_device(p->info);
}
/*
 * Register the flash with MTD.  If partition support is available,
 * try to parse a partition table (command line only); fall back to
 * registering the whole bank as one device.  Returns 0 or a negative
 * errno from the MTD registration call.
 */
static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
{
	int err = 0;
#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
	struct mtd_partition *parts;
	static const char *part_probes[] = { "cmdlinepart", NULL };
#endif

	/* register the flash bank */
#if defined(CONFIG_MTD_PARTITIONS) || defined(CONFIG_MTD_PARTITIONS_MODULE)
	/* partition the flash bank */
	p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0);
	if (p->nr_parts > 0)
		err = add_mtd_partitions(p->info, parts, p->nr_parts);
#endif
	if (p->nr_parts <= 0)
		err = add_mtd_device(p->info);

	return err;
}
/* Undo vr_nor_mtd_setup(): release the probed chip info. */
static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
{
	map_destroy(p->info);
}
/*
 * Identify the flash chip behind the mapping.  Each probe method is
 * tried in order until one recognizes the chip; -ENODEV if none does.
 */
static int __devinit vr_nor_mtd_setup(struct vr_nor_mtd *p)
{
	static const char *probe_types[] =
	    { "cfi_probe", "jedec_probe", NULL };
	int idx = 0;

	while (probe_types[idx] && !p->info) {
		p->info = do_map_probe(probe_types[idx], &p->map);
		idx++;
	}

	if (!p->info)
		return -ENODEV;

	p->info->owner = THIS_MODULE;

	return 0;
}
/*
 * Undo vr_nor_init_maps(): re-assert write protection on the flash
 * bank, then unmap both the flash and CSR windows.
 */
static void __devexit vr_nor_destroy_maps(struct vr_nor_mtd *p)
{
	unsigned int exp_timing_cs0;

	/* write-protect the flash bank */
	exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
	exp_timing_cs0 &= ~TIMING_WR_EN;
	writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);

	/* unmap the flash window */
	iounmap(p->map.virt);

	/* unmap the csr window */
	iounmap(p->csr_base);
}
/*
 * Initialize the map_info structure and map the flash.
 * Returns 0 on success, nonzero otherwise.
 *
 * Maps the CSR window (BAR 0) and the CS0 region of the memory window
 * (BAR 1), sanity-checks that CS0 is enabled, derives the bus width
 * from the timing register, and enables writes to the bank.
 */
static int __devinit vr_nor_init_maps(struct vr_nor_mtd *p)
{
	unsigned long csr_phys, csr_len;
	unsigned long win_phys, win_len;
	unsigned int exp_timing_cs0;
	int err;

	csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR);
	csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR);
	win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR);
	win_len = pci_resource_len(p->dev, EXP_WIN_MBAR);

	if (!csr_phys || !csr_len || !win_phys || !win_len)
		return -ENODEV;

	/* The memory window must at least cover all of CS0's space */
	if (win_len < (CS0_START + CS0_SIZE))
		return -ENXIO;

	p->csr_base = ioremap_nocache(csr_phys, csr_len);
	if (!p->csr_base)
		return -ENOMEM;

	exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
	if (!(exp_timing_cs0 & TIMING_CS_EN)) {
		dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
		       "is disabled.\n");
		err = -ENODEV;
		goto release;
	}
	/* All-ones timing parameters = BIOS never initialized them */
	if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) {
		dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 "
		       "is configured for maximally slow access times.\n");
	}
	p->map.name = DRV_NAME;
	p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 1 : 2;
	p->map.phys = win_phys + CS0_START;
	p->map.size = CS0_SIZE;
	p->map.virt = ioremap_nocache(p->map.phys, p->map.size);
	if (!p->map.virt) {
		err = -ENOMEM;
		goto release;
	}
	simple_map_init(&p->map);

	/* Enable writes to flash bank */
	exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN;
	writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);

	return 0;

      release:
	iounmap(p->csr_base);
	return err;
}
/* Intel Vermilion Range Expansion Bus device (see file header). */
static struct pci_device_id vr_nor_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)},
	{0,}
};
/* PCI remove: tear everything down in reverse of probe order. */
static void __devexit vr_nor_pci_remove(struct pci_dev *dev)
{
	struct vr_nor_mtd *p = pci_get_drvdata(dev);

	pci_set_drvdata(dev, NULL);
	vr_nor_destroy_partitions(p);
	vr_nor_destroy_mtd_setup(p);
	vr_nor_destroy_maps(p);
	kfree(p);
	pci_release_regions(dev);
	pci_disable_device(dev);
}
/*
 * PCI probe: enable the device, map the windows, identify the chip and
 * register it with MTD.  Error unwinding duplicates the bodies of
 * vr_nor_destroy_maps()/vr_nor_destroy_mtd_setup() inline — presumably
 * because those helpers are marked __devexit and cannot be called from
 * __devinit code; confirm before refactoring.
 */
static int __devinit
vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vr_nor_mtd *p = NULL;
	unsigned int exp_timing_cs0;
	int err;

	err = pci_enable_device(dev);
	if (err)
		goto out;

	err = pci_request_regions(dev, DRV_NAME);
	if (err)
		goto disable_dev;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	err = -ENOMEM;
	if (!p)
		goto release;

	p->dev = dev;

	err = vr_nor_init_maps(p);
	if (err)
		goto release;

	err = vr_nor_mtd_setup(p);
	if (err)
		goto destroy_maps;

	err = vr_nor_init_partitions(p);
	if (err)
		goto destroy_mtd_setup;

	pci_set_drvdata(dev, p);

	return 0;

      destroy_mtd_setup:
	map_destroy(p->info);

      destroy_maps:
	/* write-protect the flash bank */
	exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0);
	exp_timing_cs0 &= ~TIMING_WR_EN;
	writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0);

	/* unmap the flash window */
	iounmap(p->map.virt);

	/* unmap the csr window */
	iounmap(p->csr_base);

      release:
	kfree(p);
	pci_release_regions(dev);

      disable_dev:
	pci_disable_device(dev);

      out:
	return err;
}
/* PCI driver glue and module entry points. */
static struct pci_driver vr_nor_pci_driver = {
	.name = DRV_NAME,
	.probe = vr_nor_pci_probe,
	.remove = __devexit_p(vr_nor_pci_remove),
	.id_table = vr_nor_pci_ids,
};

static int __init vr_nor_mtd_init(void)
{
	return pci_register_driver(&vr_nor_pci_driver);
}

static void __exit vr_nor_mtd_exit(void)
{
	pci_unregister_driver(&vr_nor_pci_driver);
}

module_init(vr_nor_mtd_init);
module_exit(vr_nor_mtd_exit);

MODULE_AUTHOR("Andy Lowe");
MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
| gpl-2.0 |
agrabren/android_kernel_htc_shooter | arch/alpha/kernel/sys_eb64p.c | 2272 | 5765 | /*
* linux/arch/alpha/kernel/sys_eb64p.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the EB64+ and EB66.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_apecs.h>
#include <asm/core_lca.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
/* Note mask bit is true for DISABLED irqs. */
static unsigned int cached_irq_mask = -1;

/*
 * Push the relevant byte of the cached mask to the external mask
 * registers: port 0x26 covers irqs 16-23, port 0x27 covers irqs 24-31.
 */
static inline void
eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
{
	outb(mask >> (irq >= 24 ? 24 : 16), (irq >= 24 ? 0x27 : 0x26));
}
/* Unmask @d->irq: clear its mask bit (true == disabled), push to hw. */
static inline void
eb64p_enable_irq(struct irq_data *d)
{
	cached_irq_mask &= ~(1 << d->irq);
	eb64p_update_irq_hw(d->irq, cached_irq_mask);
}
/* Mask @d->irq: set its mask bit (true == disabled), push to hw. */
static void
eb64p_disable_irq(struct irq_data *d)
{
	cached_irq_mask |= 1 << d->irq;
	eb64p_update_irq_hw(d->irq, cached_irq_mask);
}
/* irq_chip for the external (16-31) interrupts. */
static struct irq_chip eb64p_irq_type = {
	.name		= "EB64P",
	.irq_unmask	= eb64p_enable_irq,
	.irq_mask	= eb64p_disable_irq,
	.irq_mask_ack	= eb64p_disable_irq,
};
/*
 * Top-level interrupt dispatcher: read both external summary registers
 * (0x26/0x27) and handle every pending source.  Bit 5 is the cascade
 * from the two ISA PICs (see the routing comment below); all other
 * bits map directly to irq 16+bit.
 */
static void
eb64p_device_interrupt(unsigned long vector)
{
	unsigned long pld;
	unsigned int i;

	/* Read the interrupt summary registers */
	pld = inb(0x26) | (inb(0x27) << 8);

	/*
	 * Now, for every possible bit set, work through
	 * them and call the appropriate interrupt handler.
	 */
	while (pld) {
		i = ffz(~pld);
		pld &= pld - 1;	/* clear least bit set */

		if (i == 5) {
			isa_device_interrupt(vector);
		} else {
			handle_irq(16 + i);
		}
	}
}
/*
 * Interrupt setup: mask all external irqs, initialize the i8259 pair,
 * and register the level-triggered handlers for irqs 16-31.
 */
static void __init
eb64p_init_irq(void)
{
	long i;

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_CABRIOLET)
	/*
	 * CABRIO SRM may not set variation correctly, so here we test
	 * the high word of the interrupt summary register for the RAZ
	 * bits, and hope that a true EB64+ would read all ones...
	 */
	if (inw(0x806) != 0xffff) {
		extern struct alpha_machine_vector cabriolet_mv;

		printk("Detected Cabriolet: correcting HWRPB.\n");
		hwrpb->sys_variation |= 2L << 10;
		hwrpb_update_checksum(hwrpb);

		/* Hand over to the Cabriolet machine vector instead. */
		alpha_mv = cabriolet_mv;
		alpha_mv.init_irq();
		return;
	}
#endif /* GENERIC */

	/* Mask everything (mask bit true == disabled). */
	outb(0xff, 0x26);
	outb(0xff, 0x27);

	init_i8259a_irqs();

	for (i = 16; i < 32; ++i) {
		irq_set_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}

	common_init_isa_dma();
	/* irq 16+5 is the cascade input from the ISA PICs. */
	setup_irq(16+5, &isa_cascade_irqaction);
}
/*
* PCI Fixup configuration.
*
* There are two 8 bit external summary registers as follows:
*
* Summary @ 0x26:
* Bit Meaning
* 0 Interrupt Line A from slot 0
* 1 Interrupt Line A from slot 1
* 2 Interrupt Line B from slot 0
* 3 Interrupt Line B from slot 1
* 4 Interrupt Line C from slot 0
* 5 Interrupt line from the two ISA PICs
* 6 Tulip
* 7 NCR SCSI
*
* Summary @ 0x27
* Bit Meaning
* 0 Interrupt Line C from slot 1
* 1 Interrupt Line D from slot 0
* 2 Interrupt Line D from slot 1
* 3 RAZ
* 4 RAZ
* 5 RAZ
* 6 RAZ
* 7 RAZ
*
* The device to slot mapping looks like:
*
* Slot Device
* 5 NCR SCSI controller
* 6 PCI on board slot 0
* 7 PCI on board slot 1
* 8 Intel SIO PCI-ISA bridge chip
* 9 Tulip - DECchip 21040 Ethernet controller
*
*
* This two layered interrupt approach means that we allocate IRQ 16 and
* above for PCI interrupts. The IRQ relates to which bit the interrupt
* comes in on. This makes interrupt processing much easier.
*/
/*
 * Translate a PCI device's IdSel (5-9) and interrupt pin into an irq
 * number (16 + summary-register bit), per the routing tables in the
 * comment above.  -1 entries (the SIO bridge) have no PCI interrupt.
 */
static int __init
eb64p_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	static char irq_tab[5][5] __initdata = {
		/*INT  INTA  INTB  INTC   INTD */
		{16+7, 16+7, 16+7, 16+7,  16+7},  /* IdSel 5,  slot ?, ?? */
		{16+0, 16+0, 16+2, 16+4,  16+9},  /* IdSel 6,  slot ?, ?? */
		{16+1, 16+1, 16+3, 16+8, 16+10},  /* IdSel 7,  slot ?, ?? */
		{  -1,   -1,   -1,   -1,    -1},  /* IdSel 8,  SIO */
		{16+6, 16+6, 16+6, 16+6,  16+6},  /* IdSel 9,  TULIP */
	};
	const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
	return COMMON_TABLE_LOOKUP;
}
/*
 * The System Vector
 */

/* EB64+ machine vector: EV4 MMU with the APECS core chipset. */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB64P)
struct alpha_machine_vector eb64p_mv __initmv = {
	.vector_name		= "EB64+",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_APECS_IO,
	.machine_check		= apecs_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 32,
	.device_interrupt	= eb64p_device_interrupt,

	.init_arch		= apecs_init_arch,
	.init_irq		= eb64p_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	.kill_arch		= NULL,
	.pci_map_irq		= eb64p_map_irq,
	.pci_swizzle		= common_swizzle,
};
ALIAS_MV(eb64p)
#endif
/* EB66 machine vector: same irq plumbing as EB64+, but the LCA core. */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EB66)
struct alpha_machine_vector eb66_mv __initmv = {
	.vector_name		= "EB66",
	DO_EV4_MMU,
	DO_DEFAULT_RTC,
	DO_LCA_IO,
	.machine_check		= lca_machine_check,
	.max_isa_dma_address	= ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address		= DEFAULT_IO_BASE,
	.min_mem_address	= APECS_AND_LCA_DEFAULT_MEM_BASE,

	.nr_irqs		= 32,
	.device_interrupt	= eb64p_device_interrupt,

	.init_arch		= lca_init_arch,
	.init_irq		= eb64p_init_irq,
	.init_rtc		= common_init_rtc,
	.init_pci		= common_init_pci,
	.pci_map_irq		= eb64p_map_irq,
	.pci_swizzle		= common_swizzle,
};
ALIAS_MV(eb66)
#endif
| gpl-2.0 |
CyanogenMod/android_kernel_huawei_angler | drivers/scsi/nsp32.c | 2528 | 90538 | /*
* NinjaSCSI-32Bi Cardbus, NinjaSCSI-32UDE PCI/CardBus SCSI driver
* Copyright (C) 2001, 2002, 2003
* YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
* GOTO Masanori <gotom@debian.or.jp>, <gotom@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* Revision History:
* 1.0: Initial Release.
* 1.1: Add /proc SDTR status.
* Remove obsolete error handler nsp32_reset.
* Some clean up.
* 1.2: PowerPC (big endian) support.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include "nsp32.h"
/***********************************************************************
 * Module parameters
 */
/* SCSI transfer mode selection (fix: description string was missing
 * its closing parenthesis). */
static int trans_mode = 0;	/* default: BIOS */
module_param(trans_mode, int, 0);
MODULE_PARM_DESC(trans_mode, "transfer mode (0: BIOS(default) 1: Async 2: Ultra20M)");
#define ASYNC_MODE    1
#define ULTRA20M_MODE 2

static bool auto_param = 0;	/* default: ON */
module_param(auto_param, bool, 0);
MODULE_PARM_DESC(auto_param, "AutoParameter mode (0: ON(default) 1: OFF)");

static bool disc_priv = 1;	/* default: OFF */
module_param(disc_priv, bool, 0);
MODULE_PARM_DESC(disc_priv, "disconnection privilege mode (0: ON 1: OFF(default))");

MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>, GOTO Masanori <gotom@debian.or.jp>");
MODULE_DESCRIPTION("Workbit NinjaSCSI-32Bi/UDE CardBus/PCI SCSI host bus adapter module");
MODULE_LICENSE("GPL");

static const char *nsp32_release_version = "1.2";
/****************************************************************************
* Supported hardware
*/
/*
 * Supported NinjaSCSI-32 boards; driver_data carries the MODEL_*
 * board identifier.
 */
static struct pci_device_id nsp32_pci_table[] = {
	{
		.vendor      = PCI_VENDOR_ID_IODATA,
		.device      = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II,
		.subvendor   = PCI_ANY_ID,
		.subdevice   = PCI_ANY_ID,
		.driver_data = MODEL_IODATA,
	},
	{
		.vendor      = PCI_VENDOR_ID_WORKBIT,
		.device      = PCI_DEVICE_ID_NINJASCSI_32BI_KME,
		.subvendor   = PCI_ANY_ID,
		.subdevice   = PCI_ANY_ID,
		.driver_data = MODEL_KME,
	},
	{
		.vendor      = PCI_VENDOR_ID_WORKBIT,
		.device      = PCI_DEVICE_ID_NINJASCSI_32BI_WBT,
		.subvendor   = PCI_ANY_ID,
		.subdevice   = PCI_ANY_ID,
		.driver_data = MODEL_WORKBIT,
	},
	{
		.vendor      = PCI_VENDOR_ID_WORKBIT,
		.device      = PCI_DEVICE_ID_WORKBIT_STANDARD,
		.subvendor   = PCI_ANY_ID,
		.subdevice   = PCI_ANY_ID,
		.driver_data = MODEL_PCI_WORKBIT,
	},
	{
		.vendor      = PCI_VENDOR_ID_WORKBIT,
		.device      = PCI_DEVICE_ID_NINJASCSI_32BI_LOGITEC,
		.subvendor   = PCI_ANY_ID,
		.subdevice   = PCI_ANY_ID,
		.driver_data = MODEL_LOGITEC,
	},
	{
		.vendor      = PCI_VENDOR_ID_WORKBIT,
		.device      = PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC,
		.subvendor   = PCI_ANY_ID,
		.subdevice   = PCI_ANY_ID,
		.driver_data = MODEL_PCI_LOGITEC,
	},
	{
		.vendor      = PCI_VENDOR_ID_WORKBIT,
		.device      = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO,
		.subvendor   = PCI_ANY_ID,
		.subdevice   = PCI_ANY_ID,
		.driver_data = MODEL_PCI_MELCO,
	},
	{
		.vendor      = PCI_VENDOR_ID_WORKBIT,
		.device      = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO_II,
		.subvendor   = PCI_ANY_ID,
		.subdevice   = PCI_ANY_ID,
		.driver_data = MODEL_PCI_MELCO,
	},
	{0,0,},
};
MODULE_DEVICE_TABLE(pci, nsp32_pci_table);
static nsp32_hw_data nsp32_data_base;  /* probe <-> detect glue */

/*
 * Period/AckWidth speed conversion table
 *
 * Note: This period/ackwidth speed table must be in descending order.
 */
/* Table for boards clocked at 40 MHz. */
static nsp32_sync_table nsp32_sync_table_40M[] = {
     /* {PNo, AW, SP, EP, SREQ smpl}  Speed(MB/s) Period AckWidth */
	{0x1, 0, 0x0c, 0x0c, SMPL_40M},  /*  20.0 :  50ns,  25ns */
	{0x2, 0, 0x0d, 0x18, SMPL_40M},  /*  13.3 :  75ns,  25ns */
	{0x3, 1, 0x19, 0x19, SMPL_40M},  /*  10.0 : 100ns,  50ns */
	{0x4, 1, 0x1a, 0x1f, SMPL_20M},  /*   8.0 : 125ns,  50ns */
	{0x5, 2, 0x20, 0x25, SMPL_20M},  /*   6.7 : 150ns,  75ns */
	{0x6, 2, 0x26, 0x31, SMPL_20M},  /*   5.7 : 175ns,  75ns */
	{0x7, 3, 0x32, 0x32, SMPL_20M},  /*   5.0 : 200ns, 100ns */
	{0x8, 3, 0x33, 0x38, SMPL_10M},  /*   4.4 : 225ns, 100ns */
	{0x9, 3, 0x39, 0x3e, SMPL_10M},  /*   4.0 : 250ns, 100ns */
};

/* Table for boards clocked at 20 MHz. */
static nsp32_sync_table nsp32_sync_table_20M[] = {
	{0x1, 0, 0x19, 0x19, SMPL_40M},  /*  10.0 : 100ns,  50ns */
	{0x2, 0, 0x1a, 0x25, SMPL_20M},  /*   6.7 : 150ns,  50ns */
	{0x3, 1, 0x26, 0x32, SMPL_20M},  /*   5.0 : 200ns, 100ns */
	{0x4, 1, 0x33, 0x3e, SMPL_10M},  /*   4.0 : 250ns, 100ns */
	{0x5, 2, 0x3f, 0x4b, SMPL_10M},  /*   3.3 : 300ns, 150ns */
	{0x6, 2, 0x4c, 0x57, SMPL_10M},  /*   2.8 : 350ns, 150ns */
	{0x7, 3, 0x58, 0x64, SMPL_10M},  /*   2.5 : 400ns, 200ns */
	{0x8, 3, 0x65, 0x70, SMPL_10M},  /*   2.2 : 450ns, 200ns */
	{0x9, 3, 0x71, 0x7d, SMPL_10M},  /*   2.0 : 500ns, 200ns */
};

/* Table for PCI-clocked (33 MHz) boards. */
static nsp32_sync_table nsp32_sync_table_pci[] = {
	{0x1, 0, 0x0c, 0x0f, SMPL_40M},  /*  16.6 :  60ns,  30ns */
	{0x2, 0, 0x10, 0x16, SMPL_40M},  /*  11.1 :  90ns,  30ns */
	{0x3, 1, 0x17, 0x1e, SMPL_20M},  /*   8.3 : 120ns,  60ns */
	{0x4, 1, 0x1f, 0x25, SMPL_20M},  /*   6.7 : 150ns,  60ns */
	{0x5, 2, 0x26, 0x2d, SMPL_20M},  /*   5.6 : 180ns,  90ns */
	{0x6, 2, 0x2e, 0x34, SMPL_10M},  /*   4.8 : 210ns,  90ns */
	{0x7, 3, 0x35, 0x3c, SMPL_10M},  /*   4.2 : 240ns, 120ns */
	{0x8, 3, 0x3d, 0x43, SMPL_10M},  /*   3.7 : 270ns, 120ns */
	{0x9, 3, 0x44, 0x4b, SMPL_10M},  /*   3.3 : 300ns, 120ns */
};
/*
* function declaration
*/
/* module entry point */
static int nsp32_probe (struct pci_dev *, const struct pci_device_id *);
static void nsp32_remove(struct pci_dev *);
static int __init init_nsp32 (void);
static void __exit exit_nsp32 (void);
/* struct struct scsi_host_template */
static int nsp32_show_info (struct seq_file *, struct Scsi_Host *);
static int nsp32_detect (struct pci_dev *pdev);
static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
static const char *nsp32_info (struct Scsi_Host *);
static int nsp32_release (struct Scsi_Host *);
/* SCSI error handler */
static int nsp32_eh_abort (struct scsi_cmnd *);
static int nsp32_eh_bus_reset (struct scsi_cmnd *);
static int nsp32_eh_host_reset(struct scsi_cmnd *);
/* generate SCSI message */
static void nsp32_build_identify(struct scsi_cmnd *);
static void nsp32_build_nop (struct scsi_cmnd *);
static void nsp32_build_reject (struct scsi_cmnd *);
static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char, unsigned char);
/* SCSI message handler */
static int nsp32_busfree_occur(struct scsi_cmnd *, unsigned short);
static void nsp32_msgout_occur (struct scsi_cmnd *);
static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long, unsigned short);
static int nsp32_setup_sg_table (struct scsi_cmnd *);
static int nsp32_selection_autopara(struct scsi_cmnd *);
static int nsp32_selection_autoscsi(struct scsi_cmnd *);
static void nsp32_scsi_done (struct scsi_cmnd *);
static int nsp32_arbitration (struct scsi_cmnd *, unsigned int);
static int nsp32_reselection (struct scsi_cmnd *, unsigned char);
static void nsp32_adjust_busfree (struct scsi_cmnd *, unsigned int);
static void nsp32_restart_autoscsi (struct scsi_cmnd *, unsigned short);
/* SCSI SDTR */
static void nsp32_analyze_sdtr (struct scsi_cmnd *);
static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *, unsigned char);
static void nsp32_set_async (nsp32_hw_data *, nsp32_target *);
static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *, unsigned char *, unsigned char *);
static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *, int, unsigned char);
/* SCSI bus status handler */
static void nsp32_wait_req (nsp32_hw_data *, int);
static void nsp32_wait_sack (nsp32_hw_data *, int);
static void nsp32_sack_assert (nsp32_hw_data *);
static void nsp32_sack_negate (nsp32_hw_data *);
static void nsp32_do_bus_reset(nsp32_hw_data *);
/* hardware interrupt handler */
static irqreturn_t do_nsp32_isr(int, void *);
/* initialize hardware */
static int nsp32hw_init(nsp32_hw_data *);
/* EEPROM handler */
static int nsp32_getprom_param (nsp32_hw_data *);
static int nsp32_getprom_at24 (nsp32_hw_data *);
static int nsp32_getprom_c16 (nsp32_hw_data *);
static void nsp32_prom_start (nsp32_hw_data *);
static void nsp32_prom_stop (nsp32_hw_data *);
static int nsp32_prom_read (nsp32_hw_data *, int);
static int nsp32_prom_read_bit (nsp32_hw_data *);
static void nsp32_prom_write_bit(nsp32_hw_data *, int);
static void nsp32_prom_set (nsp32_hw_data *, int, int);
static int nsp32_prom_get (nsp32_hw_data *, int);
/* debug/warning/info message */
static void nsp32_message (const char *, int, char *, char *, ...);
#ifdef NSP32_DEBUG
static void nsp32_dmessage(const char *, int, int, char *, ...);
#endif
/*
 * SCSI host template.
 * max_sectors is currently limited up to 128.
 * can_queue and cmd_per_lun are 1: this driver tracks a single
 * outstanding command (data->CurrentSC) at a time.
 */
static struct scsi_host_template nsp32_template = {
	.proc_name			= "nsp32",
	.name				= "Workbit NinjaSCSI-32Bi/UDE",
	.show_info			= nsp32_show_info,
	.info				= nsp32_info,
	.queuecommand			= nsp32_queuecommand,
	.can_queue			= 1,
	.sg_tablesize			= NSP32_SG_SIZE,
	.max_sectors			= 128,
	.cmd_per_lun			= 1,
	.this_id			= NSP32_HOST_SCSIID,
	.use_clustering			= DISABLE_CLUSTERING,
	.eh_abort_handler		= nsp32_eh_abort,
	.eh_bus_reset_handler		= nsp32_eh_bus_reset,
	.eh_host_reset_handler		= nsp32_eh_host_reset,
/*	.highmem_io			= 1, */
};
#include "nsp32_io.h"
/***********************************************************************
* debug, error print
*/
#ifndef NSP32_DEBUG
# define NSP32_DEBUG_MASK 0x000000
# define nsp32_msg(type, args...) nsp32_message ("", 0, (type), args)
# define nsp32_dbg(mask, args...) /* */
#else
# define NSP32_DEBUG_MASK 0xffffff
# define nsp32_msg(type, args...) \
nsp32_message (__func__, __LINE__, (type), args)
# define nsp32_dbg(mask, args...) \
nsp32_dmessage(__func__, __LINE__, (mask), args)
#endif
#define NSP32_DEBUG_QUEUECOMMAND BIT(0)
#define NSP32_DEBUG_REGISTER BIT(1)
#define NSP32_DEBUG_AUTOSCSI BIT(2)
#define NSP32_DEBUG_INTR BIT(3)
#define NSP32_DEBUG_SGLIST BIT(4)
#define NSP32_DEBUG_BUSFREE BIT(5)
#define NSP32_DEBUG_CDB_CONTENTS BIT(6)
#define NSP32_DEBUG_RESELECTION BIT(7)
#define NSP32_DEBUG_MSGINOCCUR BIT(8)
#define NSP32_DEBUG_EEPROM BIT(9)
#define NSP32_DEBUG_MSGOUTOCCUR BIT(10)
#define NSP32_DEBUG_BUSRESET BIT(11)
#define NSP32_DEBUG_RESTART BIT(12)
#define NSP32_DEBUG_SYNC BIT(13)
#define NSP32_DEBUG_WAIT BIT(14)
#define NSP32_DEBUG_TARGETFLAG BIT(15)
#define NSP32_DEBUG_PROC BIT(16)
#define NSP32_DEBUG_INIT BIT(17)
#define NSP32_SPECIAL_PRINT_REGISTER BIT(20)
#define NSP32_DEBUG_BUF_LEN 100
/*
 * nsp32_message - emit a driver log message at the given printk level.
 * @func: calling function name (printed only when NSP32_DEBUG is defined)
 * @line: calling line number  (printed only when NSP32_DEBUG is defined)
 * @type: printk level prefix string (KERN_*)
 * @fmt:  printf-style format for the message body
 *
 * The message body is formatted into a fixed NSP32_DEBUG_BUF_LEN buffer;
 * longer messages are silently truncated by vsnprintf.
 */
static void nsp32_message(const char *func, int line, char *type, char *fmt, ...)
{
	va_list args;
	char buf[NSP32_DEBUG_BUF_LEN];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

#ifndef NSP32_DEBUG
	printk("%snsp32: %s\n", type, buf);
#else
	printk("%snsp32: %s (%d): %s\n", type, func, line, buf);
#endif
}
#ifdef NSP32_DEBUG
/*
 * nsp32_dmessage - emit a debug message, filtered by category mask.
 * @func: calling function name
 * @line: calling line number
 * @mask: NSP32_DEBUG_* category bit of this message
 * @fmt:  printf-style format for the message body
 *
 * Only printed when the category bit is enabled in NSP32_DEBUG_MASK.
 */
static void nsp32_dmessage(const char *func, int line, int mask, char *fmt, ...)
{
	va_list args;
	char buf[NSP32_DEBUG_BUF_LEN];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (mask & NSP32_DEBUG_MASK) {
		printk("nsp32-debug: 0x%x %s (%d): %s\n", mask, func, line, buf);
	}
}
#endif
#ifdef NSP32_DEBUG
# include "nsp32_debug.c"
#else
# define show_command(arg) /* */
# define show_busphase(arg) /* */
# define show_autophase(arg) /* */
#endif
/*
 * IDENTIFY Message
 *
 * Append an IDENTIFY message for this command's LUN to the outgoing
 * message buffer.  DiscPriv (disconnect privilege) is currently always
 * FALSE; automatic detection is unfinished (see the XXX stub below).
 */
static void nsp32_build_identify(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	int pos = data->msgout_len;
	int mode = FALSE;

	/* XXX: Auto DiscPriv detection is progressing... */
	if (disc_priv == 0) {
		/* mode = TRUE; */
	}

	data->msgoutbuf[pos] = IDENTIFY(mode, SCpnt->device->lun); pos++;

	data->msgout_len = pos;
}
/*
 * SDTR Message Routine
 *
 * Append the five-byte SDTR extended message
 * (EXTENDED_MESSAGE, length, EXTENDED_SDTR, period, offset)
 * to the outgoing message buffer and advance msgout_len.
 */
static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt,
			     unsigned char period,
			     unsigned char offset)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	int n = data->msgout_len;

	data->msgoutbuf[n + 0] = EXTENDED_MESSAGE;
	data->msgoutbuf[n + 1] = EXTENDED_SDTR_LEN;
	data->msgoutbuf[n + 2] = EXTENDED_SDTR;
	data->msgoutbuf[n + 3] = period;
	data->msgoutbuf[n + 4] = offset;

	data->msgout_len = n + 5;
}
/*
 * No Operation Message
 *
 * Place a single NOP message in the outgoing message buffer.  NOP is
 * only valid as the sole outgoing message, so the buffer must be empty;
 * otherwise a warning is logged and nothing is changed.
 */
static void nsp32_build_nop(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;

	if (data->msgout_len != 0) {
		nsp32_msg(KERN_WARNING,
			  "Some messages are already contained!");
		return;
	}

	data->msgoutbuf[0] = NOP;
	data->msgout_len = 1;
}
/*
 * Reject Message
 *
 * Append a MESSAGE_REJECT byte to the outgoing message buffer.
 */
static void nsp32_build_reject(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	int n = data->msgout_len;

	data->msgoutbuf[n] = MESSAGE_REJECT;
	data->msgout_len = n + 1;
}
/*
 * timer
 */
#if 0
/*
 * Dead code, compiled out: would program the asic one-shot timer.
 * NOTE(review): uses SCpnt->host->io_port, which predates the
 * SCpnt->device->host layout used everywhere else in this file, so it
 * would not compile if re-enabled as-is.
 */
static void nsp32_start_timer(struct scsi_cmnd *SCpnt, int time)
{
	unsigned int base = SCpnt->host->io_port;

	nsp32_dbg(NSP32_DEBUG_INTR, "timer=%d", time);

	if (time & (~TIMER_CNT_MASK)) {
		nsp32_dbg(NSP32_DEBUG_INTR, "timer set overflow");
	}

	nsp32_write2(base, TIMER_SET, time & TIMER_CNT_MASK);
}
#endif
/*
 * set SCSI command and other parameter to asic, and start selection phase
 *
 * Fills the in-memory auto-parameter block (CDB, outgoing messages,
 * sync/ack/sample registers, transfer control, SG table address), hands
 * its physical address to the asic, and kicks off AUTO PARAMETER
 * selection.  Returns TRUE when arbitration is won, FALSE otherwise
 * (SCpnt->result carries the DID_* reason on failure).
 */
static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int base = SCpnt->device->host->io_port;
	unsigned int host_id = SCpnt->device->host->this_id;
	unsigned char target = scmd_id(SCpnt);
	nsp32_autoparam *param = data->autoparam;
	unsigned char phase;
	int i, ret;
	unsigned int msgout;
	u16_le s;

	nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");

	/*
	 * check bus free
	 */
	phase = nsp32_read1(base, SCSI_BUS_MONITOR);
	if (phase != BUSMON_BUS_FREE) {
		nsp32_msg(KERN_WARNING, "bus busy");
		show_busphase(phase & BUSMON_PHASE_MASK);
		SCpnt->result = DID_BUS_BUSY << 16;
		return FALSE;
	}

	/*
	 * message out
	 *
	 * Note: If the range of msgout_len is 1 - 3, fill scsi_msgout.
	 *       over 3 messages needs another routine.
	 */
	if (data->msgout_len == 0) {
		nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!");
		SCpnt->result = DID_ERROR << 16;
		return FALSE;
	} else if (data->msgout_len > 0 && data->msgout_len <= 3) {
		msgout = 0;
		for (i = 0; i < data->msgout_len; i++) {
			/*
			 * the sending order of the message is:
			 *  MCNT 3: MSG#0 -> MSG#1 -> MSG#2
			 *  MCNT 2:          MSG#1 -> MSG#2
			 *  MCNT 1:                   MSG#2
			 */
			msgout >>= 8;
			msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
		}
		msgout |= MV_VALID;	/* MV valid */
		msgout |= (unsigned int)data->msgout_len; /* len */
	} else {
		/* data->msgout_len > 3 */
		msgout = 0;
	}

	// nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n", nsp32_read2(base, SEL_TIME_OUT));
	// nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);

	/*
	 * setup asic parameter
	 */
	memset(param, 0, sizeof(nsp32_autoparam));

	/* cdb -- presumably one CDB byte per 32-bit slot in the parameter
	 * area, hence the stride of 4; TODO(review): confirm against the
	 * nsp32_autoparam layout */
	for (i = 0; i < SCpnt->cmd_len; i++) {
		param->cdb[4 * i] = SCpnt->cmnd[i];
	}

	/* outgoing messages */
	param->msgout = cpu_to_le32(msgout);

	/* syncreg, ackwidth, target id, SREQ sampling rate */
	param->syncreg = data->cur_target->syncreg;
	param->ackwidth = data->cur_target->ackwidth;
	param->target_id = BIT(host_id) | BIT(target);
	param->sample_reg = data->cur_target->sample_reg;

	// nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "sample rate=0x%x\n", data->cur_target->sample_reg);

	/* command control */
	param->command_control = cpu_to_le16(CLEAR_CDB_FIFO_POINTER |
					     AUTOSCSI_START |
					     AUTO_MSGIN_00_OR_04 |
					     AUTO_MSGIN_02 |
					     AUTO_ATN );

	/* transfer control */
	s = 0;
	switch (data->trans_method) {
	case NSP32_TRANSFER_BUSMASTER:
		s |= BM_START;
		break;
	case NSP32_TRANSFER_MMIO:
		s |= CB_MMIO_MODE;
		break;
	case NSP32_TRANSFER_PIO:
		s |= CB_IO_MODE;
		break;
	default:
		nsp32_msg(KERN_ERR, "unknown trans_method");
		break;
	}

	/*
	 * OR-ed BLIEND_MODE, FIFO intr is decreased, instead of PCI bus waits.
	 * For bus master transfer, it's taken off.
	 */
	s |= (TRANSFER_GO | ALL_COUNTER_CLR);
	param->transfer_control = cpu_to_le16(s);

	/* sg table addr */
	param->sgt_pointer = cpu_to_le32(data->cur_lunt->sglun_paddr);

	/*
	 * transfer parameter to ASIC and start the auto-parameter sequence
	 */
	nsp32_write4(base, SGT_ADR, data->auto_paddr);
	nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER |
					    AUTO_PARAMETER );

	/*
	 * Check arbitration
	 */
	ret = nsp32_arbitration(SCpnt, base);

	return ret;
}
/*
* Selection with AUTO SCSI (without AUTO PARAMETER)
*/
static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
unsigned int host_id = SCpnt->device->host->this_id;
unsigned char target = scmd_id(SCpnt);
unsigned char phase;
int status;
unsigned short command = 0;
unsigned int msgout = 0;
unsigned short execph;
int i;
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");
/*
* IRQ disable
*/
nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
/*
* check bus line
*/
phase = nsp32_read1(base, SCSI_BUS_MONITOR);
if(((phase & BUSMON_BSY) == 1) || (phase & BUSMON_SEL) == 1) {
nsp32_msg(KERN_WARNING, "bus busy");
SCpnt->result = DID_BUS_BUSY << 16;
status = 1;
goto out;
}
/*
* clear execph
*/
execph = nsp32_read2(base, SCSI_EXECUTE_PHASE);
/*
* clear FIFO counter to set CDBs
*/
nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER);
/*
* set CDB0 - CDB15
*/
for (i = 0; i < SCpnt->cmd_len; i++) {
nsp32_write1(base, COMMAND_DATA, SCpnt->cmnd[i]);
}
nsp32_dbg(NSP32_DEBUG_CDB_CONTENTS, "CDB[0]=[0x%x]", SCpnt->cmnd[0]);
/*
* set SCSIOUT LATCH(initiator)/TARGET(target) (OR-ed) ID
*/
nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID, BIT(host_id) | BIT(target));
/*
* set SCSI MSGOUT REG
*
* Note: If the range of msgout_len is 1 - 3, fill scsi_msgout.
* over 3 messages needs another routine.
*/
if (data->msgout_len == 0) {
nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!");
SCpnt->result = DID_ERROR << 16;
status = 1;
goto out;
} else if (data->msgout_len > 0 && data->msgout_len <= 3) {
msgout = 0;
for (i = 0; i < data->msgout_len; i++) {
/*
* the sending order of the message is:
* MCNT 3: MSG#0 -> MSG#1 -> MSG#2
* MCNT 2: MSG#1 -> MSG#2
* MCNT 1: MSG#2
*/
msgout >>= 8;
msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
}
msgout |= MV_VALID; /* MV valid */
msgout |= (unsigned int)data->msgout_len; /* len */
nsp32_write4(base, SCSI_MSG_OUT, msgout);
} else {
/* data->msgout_len > 3 */
nsp32_write4(base, SCSI_MSG_OUT, 0);
}
/*
* set selection timeout(= 250ms)
*/
nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);
/*
* set SREQ hazard killer sampling rate
*
* TODO: sample_rate (BASE+0F) is 0 when internal clock = 40MHz.
* check other internal clock!
*/
nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg);
/*
* clear Arbit
*/
nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR);
/*
* set SYNCREG
* Don't set BM_START_ADR before setting this register.
*/
nsp32_write1(base, SYNC_REG, data->cur_target->syncreg);
/*
* set ACKWIDTH
*/
nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth);
nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
"syncreg=0x%x, ackwidth=0x%x, sgtpaddr=0x%x, id=0x%x",
nsp32_read1(base, SYNC_REG), nsp32_read1(base, ACK_WIDTH),
nsp32_read4(base, SGT_ADR), nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "msgout_len=%d, msgout=0x%x",
data->msgout_len, msgout);
/*
* set SGT ADDR (physical address)
*/
nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr);
/*
* set TRANSFER CONTROL REG
*/
command = 0;
command |= (TRANSFER_GO | ALL_COUNTER_CLR);
if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
if (scsi_bufflen(SCpnt) > 0) {
command |= BM_START;
}
} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
command |= CB_MMIO_MODE;
} else if (data->trans_method & NSP32_TRANSFER_PIO) {
command |= CB_IO_MODE;
}
nsp32_write2(base, TRANSFER_CONTROL, command);
/*
* start AUTO SCSI, kick off arbitration
*/
command = (CLEAR_CDB_FIFO_POINTER |
AUTOSCSI_START |
AUTO_MSGIN_00_OR_04 |
AUTO_MSGIN_02 |
AUTO_ATN );
nsp32_write2(base, COMMAND_CONTROL, command);
/*
* Check arbitration
*/
status = nsp32_arbitration(SCpnt, base);
out:
/*
* IRQ enable
*/
nsp32_write2(base, IRQ_CONTROL, 0);
return status;
}
/*
 * Arbitration Status Check
 *
 * Poll ARBIT_STATUS until the chip reports a win or a failure, or the
 * poll count exceeds ARBIT_TIMEOUT_TIME.  On a win the PCI LED is
 * turned on and the command result is DID_OK; on failure or timeout
 * SCpnt->result is set and FALSE is returned.  The arbitration latch
 * is cleared in every case before returning.
 *
 * Note: arbitration delay is minimally 2.4us per the SCSI spec, so a
 * simple read loop serves as a coarse-grained wait.
 */
static int nsp32_arbitration(struct scsi_cmnd *SCpnt, unsigned int base)
{
	unsigned char arbit;
	unsigned char decided;
	int count = 0;
	int status = TRUE;

	do {
		arbit = nsp32_read1(base, ARBIT_STATUS);
		count++;
		decided = arbit & (ARBIT_WIN | ARBIT_FAIL);
	} while (decided == 0 && count <= ARBIT_TIMEOUT_TIME);

	nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
		  "arbit: 0x%x, delay time: %d", arbit, count);

	if (arbit & ARBIT_WIN) {
		/* Arbitration succeeded */
		SCpnt->result = DID_OK << 16;
		nsp32_index_write1(base, EXT_PORT, LED_ON); /* PCI LED on */
	} else if (arbit & ARBIT_FAIL) {
		/* Arbitration failed */
		SCpnt->result = DID_BUS_BUSY << 16;
		status = FALSE;
	} else {
		/*
		 * unknown error or ARBIT_GO timeout,
		 * something lock up! guess no connection.
		 */
		nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "arbit timeout");
		SCpnt->result = DID_NO_CONNECT << 16;
		status = FALSE;
	}

	/* clear the arbitration latch */
	nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR);

	return status;
}
/*
 * reselection
 *
 * Note: This reselection routine is called from msgin_occur,
 *	 reselection target id&lun must be already set.
 *	 SCSI-2 says IDENTIFY implies RESTORE_POINTER operation.
 *
 * Decodes the reselecting target's SCSI ID from the RESELECT_ID bitmap
 * (after masking off our own host_id bit), validates that a command
 * nexus exists for that ID/LUN, and switches the driver's current-nexus
 * pointers to it.  Returns TRUE on success, FALSE when the reselection
 * is unexpected (the caller should then send a reject message).
 */
static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int host_id = SCpnt->device->host->this_id;
	unsigned int base = SCpnt->device->host->io_port;
	unsigned char tmpid, newid;

	nsp32_dbg(NSP32_DEBUG_RESELECTION, "enter");

	/*
	 * calculate reselected SCSI ID: index of the lowest set bit that
	 * is not our own ID
	 */
	tmpid = nsp32_read1(base, RESELECT_ID);
	tmpid &= (~BIT(host_id));
	newid = 0;
	while (tmpid) {
		if (tmpid & 1) {
			break;
		}
		tmpid >>= 1;
		newid++;
	}

	/*
	 * If reselected New ID:LUN is not existed
	 * or current nexus is not existed, unexpected
	 * reselection is occurred. Send reject message.
	 */
	if (newid >= ARRAY_SIZE(data->lunt) || newlun >= ARRAY_SIZE(data->lunt[0])) {
		nsp32_msg(KERN_WARNING, "unknown id/lun");
		return FALSE;
	} else if(data->lunt[newid][newlun].SCpnt == NULL) {
		nsp32_msg(KERN_WARNING, "no SCSI command is processing");
		return FALSE;
	}

	data->cur_id = newid;
	data->cur_lun = newlun;
	data->cur_target = &(data->target[newid]);
	data->cur_lunt = &(data->lunt[newid][newlun]);

	/* reset SACK/SavedACK counter (or ALL clear?) */
	nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);

	return TRUE;
}
/*
 * nsp32_setup_sg_table - build scatter gather list for transfer data
 *			  with bus master.
 *
 * Note: NinjaSCSI-32Bi/UDE bus master can not transfer over 64KB at a time.
 *
 * Maps the command's data via scsi_dma_map() and fills the driver's
 * little-endian SG table; the last element is tagged with SGTEND.
 * Returns TRUE on success (including the zero-length/no-mapping case),
 * FALSE on mapping failure or when an element exceeds 64KB.
 *
 * Fix: le32_to_cpu() yields a u32, but the log formats use "%lx";
 * passing a u32 for %lx is undefined varargs behaviour on 64-bit.
 * The arguments are now cast to unsigned long to match the format.
 */
static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	struct scatterlist *sg;
	nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
	int num, i;
	u32_le l;

	if (sgt == NULL) {
		nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null");
		return FALSE;
	}

	num = scsi_dma_map(SCpnt);
	if (!num)
		return TRUE;	/* no data to transfer */
	else if (num < 0)
		return FALSE;	/* DMA mapping failed */
	else {
		scsi_for_each_sg(SCpnt, sg, num, i) {
			/*
			 * Build nsp32_sglist, substitute sg dma addresses.
			 */
			sgt[i].addr = cpu_to_le32(sg_dma_address(sg));
			sgt[i].len  = cpu_to_le32(sg_dma_len(sg));

			if (le32_to_cpu(sgt[i].len) > 0x10000) {
				nsp32_msg(KERN_ERR,
					  "can't transfer over 64KB at a time, size=0x%lx",
					  (unsigned long)le32_to_cpu(sgt[i].len));
				return FALSE;
			}
			nsp32_dbg(NSP32_DEBUG_SGLIST,
				  "num 0x%x : addr 0x%lx len 0x%lx",
				  i,
				  (unsigned long)le32_to_cpu(sgt[i].addr),
				  (unsigned long)le32_to_cpu(sgt[i].len));
		}

		/* set end mark on the final element */
		l = le32_to_cpu(sgt[num-1].len);
		sgt[num-1].len = cpu_to_le32(l | SGTEND);
	}

	return TRUE;
}
/*
 * nsp32_queuecommand_lck - queue one SCSI command (host lock held).
 *
 * This driver handles a single outstanding command (data->CurrentSC);
 * a request arriving while one is in flight is failed immediately.
 * Sets up per-nexus state and the SG table, builds the outgoing
 * IDENTIFY (plus SDTR on first contact after reset, depending on the
 * trans_mode option), then starts selection via AUTO PARAMETER or
 * plain AUTO SCSI according to the auto_param option.  Always returns
 * 0; failures are reported through SCpnt->result and completion.
 */
static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	nsp32_target *target;
	nsp32_lunt *cur_lunt;
	int ret;

	nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
		  "enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x "
		  "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
		  SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
		  scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));

	/* only one command may be outstanding at a time */
	if (data->CurrentSC != NULL) {
		nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
		data->CurrentSC = NULL;
		SCpnt->result = DID_NO_CONNECT << 16;
		done(SCpnt);
		return 0;
	}

	/* check target ID is not same as this initiator ID */
	if (scmd_id(SCpnt) == SCpnt->device->host->this_id) {
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "terget==host???");
		SCpnt->result = DID_BAD_TARGET << 16;
		done(SCpnt);
		return 0;
	}

	/* check target LUN is allowable value */
	if (SCpnt->device->lun >= MAX_LUN) {
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "no more lun");
		SCpnt->result = DID_BAD_TARGET << 16;
		done(SCpnt);
		return 0;
	}

	show_command(SCpnt);

	/* take ownership of the command */
	SCpnt->scsi_done = done;
	data->CurrentSC = SCpnt;
	SCpnt->SCp.Status = CHECK_CONDITION;
	SCpnt->SCp.Message = 0;
	scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));

	SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
	SCpnt->SCp.this_residual = scsi_bufflen(SCpnt);
	SCpnt->SCp.buffer = NULL;
	SCpnt->SCp.buffers_residual = 0;

	/* initialize data */
	data->msgout_len = 0;
	data->msgin_len = 0;
	cur_lunt = &(data->lunt[SCpnt->device->id][SCpnt->device->lun]);
	cur_lunt->SCpnt = SCpnt;
	cur_lunt->save_datp = 0;
	cur_lunt->msgin03 = FALSE;
	data->cur_lunt = cur_lunt;
	data->cur_id = SCpnt->device->id;
	data->cur_lun = SCpnt->device->lun;

	ret = nsp32_setup_sg_table(SCpnt);
	if (ret == FALSE) {
		nsp32_msg(KERN_ERR, "SGT fail");
		SCpnt->result = DID_ERROR << 16;
		nsp32_scsi_done(SCpnt);
		return 0;
	}

	/* Build IDENTIFY */
	nsp32_build_identify(SCpnt);

	/*
	 * If target is the first time to transfer after the reset
	 * (target don't have SDTR_DONE and SDTR_INITIATOR), sync
	 * message SDTR is needed to do synchronous transfer.
	 */
	target = &data->target[scmd_id(SCpnt)];
	data->cur_target = target;

	if (!(target->sync_flag & (SDTR_DONE | SDTR_INITIATOR | SDTR_TARGET))) {
		unsigned char period, offset;

		if (trans_mode != ASYNC_MODE) {
			nsp32_set_max_sync(data, target, &period, &offset);
			nsp32_build_sdtr(SCpnt, period, offset);
			target->sync_flag |= SDTR_INITIATOR;
		} else {
			nsp32_set_async(data, target);
			target->sync_flag |= SDTR_DONE;
		}

		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
			  "SDTR: entry: %d start_period: 0x%x offset: 0x%x\n",
			  target->limit_entry, period, offset);
	} else if (target->sync_flag & SDTR_INITIATOR) {
		/*
		 * It was negotiating SDTR with target, sending from the
		 * initiator, but there are no chance to remove this flag.
		 * Set async because we don't get proper negotiation.
		 */
		nsp32_set_async(data, target);
		target->sync_flag &= ~SDTR_INITIATOR;
		target->sync_flag |= SDTR_DONE;

		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
			  "SDTR_INITIATOR: fall back to async");
	} else if (target->sync_flag & SDTR_TARGET) {
		/*
		 * It was negotiating SDTR with target, sending from target,
		 * but there are no chance to remove this flag. Set async
		 * because we don't get proper negotiation.
		 */
		nsp32_set_async(data, target);
		target->sync_flag &= ~SDTR_TARGET;
		target->sync_flag |= SDTR_DONE;

		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
			  "Unknown SDTR from target is reached, fall back to async.");
	}

	nsp32_dbg(NSP32_DEBUG_TARGETFLAG,
		  "target: %d sync_flag: 0x%x syncreg: 0x%x ackwidth: 0x%x",
		  SCpnt->device->id, target->sync_flag, target->syncreg,
		  target->ackwidth);

	/* Selection */
	if (auto_param == 0) {
		ret = nsp32_selection_autopara(SCpnt);
	} else {
		ret = nsp32_selection_autoscsi(SCpnt);
	}

	if (ret != TRUE) {
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "selection fail");
		nsp32_scsi_done(SCpnt);
	}

	return 0;
}

/* locked wrapper generated by the SCSI midlayer helper macro */
static DEF_SCSI_QCMD(nsp32_queuecommand)
/*
 * nsp32hw_init - initialize asic to a known state.
 *
 * Fixes up the latency/cache config if unset, masks and drains pending
 * interrupts, programs FIFO thresholds according to the transfer
 * method, sets clock divider, bus-master cycle mode, MISC register,
 * termination power, timers, and the IRQ select mask, then unmasks
 * interrupts and turns the PCI LED off.  Always returns TRUE.
 */
static int nsp32hw_init(nsp32_hw_data *data)
{
	unsigned int base = data->BaseAddress;
	unsigned short irq_stat;
	unsigned long lc_reg;
	unsigned char power;

	/* ensure a non-zero latency timer value */
	lc_reg = nsp32_index_read4(base, CFG_LATE_CACHE);
	if ((lc_reg & 0xff00) == 0) {
		lc_reg |= (0x20 << 8);
		nsp32_index_write2(base, CFG_LATE_CACHE, lc_reg & 0xffff);
	}

	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
	nsp32_write2(base, TRANSFER_CONTROL, 0);
	nsp32_write4(base, BM_CNT, 0);
	nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);

	/* drain any stale interrupt status */
	do {
		irq_stat = nsp32_read2(base, IRQ_STATUS);
		nsp32_dbg(NSP32_DEBUG_INIT, "irq_stat 0x%x", irq_stat);
	} while (irq_stat & IRQSTATUS_ANY_IRQ);

	/*
	 * Fill FIFO_FULL_SHLD, FIFO_EMPTY_SHLD. Below parameter is
	 * designated by specification.
	 */
	if ((data->trans_method & NSP32_TRANSFER_PIO) ||
	    (data->trans_method & NSP32_TRANSFER_MMIO)) {
		nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT, 0x40);
		nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x40);
	} else if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
		nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT, 0x10);
		nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x60);
	} else {
		nsp32_dbg(NSP32_DEBUG_INIT, "unknown transfer mode");
	}

	nsp32_dbg(NSP32_DEBUG_INIT, "full 0x%x emp 0x%x",
		  nsp32_index_read1(base, FIFO_FULL_SHLD_COUNT),
		  nsp32_index_read1(base, FIFO_EMPTY_SHLD_COUNT));

	nsp32_index_write1(base, CLOCK_DIV, data->clock);
	nsp32_index_write1(base, BM_CYCLE, MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD);
	nsp32_write1(base, PARITY_CONTROL, 0); /* parity check is disable */

	/*
	 * initialize MISC_WRRD register
	 *
	 * Note: Designated parameters is obeyed as following:
	 *	MISC_SCSI_DIRECTION_DETECTOR_SELECT: It must be set.
	 *	MISC_MASTER_TERMINATION_SELECT:      It must be set.
	 *	MISC_BMREQ_NEGATE_TIMING_SEL:        It should be set.
	 *	MISC_AUTOSEL_TIMING_SEL:             It should be set.
	 *	MISC_BMSTOP_CHANGE2_NONDATA_PHASE:   It should be set.
	 *	MISC_DELAYED_BMSTART:                It's selected for safety.
	 *
	 * Note: If MISC_BMSTOP_CHANGE2_NONDATA_PHASE is set, then
	 *	we have to set TRANSFERCONTROL_BM_START as 0 and set
	 *	appropriate value before restarting bus master transfer.
	 */
	nsp32_index_write2(base, MISC_WR,
			   (SCSI_DIRECTION_DETECTOR_SELECT |
			    DELAYED_BMSTART |
			    MASTER_TERMINATION_SELECT |
			    BMREQ_NEGATE_TIMING_SEL |
			    AUTOSEL_TIMING_SEL |
			    BMSTOP_CHANGE2_NONDATA_PHASE));

	/* turn on termination power if the sense line says it is off */
	nsp32_index_write1(base, TERM_PWR_CONTROL, 0);
	power = nsp32_index_read1(base, TERM_PWR_CONTROL);
	if (!(power & SENSE)) {
		nsp32_msg(KERN_INFO, "term power on");
		nsp32_index_write1(base, TERM_PWR_CONTROL, BPWR);
	}

	nsp32_write2(base, TIMER_SET, TIMER_STOP);
	nsp32_write2(base, TIMER_SET, TIMER_STOP); /* Required 2 times */

	nsp32_write1(base, SYNC_REG, 0);
	nsp32_write1(base, ACK_WIDTH, 0);
	nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);

	/*
	 * enable to select designated IRQ (except for
	 * IRQSELECT_SERR, IRQSELECT_PERR, IRQSELECT_BMCNTERR)
	 */
	nsp32_index_write2(base, IRQ_SELECT, IRQSELECT_TIMER_IRQ |
					     IRQSELECT_SCSIRESET_IRQ |
					     IRQSELECT_FIFO_SHLD_IRQ |
					     IRQSELECT_RESELECT_IRQ |
					     IRQSELECT_PHASE_CHANGE_IRQ |
					     IRQSELECT_AUTO_SCSI_SEQ_IRQ |
					  // IRQSELECT_BMCNTERR_IRQ |
					     IRQSELECT_TARGET_ABORT_IRQ |
					     IRQSELECT_MASTER_ABORT_IRQ );

	nsp32_write2(base, IRQ_CONTROL, 0);

	/* PCI LED off */
	nsp32_index_write1(base, EXT_PORT_DDR, LED_OFF);
	nsp32_index_write1(base, EXT_PORT, LED_OFF);

	return TRUE;
}
/*
 * do_nsp32_isr - interrupt routine.
 *
 * Runs under the host lock.  Reads IRQ_STATUS and dispatches in order:
 * shared/foreign interrupt (bail out), card removal (0xffff reads),
 * timer expiry, external bus reset, AutoSCSI sequence completion
 * (selection timeout, msg in/out, data phases, bus free, status,
 * illegal phase), FIFO threshold, phase change, PCI and BMCNTERR
 * events.  Interrupts from the chip are masked on entry to the handled
 * path and unmasked at "out".
 */
static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
{
	nsp32_hw_data *data = dev_id;
	unsigned int base = data->BaseAddress;
	struct scsi_cmnd *SCpnt = data->CurrentSC;
	unsigned short auto_stat, irq_stat, trans_stat;
	unsigned char busmon, busphase;
	unsigned long flags;
	int ret;
	int handled = 0;
	struct Scsi_Host *host = data->Host;

	spin_lock_irqsave(host->host_lock, flags);

	/*
	 * IRQ check, then enable IRQ mask
	 */
	irq_stat = nsp32_read2(base, IRQ_STATUS);
	nsp32_dbg(NSP32_DEBUG_INTR,
		  "enter IRQ: %d, IRQstatus: 0x%x", irq, irq_stat);
	/* is this interrupt comes from Ninja asic? */
	if ((irq_stat & IRQSTATUS_ANY_IRQ) == 0) {
		nsp32_dbg(NSP32_DEBUG_INTR, "shared interrupt: irq other 0x%x", irq_stat);
		goto out2;
	}
	handled = 1;
	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);

	busmon = nsp32_read1(base, SCSI_BUS_MONITOR);
	busphase = busmon & BUSMON_PHASE_MASK;

	trans_stat = nsp32_read2(base, TRANSFER_STATUS);
	/* all-ones reads indicate the card has been removed */
	if ((irq_stat == 0xffff) && (trans_stat == 0xffff)) {
		nsp32_msg(KERN_INFO, "card disconnect");
		if (data->CurrentSC != NULL) {
			nsp32_msg(KERN_INFO, "clean up current SCSI command");
			SCpnt->result = DID_BAD_TARGET << 16;
			nsp32_scsi_done(SCpnt);
		}
		goto out;
	}

	/* Timer IRQ */
	if (irq_stat & IRQSTATUS_TIMER_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "timer stop");
		nsp32_write2(base, TIMER_SET, TIMER_STOP);
		goto out;
	}

	/* SCSI reset */
	if (irq_stat & IRQSTATUS_SCSIRESET_IRQ) {
		nsp32_msg(KERN_INFO, "detected someone do bus reset");
		nsp32_do_bus_reset(data);
		if (SCpnt != NULL) {
			SCpnt->result = DID_RESET << 16;
			nsp32_scsi_done(SCpnt);
		}
		goto out;
	}

	/* remaining events all require a current command */
	if (SCpnt == NULL) {
		nsp32_msg(KERN_WARNING, "SCpnt==NULL this can't be happened");
		nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
		goto out;
	}

	/*
	 * AutoSCSI Interrupt.
	 * Note: This interrupt is occurred when AutoSCSI is finished. Then
	 * check SCSIEXECUTEPHASE, and do appropriate action. Each phases are
	 * recorded when AutoSCSI sequencer has been processed.
	 */
	if(irq_stat & IRQSTATUS_AUTOSCSI_IRQ) {
		/* getting SCSI executed phase */
		auto_stat = nsp32_read2(base, SCSI_EXECUTE_PHASE);
		nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);

		/* Selection Timeout, go busfree phase. */
		if (auto_stat & SELECTION_TIMEOUT) {
			nsp32_dbg(NSP32_DEBUG_INTR,
				  "selection timeout occurred");
			SCpnt->result = DID_TIME_OUT << 16;
			nsp32_scsi_done(SCpnt);
			goto out;
		}

		if (auto_stat & MSGOUT_PHASE) {
			/*
			 * MsgOut phase was processed.
			 * If MSG_IN_OCCUER is not set, then MsgOut phase is
			 * completed. Thus, msgout_len must reset. Otherwise,
			 * nothing to do here. If MSG_OUT_OCCUER is occurred,
			 * then we will encounter the condition and check.
			 */
			if (!(auto_stat & MSG_IN_OCCUER) &&
			    (data->msgout_len <= 3)) {
				/*
				 * !MSG_IN_OCCUER && msgout_len <=3
				 *   ---> AutoSCSI with MSGOUTreg is processed.
				 */
				data->msgout_len = 0;
			};
			nsp32_dbg(NSP32_DEBUG_INTR, "MsgOut phase processed");
		}

		/* data remains in the FIFO after a data-in phase --
		 * PIO drain is not implemented (call commented out) */
		if ((auto_stat & DATA_IN_PHASE) &&
		    (scsi_get_resid(SCpnt) > 0) &&
		    ((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) {
			printk( "auto+fifo\n");
			//nsp32_pio_read(SCpnt);
		}

		if (auto_stat & (DATA_IN_PHASE | DATA_OUT_PHASE)) {
			/* DATA_IN_PHASE/DATA_OUT_PHASE was processed. */
			nsp32_dbg(NSP32_DEBUG_INTR,
				  "Data in/out phase processed");
			/* read BMCNT, SGT pointer addr */
			nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx",
				  nsp32_read4(base, BM_CNT));
			nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx",
				  nsp32_read4(base, SGT_ADR));
			nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx",
				  nsp32_read4(base, SACK_CNT));
			nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
				  nsp32_read4(base, SAVED_SACK_CNT));
			scsi_set_resid(SCpnt, 0); /* all data transferred! */
		}

		/*
		 * MsgIn Occur
		 */
		if (auto_stat & MSG_IN_OCCUER) {
			nsp32_msgin_occur(SCpnt, irq_stat, auto_stat);
		}

		/*
		 * MsgOut Occur
		 */
		if (auto_stat & MSG_OUT_OCCUER) {
			nsp32_msgout_occur(SCpnt);
		}

		/*
		 * Bus Free Occur
		 */
		if (auto_stat & BUS_FREE_OCCUER) {
			ret = nsp32_busfree_occur(SCpnt, auto_stat);
			if (ret == TRUE) {
				goto out;
			}
		}

		if (auto_stat & STATUS_PHASE) {
			/*
			 * Read CSB and substitute CSB for SCpnt->result
			 * to save status phase stutas byte.
			 * scsi error handler checks host_byte (DID_*:
			 * low level driver to indicate status), then checks
			 * status_byte (SCSI status byte).
			 */
			SCpnt->result = (int)nsp32_read1(base, SCSI_CSB_IN);
		}

		if (auto_stat & ILLEGAL_PHASE) {
			/* Illegal phase is detected. SACK is not back. */
			nsp32_msg(KERN_WARNING,
				  "AUTO SCSI ILLEGAL PHASE OCCUR!!!!");
			/* TODO: currently we don't have any action... bus reset? */
			/*
			 * To send back SACK, assert, wait, and negate.
			 */
			nsp32_sack_assert(data);
			nsp32_wait_req(data, NEGATE);
			nsp32_sack_negate(data);
		}

		if (auto_stat & COMMAND_PHASE) {
			/* nothing to do */
			nsp32_dbg(NSP32_DEBUG_INTR, "Command phase processed");
		}

		if (auto_stat & AUTOSCSI_BUSY) {
			/* AutoSCSI is running */
		}

		show_autophase(auto_stat);
	}

	/* FIFO_SHLD_IRQ */
	if (irq_stat & IRQSTATUS_FIFO_SHLD_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "FIFO IRQ");

		switch(busphase) {
		case BUSPHASE_DATA_OUT:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/write");
			//nsp32_pio_write(SCpnt);
			break;

		case BUSPHASE_DATA_IN:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/read");
			//nsp32_pio_read(SCpnt);
			break;

		case BUSPHASE_STATUS:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/status");
			SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
			break;

		default:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/other phase");
			nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
			show_busphase(busphase);
			break;
		}
		goto out;
	}

	/* Phase Change IRQ */
	if (irq_stat & IRQSTATUS_PHASE_CHANGE_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "phase change IRQ");

		switch(busphase) {
		case BUSPHASE_MESSAGE_IN:
			nsp32_dbg(NSP32_DEBUG_INTR, "phase chg/msg in");
			nsp32_msgin_occur(SCpnt, irq_stat, 0);
			break;
		default:
			nsp32_msg(KERN_WARNING, "phase chg/other phase?");
			nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x\n",
				  irq_stat, trans_stat);
			show_busphase(busphase);
			break;
		}
		goto out;
	}

	/* PCI_IRQ */
	if (irq_stat & IRQSTATUS_PCI_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "PCI IRQ occurred");
		/* Do nothing */
	}

	/* BMCNTERR_IRQ */
	if (irq_stat & IRQSTATUS_BMCNTERR_IRQ) {
		nsp32_msg(KERN_ERR, "Received unexpected BMCNTERR IRQ! ");
		/*
		 * TODO: To be implemented improving bus master
		 * transfer reliability when BMCNTERR is occurred in
		 * AutoSCSI phase described in specification.
		 */
	}

#if 0
	nsp32_dbg(NSP32_DEBUG_INTR,
		  "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
	show_busphase(busphase);
#endif

 out:
	/* disable IRQ mask */
	nsp32_write2(base, IRQ_CONTROL, 0);

 out2:
	spin_unlock_irqrestore(host->host_lock, flags);

	nsp32_dbg(NSP32_DEBUG_INTR, "exit");

	return IRQ_RETVAL(handled);
}
#undef SPRINTF
#define SPRINTF(args...) seq_printf(m, ##args)

/*
 * nsp32_show_info - emit the /proc (seq_file) status report.
 *
 * Dumps driver/host parameters and the per-target SDTR negotiation
 * state.  CurrentSC is sampled under data->Lock.  Always returns 0.
 */
static int nsp32_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	unsigned long flags;
	nsp32_hw_data *data;
	int hostno;
	unsigned int base;
	unsigned char mode_reg;
	int id, speed;
	long model;

	hostno = host->host_no;
	data = (nsp32_hw_data *)host->hostdata;
	base = host->io_port;

	SPRINTF("NinjaSCSI-32 status\n\n");
	SPRINTF("Driver version: %s, $Revision: 1.33 $\n", nsp32_release_version);
	SPRINTF("SCSI host No.: %d\n", hostno);
	SPRINTF("IRQ: %d\n", host->irq);
	SPRINTF("IO: 0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
	SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n", host->base, host->base + data->MmioLength - 1);
	SPRINTF("sg_tablesize: %d\n", host->sg_tablesize);
	SPRINTF("Chip revision: 0x%x\n", (nsp32_read2(base, INDEX_REG) >> 8) & 0xff);

	mode_reg = nsp32_index_read1(base, CHIP_MODE);
	model = data->pci_devid->driver_data;

#ifdef CONFIG_PM
	SPRINTF("Power Management: %s\n", (mode_reg & OPTF) ? "yes" : "no");
#endif
	SPRINTF("OEM: %ld, %s\n", (mode_reg & (OEM0|OEM1)), nsp32_model[model]);

	spin_lock_irqsave(&(data->Lock), flags);
	SPRINTF("CurrentSC: 0x%p\n\n", data->CurrentSC);
	spin_unlock_irqrestore(&(data->Lock), flags);

	/* per-target sync negotiation state */
	SPRINTF("SDTR status\n");
	for (id = 0; id < ARRAY_SIZE(data->target); id++) {
		SPRINTF("id %d: ", id);

		if (id == host->this_id) {
			SPRINTF("----- NinjaSCSI-32 host adapter\n");
			continue;
		}

		if (data->target[id].sync_flag == SDTR_DONE) {
			if (data->target[id].period == 0 &&
			    data->target[id].offset == ASYNC_OFFSET ) {
				SPRINTF("async");
			} else {
				SPRINTF(" sync");
			}
		} else {
			SPRINTF(" none");
		}

		if (data->target[id].period != 0) {
			/* period is in SDTR units of 4ns -> speed in KB/s */
			speed = 1000000 / (data->target[id].period * 4);

			SPRINTF(" transfer %d.%dMB/s, offset %d",
				speed / 1000,
				speed % 1000,
				data->target[id].offset
				);
		}
		SPRINTF("\n");
	}
	return 0;
}
#undef SPRINTF
/*
 * Reset parameters and call scsi_done for data->cur_lunt.
 * Be careful setting SCpnt->result = DID_* before calling this function.
 *
 * Side effects: stops any running bus-master transfer, completes the
 * command toward the midlayer, and clears all "current command" state
 * (cur_lunt/cur_target/CurrentSC) so a new command can be started.
 */
static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;
	scsi_dma_unmap(SCpnt);
	/*
	 * clear TRANSFERCONTROL_BM_START
	 */
	nsp32_write2(base, TRANSFER_CONTROL, 0);
	nsp32_write4(base, BM_CNT,           0);
	/*
	 * call scsi_done
	 */
	(*SCpnt->scsi_done)(SCpnt);
	/*
	 * reset parameters
	 */
	data->cur_lunt->SCpnt = NULL;
	data->cur_lunt        = NULL;
	data->cur_target      = NULL;
	data->CurrentSC       = NULL;
}
/*
 * Bus Free Occur
 *
 * Current Phase is BUSFREE. AutoSCSI automatically executes the BUSFREE
 * phase with ACK reply when one of the conditions below is matched:
 *	MsgIn 00: Command Complete.
 *	MsgIn 02: Save Data Pointer.
 *	MsgIn 04: Disconnect.
 * In any other case an unexpected BUSFREE is detected.
 *
 * Returns TRUE when the bus free was handled (command completed,
 * disconnected, or failed with DID_ERROR).
 */
static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int base   = SCpnt->device->host->io_port;
	nsp32_dbg(NSP32_DEBUG_BUSFREE, "enter execph=0x%x", execph);
	show_autophase(execph);
	/* stop bus-master transfer and clear the transfer counter */
	nsp32_write4(base, BM_CNT,           0);
	nsp32_write2(base, TRANSFER_CONTROL, 0);
	/*
	 * MsgIn 02: Save Data Pointer
	 *
	 * VALID:
	 *   Save Data Pointer is received. Adjust pointer.
	 *
	 * NO-VALID:
	 *   SCSI-3 says if Save Data Pointer is not received, then we restart
	 *   processing and we can't adjust any SCSI data pointer in next data
	 *   phase.
	 */
	if (execph & MSGIN_02_VALID) {
		nsp32_dbg(NSP32_DEBUG_BUSFREE, "MsgIn02_Valid");
		/*
		 * Check sack_cnt/saved_sack_cnt, then adjust sg table if
		 * needed.
		 */
		if (!(execph & MSGIN_00_VALID) &&
		    ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE))) {
			unsigned int sacklen, s_sacklen;
			/*
			 * Read SACK count and SAVEDSACK count, then compare.
			 */
			sacklen   = nsp32_read4(base, SACK_CNT      );
			s_sacklen = nsp32_read4(base, SAVED_SACK_CNT);
			/*
			 * If SAVEDSACKCNT == 0, it means SavedDataPointer
			 * came after data transferring.
			 */
			if (s_sacklen > 0) {
				/*
				 * Comparing between sack and savedsack to
				 * check the condition of AutoMsgIn03.
				 *
				 * If they are same, set msgin03 == TRUE,
				 * COMMANDCONTROL_AUTO_MSGIN_03 is enabled at
				 * reselection. On the other hand, if they
				 * aren't same, set msgin03 == FALSE, and
				 * COMMANDCONTROL_AUTO_MSGIN_03 is disabled at
				 * reselection.
				 */
				if (sacklen != s_sacklen) {
					data->cur_lunt->msgin03 = FALSE;
				} else {
					data->cur_lunt->msgin03 = TRUE;
				}
				nsp32_adjust_busfree(SCpnt, s_sacklen);
			}
		}
		/* This value has not been substituted with a valid value yet... */
		//data->cur_lunt->save_datp = data->cur_datp;
	} else {
		/*
		 * no processing.
		 */
	}
	if (execph & MSGIN_03_VALID) {
		/* MsgIn03 was valid to be processed. No need processing. */
	}
	/*
	 * target SDTR check
	 */
	if (data->cur_target->sync_flag & SDTR_INITIATOR) {
		/*
		 * SDTR negotiation pulled by the initiator has not
		 * finished yet. Fall back to ASYNC mode.
		 */
		nsp32_set_async(data, data->cur_target);
		data->cur_target->sync_flag &= ~SDTR_INITIATOR;
		data->cur_target->sync_flag |= SDTR_DONE;
	} else if (data->cur_target->sync_flag & SDTR_TARGET) {
		/*
		 * SDTR negotiation pulled by the target has been
		 * negotiating.
		 */
		if (execph & (MSGIN_00_VALID | MSGIN_04_VALID)) {
			/*
			 * If valid message is received, then
			 * negotiation is succeeded.
			 */
		} else {
			/*
			 * On the contrary, if unexpected bus free is
			 * occurred, then negotiation is failed. Fall
			 * back to ASYNC mode.
			 */
			nsp32_set_async(data, data->cur_target);
		}
		data->cur_target->sync_flag &= ~SDTR_TARGET;
		data->cur_target->sync_flag |= SDTR_DONE;
	}
	/*
	 * It is always ensured by SCSI standard that initiator
	 * switches into Bus Free Phase after
	 * receiving message 00 (Command Complete), 04 (Disconnect).
	 * It's the reason that processing here is valid.
	 */
	if (execph & MSGIN_00_VALID) {
		/* MsgIn 00: Command Complete */
		nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete");
		SCpnt->SCp.Status  = nsp32_read1(base, SCSI_CSB_IN);
		SCpnt->SCp.Message = 0;
		nsp32_dbg(NSP32_DEBUG_BUSFREE,
			  "normal end stat=0x%x resid=0x%x\n",
			  SCpnt->SCp.Status, scsi_get_resid(SCpnt));
		SCpnt->result = (DID_OK             << 16) |
				(SCpnt->SCp.Message <<  8) |
				(SCpnt->SCp.Status  <<  0);
		nsp32_scsi_done(SCpnt);
		/* All operation is done */
		return TRUE;
	} else if (execph & MSGIN_04_VALID) {
		/* MsgIn 04: Disconnect */
		SCpnt->SCp.Status  = nsp32_read1(base, SCSI_CSB_IN);
		SCpnt->SCp.Message = 4;
		nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect");
		return TRUE;
	} else {
		/* Unexpected bus free */
		nsp32_msg(KERN_WARNING, "unexpected bus free occurred");
		/* DID_ERROR? */
		//SCpnt->result   = (DID_OK << 16) | (SCpnt->SCp.Message << 8) | (SCpnt->SCp.Status << 0);
		SCpnt->result = DID_ERROR << 16;
		nsp32_scsi_done(SCpnt);
		return TRUE;
	}
	/* not reached: every branch above returns */
	return FALSE;
}
/*
 * nsp32_adjust_busfree - adjust the SG table after a Save Data Pointer
 * @SCpnt:     command whose SG table is adjusted
 * @s_sacklen: saved SCSI ACK counter (bytes actually transferred)
 *
 * Note: This driver adjusts the SG table using the SCSI ACK
 *       counter instead of the BMCNT counter!
 */
static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	int                   old_entry = data->cur_entry;
	int                   new_entry;
	int                   sg_num = data->cur_lunt->sg_num;
	nsp32_sgtable *sgt    = data->cur_lunt->sglun->sgt;
	unsigned int          restlen, sentlen;
	u32_le                len, addr;
	nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));
	/* adjust saved SACK count with 4 byte start address boundary */
	s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3;
	/*
	 * calculate new_entry from sack count and each sgt[].len
	 * calculate the byte which is intent to send
	 */
	sentlen = 0;
	for (new_entry = old_entry; new_entry < sg_num; new_entry++) {
		/* mask off the end-of-table flag bit in the length word */
		sentlen += (le32_to_cpu(sgt[new_entry].len) & ~SGTEND);
		if (sentlen > s_sacklen) {
			break;
		}
	}
	/* all sgt is processed */
	if (new_entry == sg_num) {
		goto last;
	}
	if (sentlen == s_sacklen) {
		/* XXX: confirm it's ok or not */
		/* In this case, it's ok because we are at
		   the head element of the sg. restlen is correctly calculated. */
	}
	/* calculate the rest length for transferring */
	restlen = sentlen - s_sacklen;
	/* update adjusting current SG table entry: advance the entry's
	 * address past the bytes already sent and shrink its length */
	len  = le32_to_cpu(sgt[new_entry].len);
	addr = le32_to_cpu(sgt[new_entry].addr);
	addr += (len - restlen);
	sgt[new_entry].addr = cpu_to_le32(addr);
	sgt[new_entry].len  = cpu_to_le32(restlen);
	/* set cur_entry with new_entry */
	data->cur_entry = new_entry;
	return;
 last:
	if (scsi_get_resid(SCpnt) < sentlen) {
		nsp32_msg(KERN_ERR, "resid underflow");
	}
	scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) - sentlen);
	nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", scsi_get_resid(SCpnt));
	/* update hostdata and lun */
	return;
}
/*
 * Called when a MsgOut phase occurs.
 *
 * NinjaSCSI-32Bi/UDE automatically processes up to 3 messages in the
 * message out phase. If there are more than 3 messages, the HBA raises
 * an interrupt and we have to send them by hand, one byte per REQ/ACK
 * handshake, restarting AutoSCSI on the final byte.
 */
static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int base   = SCpnt->device->host->io_port;
	int i;

	nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
		  "enter: msgout_len: 0x%x", data->msgout_len);

	/*
	 * If MsgOut phase occurred without having any message queued,
	 * then No_Operation is sent (SCSI-2).
	 */
	if (data->msgout_len == 0) {
		nsp32_build_nop(SCpnt);
	}

	/*
	 * send messages
	 */
	for (i = 0; i < data->msgout_len; i++) {
		nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
			  "%d : 0x%x", i, data->msgoutbuf[i]);

		/*
		 * Check REQ is asserted.
		 */
		nsp32_wait_req(data, ASSERT);

		if (i == (data->msgout_len - 1)) {
			/*
			 * If this is the last message, arm the AutoSCSI
			 * restart before sending back the ack. The AutoSCSI
			 * restart automatically negates the ATN signal.
			 */
			nsp32_write2(base, COMMAND_CONTROL,
				     (CLEAR_CDB_FIFO_POINTER |
				      AUTO_COMMAND_PHASE     |
				      AUTOSCSI_RESTART       |
				      AUTO_MSGIN_00_OR_04    |
				      AUTO_MSGIN_02          ));
		}
		/*
		 * Write data with SACK, then wait until SACK is
		 * automatically negated.
		 */
		nsp32_write1(base, SCSI_DATA_WITH_ACK, data->msgoutbuf[i]);
		nsp32_wait_sack(data, NEGATE);

		nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "bus: 0x%x\n",
			  nsp32_read1(base, SCSI_BUS_MONITOR));
	}

	data->msgout_len = 0;

	nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "exit");
}
/*
 * Restart AutoSCSI
 *
 * Note: Restarting AutoSCSI requires setting:
 *	SYNC_REG, ACK_WIDTH, SGT_ADR, TRANSFER_CONTROL
 * (in that order — see the per-register comments below).
 *
 * @command: extra COMMAND_CONTROL bits to OR into the restart command.
 */
static void nsp32_restart_autoscsi(struct scsi_cmnd *SCpnt, unsigned short command)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = data->BaseAddress;
	unsigned short transfer = 0;
	nsp32_dbg(NSP32_DEBUG_RESTART, "enter");
	if (data->cur_target == NULL || data->cur_lunt == NULL) {
		/* NOTE(review): only logs — execution continues and the
		 * writes below still use per-target state; confirm callers
		 * guarantee both pointers are valid here. */
		nsp32_msg(KERN_ERR, "Target or Lun is invalid");
	}
	/*
	 * set SYNC_REG
	 * Don't set BM_START_ADR before setting this register.
	 */
	nsp32_write1(base, SYNC_REG, data->cur_target->syncreg);
	/*
	 * set ACKWIDTH
	 */
	nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth);
	/*
	 * set SREQ hazard killer sampling rate
	 */
	nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg);
	/*
	 * set SGT ADDR (physical address)
	 */
	nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr);
	/*
	 * set TRANSFER CONTROL REG
	 */
	transfer = 0;
	transfer |= (TRANSFER_GO | ALL_COUNTER_CLR);
	if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
		/* only start bus mastering when there is data to move */
		if (scsi_bufflen(SCpnt) > 0) {
			transfer |= BM_START;
		}
	} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
		transfer |= CB_MMIO_MODE;
	} else if (data->trans_method & NSP32_TRANSFER_PIO) {
		transfer |= CB_IO_MODE;
	}
	nsp32_write2(base, TRANSFER_CONTROL, transfer);
	/*
	 * restart AutoSCSI
	 *
	 * TODO: COMMANDCONTROL_AUTO_COMMAND_PHASE is needed ?
	 */
	command |= (CLEAR_CDB_FIFO_POINTER |
		    AUTO_COMMAND_PHASE    |
		    AUTOSCSI_RESTART      );
	nsp32_write2(base, COMMAND_CONTROL, command);
	nsp32_dbg(NSP32_DEBUG_RESTART, "exit");
}
/*
 * Message-In handling that cannot run automatically.
 *
 * Processes one incoming message byte per call: reads the byte, asserts
 * ACK, interprets complete messages (identify/1-byte/2-byte/extended),
 * then restarts AutoSCSI and completes the REQ-ACK handshake.
 * Unknown or unsupported messages are answered with MESSAGE_REJECT.
 */
static void nsp32_msgin_occur(struct scsi_cmnd     *SCpnt,
			      unsigned long  irq_status,
			      unsigned short execph)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;
	unsigned char  msg;
	unsigned char  msgtype;
	unsigned char  newlun;
	unsigned short command  = 0;
	int            msgclear = TRUE;
	long           new_sgtp;
	int            ret;
	/*
	 * read first message
	 *    Use SCSIDATA_W_ACK instead of SCSIDATAIN, because the procedure
	 *    of Message-In have to be processed before sending back SCSI ACK.
	 * NOTE(review): the code below actually reads SCSI_DATA_IN and
	 * asserts ACK separately via nsp32_sack_assert(); confirm the
	 * comment above reflects an older revision.
	 */
	msg = nsp32_read1(base, SCSI_DATA_IN);
	data->msginbuf[(unsigned char)data->msgin_len] = msg;
	msgtype = data->msginbuf[0];
	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR,
		  "enter: msglen: 0x%x msgin: 0x%x msgtype: 0x%x",
		  data->msgin_len, msg, msgtype);
	/*
	 * TODO: We need checking whether bus phase is message in?
	 */
	/*
	 * assert SCSI ACK
	 */
	nsp32_sack_assert(data);
	/*
	 * processing IDENTIFY (bit 7 set marks an Identify message)
	 */
	if (msgtype & 0x80) {
		if (!(irq_status & IRQSTATUS_RESELECT_OCCUER)) {
			/* Invalid (non reselect) phase */
			goto reject;
		}
		newlun = msgtype & 0x1f; /* TODO: SPI-3 compliant? */
		ret = nsp32_reselection(SCpnt, newlun);
		if (ret == TRUE) {
			goto restart;
		} else {
			goto reject;
		}
	}
	/*
	 * processing messages except for IDENTIFY
	 *
	 * TODO: Messages are all SCSI-2 terminology. SCSI-3 compliance is TODO.
	 */
	switch (msgtype) {
	/*
	 * 1-byte message
	 */
	case COMMAND_COMPLETE:
	case DISCONNECT:
		/*
		 * These messages should not be occurred.
		 * They should be processed on AutoSCSI sequencer.
		 */
		nsp32_msg(KERN_WARNING,
			  "unexpected message of AutoSCSI MsgIn: 0x%x", msg);
		break;
	case RESTORE_POINTERS:
		/*
		 * AutoMsgIn03 is disabled, and HBA gets this message.
		 */
		if ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE)) {
			unsigned int s_sacklen;
			s_sacklen = nsp32_read4(base, SAVED_SACK_CNT);
			if ((execph & MSGIN_02_VALID) && (s_sacklen > 0)) {
				nsp32_adjust_busfree(SCpnt, s_sacklen);
			} else {
				/* No need to rewrite SGT */
			}
		}
		data->cur_lunt->msgin03 = FALSE;
		/* Update with the new value */
		/* reset SACK/SavedACK counter (or ALL clear?) */
		nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);
		/*
		 * set new sg pointer
		 */
		new_sgtp = data->cur_lunt->sglun_paddr +
			(data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
		nsp32_write4(base, SGT_ADR, new_sgtp);
		break;
	case SAVE_POINTERS:
		/*
		 * These messages should not be occurred.
		 * They should be processed on AutoSCSI sequencer.
		 */
		nsp32_msg (KERN_WARNING,
			   "unexpected message of AutoSCSI MsgIn: SAVE_POINTERS");
		break;
	case MESSAGE_REJECT:
		/* If previous message_out is sending SDTR, and get
		   message_reject from target, SDTR negotiation is failed */
		if (data->cur_target->sync_flag &
				(SDTR_INITIATOR | SDTR_TARGET)) {
			/*
			 * Current target is negotiating SDTR, but it's
			 * failed. Fall back to async transfer mode, and set
			 * SDTR_DONE.
			 */
			nsp32_set_async(data, data->cur_target);
			data->cur_target->sync_flag &= ~SDTR_INITIATOR;
			data->cur_target->sync_flag |= SDTR_DONE;
		}
		break;
	case LINKED_CMD_COMPLETE:
	case LINKED_FLG_CMD_COMPLETE:
		/* queue tag is not supported currently */
		nsp32_msg (KERN_WARNING,
			   "unsupported message: 0x%x", msgtype);
		break;
	case INITIATE_RECOVERY:
		/* staring ECA (Extended Contingent Allegiance) state. */
		/* This message is declined in SPI2 or later. */
		goto reject;
	/*
	 * 2-byte message
	 */
	case SIMPLE_QUEUE_TAG:
	case 0x23:
		/*
		 * 0x23: Ignore_Wide_Residue is not declared in scsi.h.
		 * No support is needed.
		 */
		if (data->msgin_len >= 1) {
			goto reject;
		}
		/* current position is 1-byte of 2 byte */
		msgclear = FALSE;
		break;
	/*
	 * extended message
	 */
	case EXTENDED_MESSAGE:
		if (data->msgin_len < 1) {
			/*
			 * Current position does not reach 2-byte
			 * (2-byte is extended message length).
			 */
			msgclear = FALSE;
			break;
		}
		if ((data->msginbuf[1] + 1) > data->msgin_len) {
			/*
			 * Current extended message has msginbuf[1] + 2
			 * (msgin_len starts counting from 0, so buf[1] + 1).
			 * If current message position is not finished,
			 * continue receiving message.
			 */
			msgclear = FALSE;
			break;
		}
		/*
		 * Reach here means regular length of each type of
		 * extended messages.
		 */
		switch (data->msginbuf[2]) {
		case EXTENDED_MODIFY_DATA_POINTER:
			/* TODO */
			goto reject; /* not implemented yet */
			break;
		case EXTENDED_SDTR:
			/*
			 * Exchange this message between initiator and target.
			 */
			if (data->msgin_len != EXTENDED_SDTR_LEN + 1) {
				/*
				 * received inappropriate message.
				 */
				goto reject;
				break;
			}
			nsp32_analyze_sdtr(SCpnt);
			break;
		case EXTENDED_EXTENDED_IDENTIFY:
			/* SCSI-I only, not supported. */
			goto reject; /* not implemented yet */
			break;
		case EXTENDED_WDTR:
			goto reject; /* not implemented yet */
			break;
		default:
			goto reject;
		}
		break;
	default:
		goto reject;
	}
 restart:
	if (msgclear == TRUE) {
		data->msgin_len = 0;
		/*
		 * If restarting AutoSCSI, but there are some message to out
		 * (msgout_len > 0), set AutoATN, and set SCSIMSGOUT as 0
		 * (MV_VALID = 0). When commandcontrol is written with
		 * AutoSCSI restart, at the same time MsgOutOccur should be
		 * happened (however, such situation is really possible...?).
		 */
		if (data->msgout_len > 0) {
			nsp32_write4(base, SCSI_MSG_OUT, 0);
			command |= AUTO_ATN;
		}
		/*
		 * restart AutoSCSI
		 * If it's failed, COMMANDCONTROL_AUTO_COMMAND_PHASE is needed.
		 */
		command |= (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02);
		/*
		 * If current msgin03 is TRUE, then flag on.
		 */
		if (data->cur_lunt->msgin03 == TRUE) {
			command |= AUTO_MSGIN_03;
		}
		data->cur_lunt->msgin03 = FALSE;
	} else {
		/* message not complete yet — keep accumulating bytes */
		data->msgin_len++;
	}
	/*
	 * restart AutoSCSI
	 */
	nsp32_restart_autoscsi(SCpnt, command);
	/*
	 * wait SCSI REQ negate for REQ-ACK handshake
	 */
	nsp32_wait_req(data, NEGATE);
	/*
	 * negate SCSI ACK
	 */
	nsp32_sack_negate(data);
	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit");
	return;
 reject:
	nsp32_msg(KERN_WARNING,
		  "invalid or unsupported MessageIn, rejected. "
		  "current msg: 0x%x (len: 0x%x), processing msg: 0x%x",
		  msg, data->msgin_len, msgtype);
	nsp32_build_reject(SCpnt);
	data->msgin_len = 0;
	goto restart;
}
/*
 * nsp32_analyze_sdtr - evaluate a received SDTR message
 *
 * Two cases:
 *  - We (the initiator) started the negotiation: validate the target's
 *    answer and either adopt the agreed sync parameters, fall back to
 *    async, or reject.
 *  - The target started the negotiation: clamp its requested parameters
 *    to what the chip supports and send back our own SDTR answer.
 */
static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data   *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	nsp32_target     *target     = data->cur_target;
	nsp32_sync_table *synct;
	unsigned char     get_period = data->msginbuf[3];
	unsigned char     get_offset = data->msginbuf[4];
	int               entry;
	int               syncnum;
	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter");
	synct   = data->synct;
	syncnum = data->syncnum;
	/*
	 * If this initiator sent the SDTR message, then target responds SDTR,
	 * initiator SYNCREG, ACKWIDTH from SDTR parameter.
	 * Messages are not appropriate, then send back reject message.
	 * If initiator did not send the SDTR, but target sends SDTR,
	 * initiator calculates the appropriate parameter and sends back SDTR.
	 */
	if (target->sync_flag & SDTR_INITIATOR) {
		/*
		 * Initiator sent SDTR, the target responds and
		 * send back negotiation SDTR.
		 */
		nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target responds SDTR");
		target->sync_flag &= ~SDTR_INITIATOR;
		target->sync_flag |= SDTR_DONE;
		/*
		 * offset:
		 */
		if (get_offset > SYNC_OFFSET) {
			/*
			 * Negotiation is failed, the target send back
			 * unexpected offset value.
			 */
			goto reject;
		}
		if (get_offset == ASYNC_OFFSET) {
			/*
			 * Negotiation is succeeded, the target want
			 * to fall back into asynchronous transfer mode.
			 */
			goto async;
		}
		/*
		 * period:
		 *    Check whether sync period is too short. If too short,
		 *    fall back to async mode. If it's ok, then investigate
		 *    the received sync period. If sync period is acceptable
		 *    between sync table start_period and end_period, then
		 *    set this I_T nexus as sent offset and period.
		 *    If it's not acceptable, send back reject and fall back
		 *    to async mode.
		 */
		if (get_period < data->synct[0].period_num) {
			/*
			 * Negotiation is failed, the target send back
			 * unexpected period value.
			 */
			goto reject;
		}
		entry = nsp32_search_period_entry(data, target, get_period);
		if (entry < 0) {
			/*
			 * Target want to use long period which is not
			 * acceptable NinjaSCSI-32Bi/UDE.
			 */
			goto reject;
		}
		/*
		 * Set new sync table and offset in this I_T nexus.
		 */
		nsp32_set_sync_entry(data, target, entry, get_offset);
	} else {
		/* Target send SDTR to initiator. */
		nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target send SDTR");
		target->sync_flag |= SDTR_INITIATOR;
		/* offset: clamp to the maximum we support */
		if (get_offset > SYNC_OFFSET) {
			/* send back as SYNC_OFFSET */
			get_offset = SYNC_OFFSET;
		}
		/* period: clamp to the fastest period we support */
		if (get_period < data->synct[0].period_num) {
			get_period = data->synct[0].period_num;
		}
		entry = nsp32_search_period_entry(data, target, get_period);
		if (get_offset == ASYNC_OFFSET || entry < 0) {
			nsp32_set_async(data, target);
			nsp32_build_sdtr(SCpnt, 0, ASYNC_OFFSET);
		} else {
			nsp32_set_sync_entry(data, target, entry, get_offset);
			nsp32_build_sdtr(SCpnt, get_period, get_offset);
		}
	}
	target->period = get_period;
	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit");
	return;
 reject:
	/*
	 * If the current message is unacceptable, send back to the target
	 * with reject message.
	 */
	nsp32_build_reject(SCpnt);
 async:
	nsp32_set_async(data, target);	/* set as ASYNC transfer mode */
	target->period = 0;
	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit: set async");
	return;
}
/*
 * Search for the sync_table entry whose period range covers the given
 * period for this target. Returns the entry index, or a negative value
 * if no entry matches.
 */
static int nsp32_search_period_entry(nsp32_hw_data *data,
				     nsp32_target  *target,
				     unsigned char  period)
{
	int entry;

	/* A corrupted limit_entry would start the scan out of range. */
	if (target->limit_entry >= data->syncnum) {
		nsp32_msg(KERN_ERR, "limit_entry exceeds syncnum!");
		target->limit_entry = 0;
	}

	/* Scan the table from the target's limit; first match wins. */
	for (entry = target->limit_entry; entry < data->syncnum; entry++) {
		if (data->synct[entry].start_period <= period &&
		    period <= data->synct[entry].end_period) {
			return entry;
		}
	}

	/* No entry covers the requested period. */
	return -1;
}
/*
 * Put this I_T nexus into ASYNC transfer mode: clear offset, period,
 * ack width and SREQ sampling, and program syncreg accordingly.
 */
static void nsp32_set_async(nsp32_hw_data *data, nsp32_target *target)
{
	unsigned char period_num;

	period_num = data->synct[target->limit_entry].period_num;

	target->syncreg    = TO_SYNCREG(period_num, ASYNC_OFFSET);
	target->offset     = ASYNC_OFFSET;
	target->period     = 0;
	target->ackwidth   = 0;
	target->sample_reg = 0;

	nsp32_dbg(NSP32_DEBUG_SYNC, "set async");
}
/*
 * Program this I_T nexus for the fastest SYNC transfer the chip allows,
 * and report the chosen period/offset back through the out-parameters.
 */
static void nsp32_set_max_sync(nsp32_hw_data *data,
			       nsp32_target  *target,
			       unsigned char *period,
			       unsigned char *offset)
{
	int limit = target->limit_entry;

	/* Hand the fastest negotiable parameters back to the caller. */
	*period = data->synct[limit].start_period;
	*offset = SYNC_OFFSET;

	target->syncreg    = TO_SYNCREG(data->synct[limit].period_num, *offset);
	target->ackwidth   = data->synct[limit].ackwidth;
	target->offset     = *offset;
	target->sample_reg = 0;	/* disable SREQ sampling */
}
/*
 * Program this I_T nexus with the transfer speed of the given sync-table
 * entry and the negotiated offset, and enable SREQ hazard sampling.
 *
 * @entry:  index into data->synct selected by nsp32_search_period_entry()
 * @offset: negotiated REQ/ACK offset
 */
static void nsp32_set_sync_entry(nsp32_hw_data *data,
				 nsp32_target  *target,
				 int            entry,
				 unsigned char  offset)
{
	unsigned char period, ackwidth, sample_rate;

	/* (the pointless "offset = offset;" self-assignment was removed) */
	period      = data->synct[entry].period_num;
	ackwidth    = data->synct[entry].ackwidth;
	sample_rate = data->synct[entry].sample_rate;

	target->syncreg    = TO_SYNCREG(period, offset);
	target->ackwidth   = ackwidth;
	target->offset     = offset;
	target->sample_reg = sample_rate | SAMPLING_ENABLE;

	nsp32_dbg(NSP32_DEBUG_SYNC, "set sync");
}
/*
 * Poll until SCSI REQ reaches the requested (ASSERT/NEGATE) state.
 *
 * Note: After nsp32_msgin_occur asserts SCSI ACK, the connected target
 * responds by negating SCSI REQ. We must wait for REQ to go negated
 * before negating our ACK to complete the REQ-ACK handshake.
 */
static void nsp32_wait_req(nsp32_hw_data *data, int state)
{
	unsigned int  base = data->BaseAddress;
	unsigned char expect, monitor;
	int           elapsed;

	if (state != ASSERT && state != NEGATE) {
		nsp32_msg(KERN_ERR, "unknown state designation");
	}
	/* REQ is BIT(5) */
	expect = (state == ASSERT) ? BUSMON_REQ : 0;

	/* Bounded busy-wait: one read + 1us delay per iteration. */
	for (elapsed = 0; elapsed < REQSACK_TIMEOUT_TIME; elapsed++) {
		monitor = nsp32_read1(base, SCSI_BUS_MONITOR);
		if ((monitor & BUSMON_REQ) == expect) {
			nsp32_dbg(NSP32_DEBUG_WAIT,
				  "wait_time: %d", elapsed);
			return;
		}
		udelay(1);
	}

	nsp32_msg(KERN_WARNING, "wait REQ timeout, req_bit: 0x%x", expect);
}
/*
 * Poll until SCSI ACK reaches the requested (ASSERT/NEGATE) state.
 */
static void nsp32_wait_sack(nsp32_hw_data *data, int state)
{
	unsigned int  base = data->BaseAddress;
	unsigned char expect, monitor;
	int           elapsed;

	if (state != ASSERT && state != NEGATE) {
		nsp32_msg(KERN_ERR, "unknown state designation");
	}
	/* ACK is BIT(4) */
	expect = (state == ASSERT) ? BUSMON_ACK : 0;

	/* Bounded busy-wait: one read + 1us delay per iteration. */
	for (elapsed = 0; elapsed < REQSACK_TIMEOUT_TIME; elapsed++) {
		monitor = nsp32_read1(base, SCSI_BUS_MONITOR);
		if ((monitor & BUSMON_ACK) == expect) {
			nsp32_dbg(NSP32_DEBUG_WAIT,
				  "wait_time: %d", elapsed);
			return;
		}
		udelay(1);
	}

	nsp32_msg(KERN_WARNING, "wait SACK timeout, ack_bit: 0x%x", expect);
}
/*
 * Assert SCSI ACK.
 *
 * Note: SCSI ACK assertion requires ACKENB=1 and AUTODIRECTION=1, so
 * both are raised together with the ACK bit in a read-modify-write.
 */
static void nsp32_sack_assert(nsp32_hw_data *data)
{
	unsigned int  base = data->BaseAddress;
	unsigned char val;

	val  = nsp32_read1(base, SCSI_BUS_CONTROL);
	val |= (BUSCTL_ACK | AUTODIRECTION | ACKENB);
	nsp32_write1(base, SCSI_BUS_CONTROL, val);
}
/*
 * Negate SCSI ACK: clear only the ACK bit, preserving the other
 * bus-control bits.
 */
static void nsp32_sack_negate(nsp32_hw_data *data)
{
	unsigned int  base = data->BaseAddress;
	unsigned char val;

	val = nsp32_read1(base, SCSI_BUS_CONTROL);
	nsp32_write1(base, SCSI_BUS_CONTROL, val & ~BUSCTL_ACK);
}
/*
 * Note: n_io_port is defined as 0x7f because I/O register port is
 *	 assigned as:
 *	0x800-0x8ff: memory mapped I/O port
 *	0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly)
 *	0xc00-0xfff: CardBus status registers
 *
 * nsp32_detect - probe/attach one NinjaSCSI-32 PCI adapter
 * @pdev: PCI device being attached
 *
 * Allocates the Scsi_Host, sets up DMA buffers (autoparam + SG tables),
 * initializes per-LUN and per-target state, resets the chip and the
 * SCSI bus, claims IRQ and I/O region, and registers the host with the
 * midlayer. Returns 0 on success, 1 on any failure (resources are
 * unwound through the goto chain at the bottom).
 */
static int nsp32_detect(struct pci_dev *pdev)
{
	struct Scsi_Host *host;	/* registered host structure */
	struct resource  *res;
	nsp32_hw_data    *data;
	int               ret;
	int               i, j;
	nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
	/*
	 * register this HBA as SCSI device
	 */
	host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data));
	if (host == NULL) {
		nsp32_msg (KERN_ERR, "failed to scsi register");
		goto err;
	}
	/*
	 * set nsp32_hw_data
	 * (nsp32_data_base was filled in by the PCI probe before this call)
	 */
	data = (nsp32_hw_data *)host->hostdata;
	memcpy(data, &nsp32_data_base, sizeof(nsp32_hw_data));
	host->irq       = data->IrqNumber;
	host->io_port   = data->BaseAddress;
	host->unique_id = data->BaseAddress;
	host->n_io_port	= data->NumAddress;
	host->base      = (unsigned long)data->MmioAddress;
	data->Host      = host;
	spin_lock_init(&(data->Lock));
	data->cur_lunt   = NULL;
	data->cur_target = NULL;
	/*
	 * Bus master transfer mode is supported currently.
	 */
	data->trans_method = NSP32_TRANSFER_BUSMASTER;
	/*
	 * Set clock div, CLOCK_4 (HBA has own external clock, and
	 * dividing * 100ns/4).
	 * Currently CLOCK_4 has only tested, not for CLOCK_2/PCICLK yet.
	 */
	data->clock = CLOCK_4;
	/*
	 * Select appropriate nsp32_sync_table and set I_CLOCKDIV.
	 */
	switch (data->clock) {
	case CLOCK_4:
		/* If data->clock is CLOCK_4, then select 40M sync table. */
		data->synct   = nsp32_sync_table_40M;
		data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M);
		break;
	case CLOCK_2:
		/* If data->clock is CLOCK_2, then select 20M sync table. */
		data->synct   = nsp32_sync_table_20M;
		data->syncnum = ARRAY_SIZE(nsp32_sync_table_20M);
		break;
	case PCICLK:
		/* If data->clock is PCICLK, then select pci sync table. */
		data->synct   = nsp32_sync_table_pci;
		data->syncnum = ARRAY_SIZE(nsp32_sync_table_pci);
		break;
	default:
		nsp32_msg(KERN_WARNING,
			  "Invalid clock div is selected, set CLOCK_4.");
		/* Use default value CLOCK_4 */
		data->clock   = CLOCK_4;
		data->synct   = nsp32_sync_table_40M;
		data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M);
	}
	/*
	 * setup nsp32_lunt
	 */
	/*
	 * setup DMA
	 */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
		goto scsi_unregister;
	}
	/*
	 * allocate autoparam DMA resource.
	 */
	data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
	if (data->autoparam == NULL) {
		nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
		goto scsi_unregister;
	}
	/*
	 * allocate scatter-gather DMA resource.
	 */
	data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
					     &(data->sg_paddr));
	if (data->sg_list == NULL) {
		nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
		goto free_autoparam;
	}
	/* initialize every per-target/per-LUN slot and point it at its
	 * share of the SG DMA region */
	for (i = 0; i < ARRAY_SIZE(data->lunt); i++) {
		for (j = 0; j < ARRAY_SIZE(data->lunt[0]); j++) {
			int offset = i * ARRAY_SIZE(data->lunt[0]) + j;
			nsp32_lunt tmp = {
				.SCpnt       = NULL,
				.save_datp   = 0,
				.msgin03     = FALSE,
				.sg_num      = 0,
				.cur_entry   = 0,
				.sglun       = &(data->sg_list[offset]),
				.sglun_paddr = data->sg_paddr + (offset * sizeof(nsp32_sglun)),
			};
			data->lunt[i][j] = tmp;
		}
	}
	/*
	 * setup target
	 */
	for (i = 0; i < ARRAY_SIZE(data->target); i++) {
		nsp32_target *target = &(data->target[i]);
		target->limit_entry  = 0;
		target->sync_flag    = 0;
		nsp32_set_async(data, target);
	}
	/*
	 * EEPROM check
	 */
	ret = nsp32_getprom_param(data);
	if (ret == FALSE) {
		data->resettime = 3;	/* default 3 */
	}
	/*
	 * setup HBA
	 */
	nsp32hw_init(data);
	snprintf(data->info_str, sizeof(data->info_str),
		 "NinjaSCSI-32Bi/UDE: irq %d, io 0x%lx+0x%x",
		 host->irq, host->io_port, host->n_io_port);
	/*
	 * SCSI bus reset
	 *
	 * Note: It's important to reset SCSI bus in initialization phase.
	 *	 NinjaSCSI-32Bi/UDE HBA EEPROM seems to exchange SDTR when
	 *	 system is coming up, so SCSI devices connected to HBA is set as
	 *	 un-asynchronous mode.  It brings the merit that this HBA is
	 *	 ready to start synchronous transfer without any preparation,
	 *	 but we are difficult to control transfer speed.  In addition,
	 *	 it prevents device transfer speed from effecting EEPROM start-up
	 *	 SDTR.  NinjaSCSI-32Bi/UDE has the feature if EEPROM is set as
	 *	 Auto Mode, then FAST-10M is selected when SCSI devices are
	 *	 connected same or more than 4 devices.  It should be avoided
	 *	 depending on this specification. Thus, resetting the SCSI bus
	 *	 restores all connected SCSI devices to asynchronous mode, then
	 *	 this driver set SDTR safely later, and we can control all SCSI
	 *	 device transfer mode.
	 */
	nsp32_do_bus_reset(data);
	ret = request_irq(host->irq, do_nsp32_isr, IRQF_SHARED, "nsp32", data);
	if (ret < 0) {
		nsp32_msg(KERN_ERR, "Unable to allocate IRQ for NinjaSCSI32 "
			  "SCSI PCI controller. Interrupt: %d", host->irq);
		goto free_sg_list;
	}
	/*
	 * PCI IO register
	 */
	res = request_region(host->io_port, host->n_io_port, "nsp32");
	if (res == NULL) {
		nsp32_msg(KERN_ERR,
			  "I/O region 0x%lx+0x%lx is already used",
			  data->BaseAddress, data->NumAddress);
		goto free_irq;
	}
	ret = scsi_add_host(host, &pdev->dev);
	if (ret) {
		nsp32_msg(KERN_ERR, "failed to add scsi host");
		goto free_region;
	}
	scsi_scan_host(host);
	pci_set_drvdata(pdev, host);
	return 0;
	/* error unwind: release in reverse order of acquisition */
 free_region:
	release_region(host->io_port, host->n_io_port);
 free_irq:
	free_irq(host->irq, data);
 free_sg_list:
	pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
			    data->sg_list, data->sg_paddr);
 free_autoparam:
	pci_free_consistent(pdev, sizeof(nsp32_autoparam),
			    data->autoparam, data->auto_paddr);
 scsi_unregister:
	scsi_host_put(host);
 err:
	return 1;
}
/*
 * nsp32_release - free every resource acquired in nsp32_detect()
 *
 * Safe to call with partially-initialized state: each resource is only
 * released when its handle is non-zero/non-NULL. Always returns 0.
 */
static int nsp32_release(struct Scsi_Host *host)
{
	nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
	if (data->autoparam) {
		pci_free_consistent(data->Pci, sizeof(nsp32_autoparam),
				    data->autoparam, data->auto_paddr);
	}
	if (data->sg_list) {
		pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE,
				    data->sg_list, data->sg_paddr);
	}
	if (host->irq) {
		free_irq(host->irq, data);
	}
	if (host->io_port && host->n_io_port) {
		release_region(host->io_port, host->n_io_port);
	}
	if (data->MmioAddress) {
		iounmap(data->MmioAddress);
	}
	return 0;
}
/*
 * Return the human-readable adapter description built in nsp32_detect().
 */
static const char *nsp32_info(struct Scsi_Host *shpnt)
{
	return ((nsp32_hw_data *)shpnt->hostdata)->info_str;
}
/****************************************************************************
 * error handler
 */

/*
 * nsp32_eh_abort - midlayer abort handler for a running command
 *
 * Fails when no command is outstanding; otherwise cancels any SDTR
 * negotiation in flight, stops the transfer hardware, and completes
 * the command with DID_ABORT.
 */
static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;

	nsp32_msg(KERN_WARNING, "abort");

	/*
	 * cur_lunt is NULL whenever no command is in flight (it is cleared
	 * in nsp32_scsi_done); dereferencing it unconditionally would oops.
	 */
	if (data->cur_lunt == NULL || data->cur_lunt->SCpnt == NULL) {
		nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort failed");
		return FAILED;
	}

	if (data->cur_target != NULL &&
	    (data->cur_target->sync_flag & (SDTR_INITIATOR | SDTR_TARGET))) {
		/* reset SDTR negotiation */
		data->cur_target->sync_flag = 0;
		nsp32_set_async(data, data->cur_target);
	}

	nsp32_write2(base, TRANSFER_CONTROL, 0);
	/*
	 * BM_CNT is a 32 bit counter: every other clear in this driver uses
	 * nsp32_write4 (see nsp32_scsi_done/nsp32_do_bus_reset); a 16 bit
	 * write would leave the upper half untouched.
	 */
	nsp32_write4(base, BM_CNT, 0);

	SCpnt->result = DID_ABORT << 16;
	nsp32_scsi_done(SCpnt);

	nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort success");
	return SUCCESS;
}
/*
 * nsp32_eh_bus_reset - midlayer bus-reset handler
 *
 * Masks all chip IRQs around the SCSI bus reset so stray interrupts
 * from the reset itself are not serviced.
 */
static int nsp32_eh_bus_reset(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;

	spin_lock_irq(SCpnt->device->host->host_lock);

	nsp32_msg(KERN_INFO, "Bus Reset");
	/* %p: SCpnt is a pointer — %x was a printf format mismatch */
	nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%p", SCpnt);

	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
	nsp32_do_bus_reset(data);
	nsp32_write2(base, IRQ_CONTROL, 0);

	spin_unlock_irq(SCpnt->device->host->host_lock);
	return SUCCESS;	/* SCSI bus reset is succeeded at any time. */
}
/*
 * nsp32_do_bus_reset - hard-reset the SCSI bus and clear driver state
 *
 * Stops any transfer in progress, drops every target back to async
 * mode, pulses the RST line, drains pending interrupt status, and
 * clears CurrentSC. Caller must hold the appropriate lock and mask
 * chip IRQs around this call.
 */
static void nsp32_do_bus_reset(nsp32_hw_data *data)
{
	unsigned int   base = data->BaseAddress;
	unsigned short intrdat;
	int i;
	nsp32_dbg(NSP32_DEBUG_BUSRESET, "in");
	/*
	 * stop all transfer
	 * clear TRANSFERCONTROL_BM_START
	 * clear counter
	 */
	nsp32_write2(base, TRANSFER_CONTROL, 0);
	nsp32_write4(base, BM_CNT,           0);
	nsp32_write4(base, CLR_COUNTER,      CLRCOUNTER_ALLMASK);
	/*
	 * fall back to asynchronous transfer mode
	 * initialize SDTR negotiation flag
	 */
	for (i = 0; i < ARRAY_SIZE(data->target); i++) {
		nsp32_target *target = &data->target[i];
		target->sync_flag = 0;
		nsp32_set_async(data, target);
	}
	/*
	 * reset SCSI bus: assert RST, hold it for the required time,
	 * then release it
	 */
	nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST);
	mdelay(RESET_HOLD_TIME / 1000);
	nsp32_write1(base, SCSI_BUS_CONTROL, 0);
	/* drain any interrupt status latched by the reset */
	for(i = 0; i < 5; i++) {
		intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */
		nsp32_dbg(NSP32_DEBUG_BUSRESET, "irq:1: 0x%x", intrdat);
	}
	data->CurrentSC = NULL;
}
/*
 * SCSI EH host reset handler: re-initialize the controller and reset
 * the SCSI bus.  Always returns SUCCESS.
 */
static int nsp32_eh_host_reset(struct scsi_cmnd *SCpnt)
{
	struct Scsi_Host *host = SCpnt->device->host;
	unsigned int base = SCpnt->device->host->io_port;
	nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;

	nsp32_msg(KERN_INFO, "Host Reset");
	/* Fix: a pointer must be printed with %p, not %x — %x truncates on
	 * 64-bit and mismatched varargs types are undefined behavior. */
	nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=0x%p", SCpnt);

	spin_lock_irq(SCpnt->device->host->host_lock);

	nsp32hw_init(data);
	/* Mask all interrupts while the bus is being reset, then unmask. */
	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
	nsp32_do_bus_reset(data);
	nsp32_write2(base, IRQ_CONTROL, 0);

	spin_unlock_irq(SCpnt->device->host->host_lock);
	return SUCCESS;	/* Host reset is succeeded at any time. */
}
/**************************************************************************
* EEPROM handler
*/
/*
* getting EEPROM parameter
*/
/*
 * Validate the EEPROM signature (offsets 0x7e/0x7f must read 0x55/0xaa)
 * and dispatch to the board-specific EEPROM reader.  Returns TRUE on
 * success, FALSE when the EEPROM is missing or unknown.
 */
static int nsp32_getprom_param(nsp32_hw_data *data)
{
	int vendor = data->pci_devid->vendor;
	int device = data->pci_devid->device;
	int ret, val, i;

	/* signature check */
	ret = nsp32_prom_read(data, 0x7e);
	if (ret != 0x55) {
		nsp32_msg(KERN_INFO, "No EEPROM detected: 0x%x", ret);
		return FALSE;
	}
	ret = nsp32_prom_read(data, 0x7f);
	if (ret != 0xaa) {
		nsp32_msg(KERN_INFO, "Invalid number: 0x%x", ret);
		return FALSE;
	}

	/* dispatch on the PCI ID to the matching EEPROM layout */
	if (vendor != PCI_VENDOR_ID_WORKBIT) {
		nsp32_msg(KERN_WARNING, "Unknown EEPROM");
		ret = FALSE;
	} else {
		switch (device) {
		case PCI_DEVICE_ID_WORKBIT_STANDARD:
			ret = nsp32_getprom_c16(data);
			break;
		case PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC:
		case PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO:
			ret = nsp32_getprom_at24(data);
			break;
		default:
			nsp32_msg(KERN_WARNING, "Unknown EEPROM");
			ret = FALSE;
			break;
		}
	}

	/* for debug : SPROM data full checking */
	for (i = 0; i <= 0x1f; i++) {
		val = nsp32_prom_read(data, i);
		nsp32_dbg(NSP32_DEBUG_EEPROM,
			  "rom address 0x%x : 0x%x", i, val);
	}

	return ret;
}
/*
* AT24C01A (Logitec: LHA-600S), AT24C02 (Melco Buffalo: IFC-USLP) data map:
*
* ROMADDR
* 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
* Value 0x0: ASYNC, 0x0c: Ultra-20M, 0x19: Fast-10M
* 0x07 : HBA Synchronous Transfer Period
* Value 0: AutoSync, 1: Manual Setting
* 0x08 - 0x0f : Not Used? (0x0)
* 0x10 : Bus Termination
* Value 0: Auto[ON], 1: ON, 2: OFF
* 0x11 : Not Used? (0)
* 0x12 : Bus Reset Delay Time (0x03)
* 0x13 : Bootable CD Support
* Value 0: Disable, 1: Enable
* 0x14 : Device Scan
* Bit 7 6 5 4 3 2 1 0
* | <----------------->
* | SCSI ID: Value 0: Skip, 1: YES
* |-> Value 0: ALL scan, Value 1: Manual
* 0x15 - 0x1b : Not Used? (0)
* 0x1c : Constant? (0x01) (clock div?)
* 0x1d - 0x7c : Not Used (0xff)
* 0x7d : Not Used? (0xff)
* 0x7e : Constant (0x55), Validity signature
* 0x7f : Constant (0xaa), Validity signature
*/
/*
 * Read HBA/target settings from an AT24C01A/AT24C02 EEPROM (Logitec
 * LHA-600S / Melco IFC-USLP boards); see the layout map above.
 * Always returns TRUE.
 */
static int nsp32_getprom_at24(nsp32_hw_data *data)
{
	int ret, i;
	int auto_sync;
	nsp32_target *target;
	int entry;

	/*
	 * Reset time which is designated by EEPROM.
	 *
	 * TODO: Not used yet.
	 */
	data->resettime = nsp32_prom_read(data, 0x12);

	/*
	 * HBA Synchronous Transfer Period
	 *
	 * Note: auto_sync = 0: auto, 1: manual. Ninja SCSI HBA spec says
	 * that if auto_sync is 0 (auto), and connected SCSI devices are
	 * same or lower than 3, then transfer speed is set as ULTRA-20M.
	 * On the contrary if connected SCSI devices are same or higher
	 * than 4, then transfer speed is set as FAST-10M.
	 *
	 * I break this rule. The number of connected SCSI devices are
	 * only ignored. If auto_sync is 0 (auto), then transfer speed is
	 * forced as ULTRA-20M.
	 */
	ret = nsp32_prom_read(data, 0x07);
	switch (ret) {
	case 0:
		auto_sync = TRUE;
		break;
	case 1:
		auto_sync = FALSE;
		break;
	default:
		nsp32_msg(KERN_WARNING,
			  "Unsupported Auto Sync mode. Fall back to manual mode.");
		/* NOTE(review): the message says "manual mode" but TRUE
		 * selects auto mode here — confirm which is intended. */
		auto_sync = TRUE;
	}

	/* module parameter override: force ULTRA-20M */
	if (trans_mode == ULTRA20M_MODE) {
		auto_sync = TRUE;
	}

	/*
	 * each device Synchronous Transfer Period
	 */
	for (i = 0; i < NSP32_HOST_SCSIID; i++) {
		target = &data->target[i];
		if (auto_sync == TRUE) {
			target->limit_entry = 0; /* set as ULTRA20M */
		} else {
			/* EEPROM byte i holds the period code for SCSI ID i */
			ret = nsp32_prom_read(data, i);
			entry = nsp32_search_period_entry(data, target, ret);
			if (entry < 0) {
				/* search failed... set maximum speed */
				entry = 0;
			}
			target->limit_entry = entry;
		}
	}

	return TRUE;
}
/*
* C16 110 (I-O Data: SC-NBD) data map:
*
* ROMADDR
* 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
* Value 0x0: 20MB/S, 0x1: 10MB/S, 0x2: 5MB/S, 0x3: ASYNC
* 0x07 : 0 (HBA Synchronous Transfer Period: Auto Sync)
* 0x08 - 0x0f : Not Used? (0x0)
* 0x10 : Transfer Mode
* Value 0: PIO, 1: Busmaster
* 0x11 : Bus Reset Delay Time (0x00-0x20)
* 0x12 : Bus Termination
* Value 0: Disable, 1: Enable
* 0x13 - 0x19 : Disconnection
* Value 0: Disable, 1: Enable
* 0x1a - 0x7c : Not Used? (0)
* 0x7d : Not Used? (0xf8)
* 0x7e : Constant (0x55), Validity signature
* 0x7f : Constant (0xaa), Validity signature
*/
/*
 * Read per-target settings from a C16 110 EEPROM (I-O Data SC-NBD);
 * see the layout map above.  Always returns TRUE.
 */
static int nsp32_getprom_c16(nsp32_hw_data *data)
{
	/* EEPROM speed code -> synchronous period value:
	 * 0: 20MB/s, 1: 10MB/s, 2: 5MB/s, 3: async */
	static const int period_val[] = { 0x0c, 0x19, 0x32, 0x00 };
	nsp32_target *target;
	int code, entry;
	int id;

	/*
	 * Reset time which is designated by EEPROM.
	 *
	 * TODO: Not used yet.
	 */
	data->resettime = nsp32_prom_read(data, 0x11);

	/*
	 * each device Synchronous Transfer Period
	 */
	for (id = 0; id < NSP32_HOST_SCSIID; id++) {
		target = &data->target[id];
		code = nsp32_prom_read(data, id);
		if (code < 0 || code > 3)
			code = 0;	/* unknown code: default to 20MB/s */

		entry = nsp32_search_period_entry(data, target,
						  period_val[code]);
		if (entry < 0 || trans_mode == ULTRA20M_MODE)
			entry = 0;	/* search failed: set maximum speed */

		target->limit_entry = entry;
	}

	return TRUE;
}
/*
* Atmel AT24C01A (driven at 5V) serial EEPROM routines
*/
/*
 * Clock out the 7-bit AT24 device address (1010, A2=A1=A0 grounded)
 * followed by the R/W bit, then drive the (ignored) ACK slot.
 * Factored out of nsp32_prom_read(), which issued the identical
 * sequence twice.
 */
static void nsp32_prom_device_addr(nsp32_hw_data *data, int rw)
{
	nsp32_prom_write_bit(data, 1); /* 1 */
	nsp32_prom_write_bit(data, 0); /* 0 */
	nsp32_prom_write_bit(data, 1); /* 1 */
	nsp32_prom_write_bit(data, 0); /* 0 */
	nsp32_prom_write_bit(data, 0); /* A2: 0 (GND) */
	nsp32_prom_write_bit(data, 0); /* A1: 0 (GND) */
	nsp32_prom_write_bit(data, 0); /* A0: 0 (GND) */
	nsp32_prom_write_bit(data, rw); /* R/W: 0 = write, 1 = read */
	nsp32_prom_write_bit(data, 0); /* ack slot */
}

/*
 * Read one byte from the serial EEPROM at word address 'romaddr':
 * a dummy write cycle latches the address, then a repeated START and
 * a current-address read clocks the byte in MSB first.
 */
static int nsp32_prom_read(nsp32_hw_data *data, int romaddr)
{
	int i, val;

	/* dummy write cycle to latch the word address */
	nsp32_prom_start(data);
	nsp32_prom_device_addr(data, 0);

	/* word address, MSB first */
	for (i = 7; i >= 0; i--) {
		nsp32_prom_write_bit(data, ((romaddr >> i) & 1));
	}
	/* ack */
	nsp32_prom_write_bit(data, 0);

	/* repeated start, then current-address read */
	nsp32_prom_start(data);
	nsp32_prom_device_addr(data, 1);

	/* data byte, MSB first */
	val = 0;
	for (i = 7; i >= 0; i--) {
		val += (nsp32_prom_read_bit(data) << i);
	}

	/* no ack */
	nsp32_prom_write_bit(data, 1);

	/* stop condition */
	nsp32_prom_stop(data);

	return val;
}
/*
 * Drive one line (SCL/SDA/ENA bit in SERIAL_ROM_CTL) high or low via a
 * read-modify-write, then wait 10us for the slow EEPROM bus timing.
 */
static void nsp32_prom_set(nsp32_hw_data *data, int bit, int val)
{
	int base = data->BaseAddress;
	int reg = nsp32_index_read1(base, SERIAL_ROM_CTL);

	if (val)
		reg |= bit;
	else
		reg &= ~bit;

	nsp32_index_write1(base, SERIAL_ROM_CTL, reg);
	udelay(10);
}
/*
 * Sample one line of SERIAL_ROM_CTL; only SDA is a valid input.
 * Returns 0/1, or 0 on a bad 'bit' argument.
 */
static int nsp32_prom_get(nsp32_hw_data *data, int bit)
{
	int base = data->BaseAddress;
	int ret;

	if (bit != SDA) {
		nsp32_msg(KERN_ERR, "return value is not appropriate");
		return 0;
	}

	ret = (nsp32_index_read1(base, SERIAL_ROM_CTL) & bit) ? 1 : 0;
	udelay(10);
	return ret;
}
/* Generate an I2C START: SDA falls while SCL is held high. */
static void nsp32_prom_start (nsp32_hw_data *data)
{
	/* start condition */
	nsp32_prom_set(data, SCL, 1);
	nsp32_prom_set(data, SDA, 1);
	nsp32_prom_set(data, ENA, 1);	/* output mode */
	nsp32_prom_set(data, SDA, 0);	/* keeping SCL=1 and transiting
					 * SDA 1->0 is start condition */
	nsp32_prom_set(data, SCL, 0);
}
/* Generate an I2C STOP: SDA rises while SCL is held high. */
static void nsp32_prom_stop (nsp32_hw_data *data)
{
	/* stop condition */
	nsp32_prom_set(data, SCL, 1);
	nsp32_prom_set(data, SDA, 0);
	nsp32_prom_set(data, ENA, 1);	/* output mode */
	nsp32_prom_set(data, SDA, 1);	/* SDA 0->1 with SCL=1 is stop */
	nsp32_prom_set(data, SCL, 0);
}
/* Clock out one bit: present it on SDA, then pulse SCL high and low. */
static void nsp32_prom_write_bit(nsp32_hw_data *data, int val)
{
	/* write */
	nsp32_prom_set(data, SDA, val);
	nsp32_prom_set(data, SCL, 1 );
	nsp32_prom_set(data, SCL, 0 );
}
/* Clock in one bit: release SDA (input mode), sample it while SCL is
 * high, then restore output mode.  Returns 0 or 1. */
static int nsp32_prom_read_bit(nsp32_hw_data *data)
{
	int val;

	/* read */
	nsp32_prom_set(data, ENA, 0);	/* input mode */
	nsp32_prom_set(data, SCL, 1);

	val = nsp32_prom_get(data, SDA);

	nsp32_prom_set(data, SCL, 0);
	nsp32_prom_set(data, ENA, 1);	/* output mode */

	return val;
}
/**************************************************************************
* Power Management
*/
#ifdef CONFIG_PM
/* Device suspended */
/*
 * PM suspend hook (CONFIG_PM): save PCI config space, disable the
 * device and enter the requested low-power state.
 */
static int nsp32_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);

	/* Fix: pm_message_t is a struct; passing it through varargs and
	 * printing it with %ld is undefined behavior — print the .event
	 * member with %d instead. */
	nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state=%d, slot=%s, host=0x%p",
		  pdev, state.event, pci_name(pdev), host);

	pci_save_state (pdev);
	pci_disable_device (pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
/* Device woken up */
/*
 * PM resume hook: restore PCI state, verify the chip responds, then
 * re-initialize the controller and reset the SCSI bus.
 */
static int nsp32_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
	unsigned short reg;

	nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p", pdev, pci_name(pdev), host);

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake (pdev, PCI_D0, 0);
	pci_restore_state (pdev);

	/* All ones from an I/O read means the device did not come back. */
	reg = nsp32_read2(data->BaseAddress, INDEX_REG);

	nsp32_msg(KERN_INFO, "io=0x%x reg=0x%x", data->BaseAddress, reg);

	if (reg == 0xffff) {
		nsp32_msg(KERN_INFO, "missing device. abort resume.");
		return 0;
	}

	nsp32hw_init (data);
	nsp32_do_bus_reset(data);

	nsp32_msg(KERN_INFO, "resume success");

	return 0;
}
#endif
/************************************************************************
* PCI/Cardbus probe/remove routine
*/
/*
 * PCI probe: enable the device, record BAR0 (I/O) and BAR1 (MMIO)
 * resources in the global nsp32_data_base, then run SCSI detection.
 */
static int nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	nsp32_hw_data *data = &nsp32_data_base;

	nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");

	ret = pci_enable_device(pdev);
	if (ret) {
		nsp32_msg(KERN_ERR, "failed to enable pci device");
		return ret;
	}

	data->Pci = pdev;
	data->pci_devid = id;
	data->IrqNumber = pdev->irq;
	data->BaseAddress = pci_resource_start(pdev, 0);
	data->NumAddress = pci_resource_len (pdev, 0);
	/* NOTE(review): pci_ioremap_bar() may return NULL; the result is
	 * not checked before use — confirm whether a check belongs here. */
	data->MmioAddress = pci_ioremap_bar(pdev, 1);
	data->MmioLength = pci_resource_len (pdev, 1);

	pci_set_master(pdev);

	ret = nsp32_detect(pdev);
	/* NOTE(review): on detect failure the device stays enabled and the
	 * MMIO mapping is not undone here — presumably the release path
	 * cleans up; verify. */

	nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s",
		  pdev->irq,
		  data->MmioAddress, data->MmioLength,
		  pci_name(pdev),
		  nsp32_model[id->driver_data]);

	nsp32_dbg(NSP32_DEBUG_REGISTER, "exit %d", ret);

	return ret;
}
/* PCI remove: detach the SCSI host and free all driver resources. */
static void nsp32_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);

	nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");

	scsi_remove_host(host);		/* no new commands after this */
	nsp32_release(host);		/* DMA buffers, IRQ, regions, MMIO */
	scsi_host_put(host);
}
static struct pci_driver nsp32_driver = {
.name = "nsp32",
.id_table = nsp32_pci_table,
.probe = nsp32_probe,
.remove = nsp32_remove,
#ifdef CONFIG_PM
.suspend = nsp32_suspend,
.resume = nsp32_resume,
#endif
};
/*********************************************************************
* Module entry point
*/
/* Module entry: register the PCI driver. */
static int __init init_nsp32(void)
{
	nsp32_msg(KERN_INFO, "loading...");
	return pci_register_driver(&nsp32_driver);
}
/* Module exit: unregister the PCI driver. */
static void __exit exit_nsp32(void)
{
	nsp32_msg(KERN_INFO, "unloading...");
	pci_unregister_driver(&nsp32_driver);
}
module_init(init_nsp32);
module_exit(exit_nsp32);
/* end */
| gpl-2.0 |
ISTweak/android_kernel_sharp_is15sh | drivers/media/video/s5p-fimc/mipi-csis.c | 2784 | 18239 | /*
* Samsung S5P/EXYNOS4 SoC series MIPI-CSI receiver driver
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
* Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <media/v4l2-subdev.h>
#include <plat/mipi_csis.h>
#include "mipi-csis.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
/* Register map definition */
/* CSIS global control */
#define S5PCSIS_CTRL 0x00
#define S5PCSIS_CTRL_DPDN_DEFAULT (0 << 31)
#define S5PCSIS_CTRL_DPDN_SWAP (1 << 31)
#define S5PCSIS_CTRL_ALIGN_32BIT (1 << 20)
#define S5PCSIS_CTRL_UPDATE_SHADOW (1 << 16)
#define S5PCSIS_CTRL_WCLK_EXTCLK (1 << 8)
#define S5PCSIS_CTRL_RESET (1 << 4)
#define S5PCSIS_CTRL_ENABLE (1 << 0)
/* D-PHY control */
#define S5PCSIS_DPHYCTRL 0x04
#define S5PCSIS_DPHYCTRL_HSS_MASK (0x1f << 27)
#define S5PCSIS_DPHYCTRL_ENABLE (0x1f << 0)
#define S5PCSIS_CONFIG 0x08
#define S5PCSIS_CFG_FMT_YCBCR422_8BIT (0x1e << 2)
#define S5PCSIS_CFG_FMT_RAW8 (0x2a << 2)
#define S5PCSIS_CFG_FMT_RAW10 (0x2b << 2)
#define S5PCSIS_CFG_FMT_RAW12 (0x2c << 2)
/* User defined formats, x = 1...4 */
#define S5PCSIS_CFG_FMT_USER(x) ((0x30 + x - 1) << 2)
#define S5PCSIS_CFG_FMT_MASK (0x3f << 2)
#define S5PCSIS_CFG_NR_LANE_MASK 3
/* Interrupt mask. */
#define S5PCSIS_INTMSK 0x10
#define S5PCSIS_INTMSK_EN_ALL 0xf000003f
#define S5PCSIS_INTSRC 0x14
/* Pixel resolution */
#define S5PCSIS_RESOL 0x2c
#define CSIS_MAX_PIX_WIDTH 0xffff
#define CSIS_MAX_PIX_HEIGHT 0xffff
enum {
CSIS_CLK_MUX,
CSIS_CLK_GATE,
};
static char *csi_clock_name[] = {
[CSIS_CLK_MUX] = "sclk_csis",
[CSIS_CLK_GATE] = "csis",
};
#define NUM_CSIS_CLOCKS ARRAY_SIZE(csi_clock_name)
enum {
ST_POWERED = 1,
ST_STREAMING = 2,
ST_SUSPENDED = 4,
};
/**
* struct csis_state - the driver's internal state data structure
* @lock: mutex serializing the subdev and power management operations,
* protecting @format and @flags members
* @pads: CSIS pads array
* @sd: v4l2_subdev associated with CSIS device instance
* @pdev: CSIS platform device
* @regs_res: requested I/O register memory resource
* @regs: mmaped I/O registers memory
* @clock: CSIS clocks
* @irq: requested s5p-mipi-csis irq number
* @flags: the state variable for power and streaming control
* @csis_fmt: current CSIS pixel format
* @format: common media bus format for the source and sink pad
*/
struct csis_state {
struct mutex lock;
struct media_pad pads[CSIS_PADS_NUM];
struct v4l2_subdev sd;
struct platform_device *pdev;
struct resource *regs_res;
void __iomem *regs;
struct clk *clock[NUM_CSIS_CLOCKS];
int irq;
struct regulator *supply;
u32 flags;
const struct csis_pix_format *csis_fmt;
struct v4l2_mbus_framefmt format;
};
/**
* struct csis_pix_format - CSIS pixel format description
* @pix_width_alignment: horizontal pixel alignment, width will be
* multiple of 2^pix_width_alignment
* @code: corresponding media bus code
* @fmt_reg: S5PCSIS_CONFIG register value
*/
struct csis_pix_format {
unsigned int pix_width_alignment;
enum v4l2_mbus_pixelcode code;
u32 fmt_reg;
};
static const struct csis_pix_format s5pcsis_formats[] = {
{
.code = V4L2_MBUS_FMT_VYUY8_2X8,
.fmt_reg = S5PCSIS_CFG_FMT_YCBCR422_8BIT,
}, {
.code = V4L2_MBUS_FMT_JPEG_1X8,
.fmt_reg = S5PCSIS_CFG_FMT_USER(1),
},
};
#define s5pcsis_write(__csis, __r, __v) writel(__v, __csis->regs + __r)
#define s5pcsis_read(__csis, __r) readl(__csis->regs + __r)
/* Map a struct v4l2_subdev back to its enclosing csis_state. */
static struct csis_state *sd_to_csis_state(struct v4l2_subdev *sdev)
{
	return container_of(sdev, struct csis_state, sd);
}
static const struct csis_pix_format *find_csis_format(
struct v4l2_mbus_framefmt *mf)
{
int i;
for (i = 0; i < ARRAY_SIZE(s5pcsis_formats); i++)
if (mf->code == s5pcsis_formats[i].code)
return &s5pcsis_formats[i];
return NULL;
}
/* Unmask (on) or mask (off) all CSIS interrupt sources. */
static void s5pcsis_enable_interrupts(struct csis_state *state, bool on)
{
	u32 val = s5pcsis_read(state, S5PCSIS_INTMSK);

	if (on)
		val |= S5PCSIS_INTMSK_EN_ALL;
	else
		val &= ~S5PCSIS_INTMSK_EN_ALL;

	s5pcsis_write(state, S5PCSIS_INTMSK, val);
}
/* Trigger a software reset of the CSIS block; 10us settle time. */
static void s5pcsis_reset(struct csis_state *state)
{
	u32 val = s5pcsis_read(state, S5PCSIS_CTRL);

	s5pcsis_write(state, S5PCSIS_CTRL, val | S5PCSIS_CTRL_RESET);
	udelay(10);
}
/* Enable/disable the CSIS core (CTRL) and then the D-PHY (DPHYCTRL). */
static void s5pcsis_system_enable(struct csis_state *state, int on)
{
	static const struct {
		u32 reg;
		u32 mask;
	} bits[] = {
		{ S5PCSIS_CTRL,     S5PCSIS_CTRL_ENABLE },
		{ S5PCSIS_DPHYCTRL, S5PCSIS_DPHYCTRL_ENABLE },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(bits); i++) {
		u32 val = s5pcsis_read(state, bits[i].reg);

		if (on)
			val |= bits[i].mask;
		else
			val &= ~bits[i].mask;

		s5pcsis_write(state, bits[i].reg, val);
	}
}
/* Called with the state.lock mutex held */
/* Program the stored media bus format (pixel code and resolution)
 * into the CONFIG and RESOL registers. */
static void __s5pcsis_set_format(struct csis_state *state)
{
	struct v4l2_mbus_framefmt *mf = &state->format;
	u32 val;

	v4l2_dbg(1, debug, &state->sd, "fmt: %d, %d x %d\n",
		 mf->code, mf->width, mf->height);

	/* Color format */
	val = s5pcsis_read(state, S5PCSIS_CONFIG);
	val = (val & ~S5PCSIS_CFG_FMT_MASK) | state->csis_fmt->fmt_reg;
	s5pcsis_write(state, S5PCSIS_CONFIG, val);

	/* Pixel resolution: width in the upper 16 bits, height in the lower */
	val = (mf->width << 16) | mf->height;
	s5pcsis_write(state, S5PCSIS_RESOL, val);
}
/* Program the D-PHY HS-RX settle counter (bits 31:27 of DPHYCTRL). */
static void s5pcsis_set_hsync_settle(struct csis_state *state, int settle)
{
	u32 val = s5pcsis_read(state, S5PCSIS_DPHYCTRL);

	val = (val & ~S5PCSIS_DPHYCTRL_HSS_MASK) | (settle << 27);
	s5pcsis_write(state, S5PCSIS_DPHYCTRL, val);
}
/*
 * Program lane count, format, resolution, HS-settle and output
 * alignment from platform data, then latch everything with the
 * shadow-register update bit.
 */
static void s5pcsis_set_params(struct csis_state *state)
{
	struct s5p_platform_mipi_csis *pdata = state->pdev->dev.platform_data;
	u32 val;

	/* Number of data lanes: the register field holds lanes - 1. */
	val = s5pcsis_read(state, S5PCSIS_CONFIG);
	val = (val & ~S5PCSIS_CFG_NR_LANE_MASK) | (pdata->lanes - 1);
	s5pcsis_write(state, S5PCSIS_CONFIG, val);

	__s5pcsis_set_format(state);
	s5pcsis_set_hsync_settle(state, pdata->hs_settle);

	val = s5pcsis_read(state, S5PCSIS_CTRL);
	if (pdata->alignment == 32)
		val |= S5PCSIS_CTRL_ALIGN_32BIT;
	else /* 24-bits */
		val &= ~S5PCSIS_CTRL_ALIGN_32BIT;
	/* Not using external clock. */
	val &= ~S5PCSIS_CTRL_WCLK_EXTCLK;
	s5pcsis_write(state, S5PCSIS_CTRL, val);

	/* Update the shadow register. */
	val = s5pcsis_read(state, S5PCSIS_CTRL);
	s5pcsis_write(state, S5PCSIS_CTRL, val | S5PCSIS_CTRL_UPDATE_SHADOW);
}
/* Release every successfully acquired clock; ERR_PTR/NULL slots are
 * skipped, so this is safe after a partial s5pcsis_clk_get(). */
static void s5pcsis_clk_put(struct csis_state *state)
{
	int i;

	for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
		if (IS_ERR_OR_NULL(state->clock[i]))
			continue;
		clk_put(state->clock[i]);
	}
}
/*
 * Look up the mux and gate clocks by name.  On any failure the clocks
 * acquired so far are released and -ENXIO is returned.
 */
static int s5pcsis_clk_get(struct csis_state *state)
{
	struct device *dev = &state->pdev->dev;
	int i;

	for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
		state->clock[i] = clk_get(dev, csi_clock_name[i]);
		if (IS_ERR(state->clock[i])) {
			/* clk_put skips ERR_PTR/NULL slots (state is
			 * zero-allocated), so a mid-loop put is safe. */
			s5pcsis_clk_put(state);
			dev_err(dev, "failed to get clock: %s\n",
				csi_clock_name[i]);
			return -ENXIO;
		}
	}
	return 0;
}
/* v4l2 s_power: forward to runtime PM (get on power-on, put on off). */
static int s5pcsis_s_power(struct v4l2_subdev *sd, int on)
{
	struct csis_state *state = sd_to_csis_state(sd);
	struct device *dev = &state->pdev->dev;

	return on ? pm_runtime_get_sync(dev) : pm_runtime_put_sync(dev);
}
/* Full start sequence: reset, reprogram parameters, enable core and
 * D-PHY, then unmask interrupts. */
static void s5pcsis_start_stream(struct csis_state *state)
{
	s5pcsis_reset(state);
	s5pcsis_set_params(state);
	s5pcsis_system_enable(state, true);
	s5pcsis_enable_interrupts(state, true);
}
/* Stop sequence: mask interrupts first, then disable core and D-PHY. */
static void s5pcsis_stop_stream(struct csis_state *state)
{
	s5pcsis_enable_interrupts(state, false);
	s5pcsis_system_enable(state, false);
}
/* v4l2_subdev operations */
/*
 * v4l2 s_stream: resume the device via runtime PM and start/stop the
 * receiver.  pm_runtime_get_sync() returns 1 when the device was
 * already active, hence the `ret == 1 ? 0 : ret` normalization.
 */
static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct csis_state *state = sd_to_csis_state(sd);
	int ret = 0;

	v4l2_dbg(1, debug, sd, "%s: %d, state: 0x%x\n",
		 __func__, enable, state->flags);

	if (enable) {
		/* Power the block up before touching registers. */
		ret = pm_runtime_get_sync(&state->pdev->dev);
		if (ret && ret != 1)
			return ret;
	}

	mutex_lock(&state->lock);
	if (enable) {
		if (state->flags & ST_SUSPENDED) {
			/* NOTE(review): the runtime PM reference taken above
			 * is not released on this error path — confirm. */
			ret = -EBUSY;
			goto unlock;
		}
		s5pcsis_start_stream(state);
		state->flags |= ST_STREAMING;
	} else {
		s5pcsis_stop_stream(state);
		state->flags &= ~ST_STREAMING;
	}
unlock:
	mutex_unlock(&state->lock);

	if (!enable)
		pm_runtime_put(&state->pdev->dev);

	return ret == 1 ? 0 : ret;
}
/* v4l2 pad op: enumerate the supported media bus pixel codes. */
static int s5pcsis_enum_mbus_code(struct v4l2_subdev *sd,
				  struct v4l2_subdev_fh *fh,
				  struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->index < ARRAY_SIZE(s5pcsis_formats)) {
		code->code = s5pcsis_formats[code->index].code;
		return 0;
	}
	return -EINVAL;
}
/*
 * Adjust a requested format to what the hardware supports: fall back
 * to the first table entry for unknown codes and clamp/align the
 * resolution to the CSIS limits.  Returns the chosen table entry.
 */
static struct csis_pix_format const *s5pcsis_try_format(
	struct v4l2_mbus_framefmt *mf)
{
	struct csis_pix_format const *csis_fmt;

	csis_fmt = find_csis_format(mf);
	if (csis_fmt == NULL)
		csis_fmt = &s5pcsis_formats[0];
	mf->code = csis_fmt->code;
	v4l_bound_align_image(&mf->width, 1, CSIS_MAX_PIX_WIDTH,
			      csis_fmt->pix_width_alignment,
			      &mf->height, 1, CSIS_MAX_PIX_HEIGHT, 1,
			      0);
	return csis_fmt;
}
/* Select the TRY (per-filehandle) or ACTIVE (device) format storage.
 * Returns NULL for a TRY request without a filehandle. */
static struct v4l2_mbus_framefmt *__s5pcsis_get_format(
	struct csis_state *state, struct v4l2_subdev_fh *fh,
	u32 pad, enum v4l2_subdev_format_whence which)
{
	if (which != V4L2_SUBDEV_FORMAT_TRY)
		return &state->format;

	return fh ? v4l2_subdev_get_try_format(fh, pad) : NULL;
}
/*
 * v4l2 pad set_fmt.  The source pad mirrors the sink pad and is
 * effectively read-only: a set on it just returns the stored format.
 */
static int s5pcsis_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
			   struct v4l2_subdev_format *fmt)
{
	struct csis_state *state = sd_to_csis_state(sd);
	struct csis_pix_format const *csis_fmt;
	struct v4l2_mbus_framefmt *mf;

	if (fmt->pad != CSIS_PAD_SOURCE && fmt->pad != CSIS_PAD_SINK)
		return -EINVAL;
	mf = __s5pcsis_get_format(state, fh, fmt->pad, fmt->which);

	if (fmt->pad == CSIS_PAD_SOURCE) {
		if (mf) {
			mutex_lock(&state->lock);
			fmt->format = *mf;
			mutex_unlock(&state->lock);
		}
		return 0;
	}
	/* Sink pad: clamp the request to hardware limits and store it. */
	csis_fmt = s5pcsis_try_format(&fmt->format);
	if (mf) {
		mutex_lock(&state->lock);
		*mf = fmt->format;
		/* Only the ACTIVE format selects the hardware pixel code. */
		if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
			state->csis_fmt = csis_fmt;
		mutex_unlock(&state->lock);
	}
	return 0;
}
static int s5pcsis_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *fmt)
{
struct csis_state *state = sd_to_csis_state(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->pad != CSIS_PAD_SOURCE && fmt->pad != CSIS_PAD_SINK)
return -EINVAL;
mf = __s5pcsis_get_format(state, fh, fmt->pad, fmt->which);
if (!mf)
return -EINVAL;
mutex_lock(&state->lock);
fmt->format = *mf;
mutex_unlock(&state->lock);
return 0;
}
static struct v4l2_subdev_core_ops s5pcsis_core_ops = {
.s_power = s5pcsis_s_power,
};
static struct v4l2_subdev_pad_ops s5pcsis_pad_ops = {
.enum_mbus_code = s5pcsis_enum_mbus_code,
.get_fmt = s5pcsis_get_fmt,
.set_fmt = s5pcsis_set_fmt,
};
static struct v4l2_subdev_video_ops s5pcsis_video_ops = {
.s_stream = s5pcsis_s_stream,
};
static struct v4l2_subdev_ops s5pcsis_subdev_ops = {
.core = &s5pcsis_core_ops,
.pad = &s5pcsis_pad_ops,
.video = &s5pcsis_video_ops,
};
/* IRQ handler: acknowledge by writing the pending bits back to INTSRC. */
static irqreturn_t s5pcsis_irq_handler(int irq, void *dev_id)
{
	struct csis_state *state = dev_id;

	s5pcsis_write(state, S5PCSIS_INTSRC,
		      s5pcsis_read(state, S5PCSIS_INTSRC));

	return IRQ_HANDLED;
}
/*
 * Platform probe: map registers, acquire clocks/regulator/irq and
 * register the v4l2 subdev with its media pads.  The device starts in
 * ST_SUSPENDED and is powered up lazily through runtime PM.
 */
static int __devinit s5pcsis_probe(struct platform_device *pdev)
{
	struct s5p_platform_mipi_csis *pdata;
	struct resource *mem_res;
	struct resource *regs_res;
	struct csis_state *state;
	int ret = -ENOMEM;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	mutex_init(&state->lock);
	state->pdev = pdev;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL || pdata->phy_enable == NULL) {
		dev_err(&pdev->dev, "Platform data not fully specified\n");
		goto e_free;
	}

	/* CSIS0 allows more lanes than CSIS1. */
	if ((pdev->id == 1 && pdata->lanes > CSIS1_MAX_LANES) ||
	    pdata->lanes > CSIS0_MAX_LANES) {
		ret = -EINVAL;
		dev_err(&pdev->dev, "Unsupported number of data lanes: %d\n",
			pdata->lanes);
		goto e_free;
	}

	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem_res) {
		dev_err(&pdev->dev, "Failed to get IO memory region\n");
		goto e_free;
	}

	regs_res = request_mem_region(mem_res->start, resource_size(mem_res),
				      pdev->name);
	if (!regs_res) {
		dev_err(&pdev->dev, "Failed to request IO memory region\n");
		goto e_free;
	}
	state->regs_res = regs_res;

	state->regs = ioremap(mem_res->start, resource_size(mem_res));
	if (!state->regs) {
		dev_err(&pdev->dev, "Failed to remap IO region\n");
		goto e_reqmem;
	}

	ret = s5pcsis_clk_get(state);
	if (ret)
		goto e_unmap;

	clk_enable(state->clock[CSIS_CLK_MUX]);
	if (pdata->clk_rate)
		clk_set_rate(state->clock[CSIS_CLK_MUX], pdata->clk_rate);
	else
		dev_WARN(&pdev->dev, "No clock frequency specified!\n");

	state->irq = platform_get_irq(pdev, 0);
	if (state->irq < 0) {
		ret = state->irq;
		dev_err(&pdev->dev, "Failed to get irq\n");
		goto e_clkput;
	}

	/* Boards with a fixed PHY supply do not expose a "vdd" regulator. */
	if (!pdata->fixed_phy_vdd) {
		state->supply = regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(state->supply)) {
			ret = PTR_ERR(state->supply);
			state->supply = NULL;
			goto e_clkput;
		}
	}

	ret = request_irq(state->irq, s5pcsis_irq_handler, 0,
			  dev_name(&pdev->dev), state);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed\n");
		goto e_regput;
	}

	v4l2_subdev_init(&state->sd, &s5pcsis_subdev_ops);
	state->sd.owner = THIS_MODULE;
	strlcpy(state->sd.name, dev_name(&pdev->dev), sizeof(state->sd.name));
	state->csis_fmt = &s5pcsis_formats[0];

	state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
	ret = media_entity_init(&state->sd.entity,
				CSIS_PADS_NUM, state->pads, 0);
	if (ret < 0)
		goto e_irqfree;

	/* This allows to retrieve the platform device id by the host driver */
	v4l2_set_subdevdata(&state->sd, pdev);

	/* .. and a pointer to the subdev. */
	platform_set_drvdata(pdev, &state->sd);

	state->flags = ST_SUSPENDED;
	pm_runtime_enable(&pdev->dev);

	return 0;

	/* Error unwinding, in reverse order of acquisition. */
e_irqfree:
	free_irq(state->irq, state);
e_regput:
	if (state->supply)
		regulator_put(state->supply);
e_clkput:
	clk_disable(state->clock[CSIS_CLK_MUX]);
	s5pcsis_clk_put(state);
e_unmap:
	iounmap(state->regs);
e_reqmem:
	release_mem_region(regs_res->start, resource_size(regs_res));
e_free:
	kfree(state);
	return ret;
}
/*
 * Power the CSIS down: stop streaming, disable the D-PHY, the optional
 * regulator and the gate clock.  Shared by runtime and system suspend.
 */
static int s5pcsis_suspend(struct device *dev)
{
	struct s5p_platform_mipi_csis *pdata = dev->platform_data;
	struct platform_device *pdev = to_platform_device(dev);
	struct v4l2_subdev *sd = platform_get_drvdata(pdev);
	struct csis_state *state = sd_to_csis_state(sd);
	int ret = 0;

	v4l2_dbg(1, debug, sd, "%s: flags: 0x%x\n",
		 __func__, state->flags);

	mutex_lock(&state->lock);
	if (state->flags & ST_POWERED) {
		s5pcsis_stop_stream(state);
		ret = pdata->phy_enable(state->pdev, false);
		if (ret)
			goto unlock;
		if (state->supply) {
			ret = regulator_disable(state->supply);
			if (ret)
				goto unlock;
		}
		clk_disable(state->clock[CSIS_CLK_GATE]);
		state->flags &= ~ST_POWERED;
	}
	state->flags |= ST_SUSPENDED;
unlock:
	mutex_unlock(&state->lock);
	return ret ? -EAGAIN : 0;
}
/*
 * Power the CSIS back up and restart streaming if it was active.
 * Shared by runtime and system resume.
 */
static int s5pcsis_resume(struct device *dev)
{
	struct s5p_platform_mipi_csis *pdata = dev->platform_data;
	struct platform_device *pdev = to_platform_device(dev);
	struct v4l2_subdev *sd = platform_get_drvdata(pdev);
	struct csis_state *state = sd_to_csis_state(sd);
	int ret = 0;

	v4l2_dbg(1, debug, sd, "%s: flags: 0x%x\n",
		 __func__, state->flags);

	mutex_lock(&state->lock);
	if (!(state->flags & ST_SUSPENDED))
		goto unlock;

	if (!(state->flags & ST_POWERED)) {
		if (state->supply)
			ret = regulator_enable(state->supply);
		if (ret)
			goto unlock;

		ret = pdata->phy_enable(state->pdev, true);
		if (!ret) {
			state->flags |= ST_POWERED;
		} else if (state->supply) {
			regulator_disable(state->supply);
			goto unlock;
		}
		/* NOTE(review): if phy_enable() failed and there is no
		 * regulator, execution still reaches clk_enable() and
		 * ST_SUSPENDED is cleared below even though -EAGAIN is
		 * returned — confirm whether this path should bail out. */
		clk_enable(state->clock[CSIS_CLK_GATE]);
	}
	if (state->flags & ST_STREAMING)
		s5pcsis_start_stream(state);

	state->flags &= ~ST_SUSPENDED;
unlock:
	mutex_unlock(&state->lock);
	return ret ? -EAGAIN : 0;
}
#ifdef CONFIG_PM_SLEEP
/* System sleep suspend: same path as runtime suspend. */
static int s5pcsis_pm_suspend(struct device *dev)
{
	return s5pcsis_suspend(dev);
}
/* System sleep resume: power up, then resync the runtime PM core's
 * view of the device to "active". */
static int s5pcsis_pm_resume(struct device *dev)
{
	int ret;

	ret = s5pcsis_resume(dev);
	if (!ret) {
		pm_runtime_disable(dev);
		ret = pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}
	return ret;
}
#endif
/* Platform remove: force the block off and undo everything probe did. */
static int __devexit s5pcsis_remove(struct platform_device *pdev)
{
	struct v4l2_subdev *sd = platform_get_drvdata(pdev);
	struct csis_state *state = sd_to_csis_state(sd);
	struct resource *res = state->regs_res;

	pm_runtime_disable(&pdev->dev);
	s5pcsis_suspend(&pdev->dev);	/* powers the block down if needed */
	clk_disable(state->clock[CSIS_CLK_MUX]);
	pm_runtime_set_suspended(&pdev->dev);
	s5pcsis_clk_put(state);
	if (state->supply)
		regulator_put(state->supply);

	media_entity_cleanup(&state->sd.entity);
	free_irq(state->irq, state);
	iounmap(state->regs);
	release_mem_region(res->start, resource_size(res));
	kfree(state);
	return 0;
}
static const struct dev_pm_ops s5pcsis_pm_ops = {
SET_RUNTIME_PM_OPS(s5pcsis_suspend, s5pcsis_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(s5pcsis_pm_suspend, s5pcsis_pm_resume)
};
static struct platform_driver s5pcsis_driver = {
.probe = s5pcsis_probe,
.remove = __devexit_p(s5pcsis_remove),
.driver = {
.name = CSIS_DRIVER_NAME,
.owner = THIS_MODULE,
.pm = &s5pcsis_pm_ops,
},
};
/* Module entry: platform_driver_probe() is used, so the probe routine
 * passed here takes precedence over the .probe member in the struct. */
static int __init s5pcsis_init(void)
{
	return platform_driver_probe(&s5pcsis_driver, s5pcsis_probe);
}
/* Module exit: unregister the platform driver. */
static void __exit s5pcsis_exit(void)
{
	platform_driver_unregister(&s5pcsis_driver);
}
module_init(s5pcsis_init);
module_exit(s5pcsis_exit);
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_DESCRIPTION("S5P/EXYNOS4 MIPI CSI receiver driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
hastalafiesta/Samsung_STE_Kernel | drivers/media/dvb/pt1/pt1.c | 3040 | 24980 | /*
* driver for Earthsoft PT1/PT2
*
* Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
*
* based on pt1dvr - http://pt1dvr.sourceforge.jp/
* by Tomoaki Ishikawa <tomy@users.sourceforge.jp>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "dvbdev.h"
#include "dvb_demux.h"
#include "dmxdev.h"
#include "dvb_net.h"
#include "dvb_frontend.h"
#include "va1j5jf8007t.h"
#include "va1j5jf8007s.h"
#define DRIVER_NAME "earth-pt1"
#define PT1_PAGE_SHIFT 12
#define PT1_PAGE_SIZE (1 << PT1_PAGE_SHIFT)
#define PT1_NR_UPACKETS 1024
#define PT1_NR_BUFS 511
/*
 * DMA layout: the board walks a circular linked list of "table" pages,
 * each holding the page-frame numbers of PT1_NR_BUFS "buffer" pages into
 * which it DMAs 32-bit "upacket" words.
 */

/* One DMA buffer page: PT1_NR_UPACKETS packed upacket words. */
struct pt1_buffer_page {
    __le32 upackets[PT1_NR_UPACKETS];
};

/* One DMA table page: PFN of the next table plus the buffer PFNs. */
struct pt1_table_page {
    __le32 next_pfn;
    __le32 buf_pfns[PT1_NR_BUFS];
};

/* CPU-side handle for a buffer page (kernel VA + bus address). */
struct pt1_buffer {
    struct pt1_buffer_page *page;
    dma_addr_t addr;
};

/* CPU-side handle for a table page and the buffers it references. */
struct pt1_table {
    struct pt1_table_page *page;
    dma_addr_t addr;
    struct pt1_buffer bufs[PT1_NR_BUFS];
};

#define PT1_NR_ADAPS 4

struct pt1_adapter;

/* Per-board state. */
struct pt1 {
    struct pci_dev *pdev;
    void __iomem *regs;            /* BAR0 register window */
    struct i2c_adapter i2c_adap;
    int i2c_running;               /* nonzero once the I2C engine was started */
    struct pt1_adapter *adaps[PT1_NR_ADAPS];
    struct pt1_table *tables;      /* ring of DMA tables (pt1_nr_tables long) */
    struct task_struct *kthread;   /* demux filter thread */
    struct mutex lock;             /* serializes writes to the power register */
    int power;                     /* power bit mirrored into register 1 */
    int reset;                     /* reset state; inverted into register 1 */
};

/* Per-stream (tuner/demod pair) state. */
struct pt1_adapter {
    struct pt1 *pt1;
    int index;                     /* stream index, 0..PT1_NR_ADAPS-1 */
    u8 *buf;                       /* one page of reassembled TS packets */
    int upacket_count;             /* 3-byte upackets consumed in current packet */
    int packet_count;              /* TS packets buffered before demux flush */
    struct dvb_adapter adap;
    struct dvb_demux demux;
    int users;                     /* active feed count; gates stream enable */
    struct dmxdev dmxdev;
    struct dvb_net net;
    struct dvb_frontend *fe;
    /* original frontend ops, chained from our power-managing wrappers */
    int (*orig_set_voltage)(struct dvb_frontend *fe,
                            fe_sec_voltage_t voltage);
    int (*orig_sleep)(struct dvb_frontend *fe);
    int (*orig_init)(struct dvb_frontend *fe);
    fe_sec_voltage_t voltage;
    int sleep;
};

#define pt1_printk(level, pt1, format, arg...) \
    dev_printk(level, &(pt1)->pdev->dev, format, ##arg)

/* Registers are 32-bit, at 4-byte stride from BAR0. */
static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
{
    writel(data, pt1->regs + reg * 4);
}

static u32 pt1_read_reg(struct pt1 *pt1, int reg)
{
    return readl(pt1->regs + reg * 4);
}

/* Number of DMA tables in the ring; tunable at module load. */
static int pt1_nr_tables = 64;
module_param_named(nr_tables, pt1_nr_tables, int, 0);

/* The values below are opaque command words written to register 0. */
static void pt1_increment_table_count(struct pt1 *pt1)
{
    pt1_write_reg(pt1, 0, 0x00000020);
}

static void pt1_init_table_count(struct pt1 *pt1)
{
    pt1_write_reg(pt1, 0, 0x00000010);
}

/* Point the board at the first table page and start DMA. */
static void pt1_register_tables(struct pt1 *pt1, u32 first_pfn)
{
    pt1_write_reg(pt1, 5, first_pfn);
    pt1_write_reg(pt1, 0, 0x0c000040);
}

static void pt1_unregister_tables(struct pt1 *pt1)
{
    pt1_write_reg(pt1, 0, 0x08080000);
}
/*
 * Wait for the board to report sync (bit 29 of register 0), clocking it
 * with command 0x08 between polls.  Returns 0 on success, -EIO if the
 * flag never appears within 57 attempts.
 */
static int pt1_sync(struct pt1 *pt1)
{
    int attempts = 57;

    while (attempts--) {
        if (pt1_read_reg(pt1, 0) & 0x20000000)
            return 0;
        pt1_write_reg(pt1, 0, 0x00000008);
    }
    pt1_printk(KERN_ERR, pt1, "could not sync\n");
    return -EIO;
}
/*
 * Shift a 57-bit device ID out of register 0, one bit (bit 30) per
 * clock command.  The value is read but not otherwise used by probe.
 */
static u64 pt1_identify(struct pt1 *pt1)
{
    u64 id = 0;
    int bit;

    for (bit = 0; bit < 57; bit++) {
        id |= (u64)(pt1_read_reg(pt1, 0) >> 30 & 1) << bit;
        pt1_write_reg(pt1, 0, 0x00000008);
    }
    return id;
}
/*
 * Issue the unlock command and poll (up to 3 times, ~1ms apart) for the
 * unlocked flag in bit 31 of register 0.  Returns 0 or -EIO.
 */
static int pt1_unlock(struct pt1 *pt1)
{
    int attempt;

    pt1_write_reg(pt1, 0, 0x00000008);
    for (attempt = 0; attempt < 3; attempt++) {
        if (pt1_read_reg(pt1, 0) & 0x80000000)
            return 0;
        schedule_timeout_uninterruptible((HZ + 999) / 1000);
    }
    pt1_printk(KERN_ERR, pt1, "could not unlock\n");
    return -EIO;
}
/*
 * Pulse the PCI-side reset (assert then deassert via register 0) and
 * poll bit 0 for completion, sleeping ~1ms between polls.
 */
static int pt1_reset_pci(struct pt1 *pt1)
{
    int i;
    pt1_write_reg(pt1, 0, 0x01010000);
    pt1_write_reg(pt1, 0, 0x01000000);
    for (i = 0; i < 10; i++) {
        if (pt1_read_reg(pt1, 0) & 0x00000001)
            return 0;
        schedule_timeout_uninterruptible((HZ + 999) / 1000);
    }
    pt1_printk(KERN_ERR, pt1, "could not reset PCI\n");
    return -EIO;
}

/*
 * Same pulse-and-poll sequence for the on-board RAM controller; the
 * completion flag is bit 1.
 */
static int pt1_reset_ram(struct pt1 *pt1)
{
    int i;
    pt1_write_reg(pt1, 0, 0x02020000);
    pt1_write_reg(pt1, 0, 0x02000000);
    for (i = 0; i < 10; i++) {
        if (pt1_read_reg(pt1, 0) & 0x00000002)
            return 0;
        schedule_timeout_uninterruptible((HZ + 999) / 1000);
    }
    pt1_printk(KERN_ERR, pt1, "could not reset RAM\n");
    return -EIO;
}
/*
 * One RAM-calibration step: snapshot status bit 2, issue command 0x02,
 * then busy-poll (1024 reads per ~1ms sleep, up to 10 sleeps) waiting
 * for the bit to toggle.  Returns 0 once it flips, -EIO on timeout.
 */
static int pt1_do_enable_ram(struct pt1 *pt1)
{
    int i, j;
    u32 status;
    status = pt1_read_reg(pt1, 0) & 0x00000004;
    pt1_write_reg(pt1, 0, 0x00000002);
    for (i = 0; i < 10; i++) {
        for (j = 0; j < 1024; j++) {
            if ((pt1_read_reg(pt1, 0) & 0x00000004) != status)
                return 0;
        }
        schedule_timeout_uninterruptible((HZ + 999) / 1000);
    }
    pt1_printk(KERN_ERR, pt1, "could not enable RAM\n");
    return -EIO;
}
/*
 * Run the RAM calibration: a short settle delay followed by a fixed
 * number of pt1_do_enable_ram() steps — 128 for the PT1 (device id
 * 0x211a), 166 for the PT2.  Propagates the first failure.
 */
static int pt1_enable_ram(struct pt1 *pt1)
{
    int steps, ret;

    schedule_timeout_uninterruptible((HZ + 999) / 1000);
    steps = (pt1->pdev->device == 0x211a) ? 128 : 166;
    while (steps-- > 0) {
        ret = pt1_do_enable_ram(pt1);
        if (ret < 0)
            return ret;
    }
    return 0;
}
/* Power down the on-board RAM (opaque command to register 0). */
static void pt1_disable_ram(struct pt1 *pt1)
{
    pt1_write_reg(pt1, 0, 0x0b0b0000);
}

/*
 * Enable/disable TS stream @index via register 2: the high byte selects
 * which stream bit is being written, the low byte carries the value.
 */
static void pt1_set_stream(struct pt1 *pt1, int index, int enabled)
{
    pt1_write_reg(pt1, 2, 1 << (index + 8) | enabled << index);
}

/* Start with all four streams disabled. */
static void pt1_init_streams(struct pt1 *pt1)
{
    int i;
    for (i = 0; i < PT1_NR_ADAPS; i++)
        pt1_set_stream(pt1, i, 0);
}
/*
 * Demultiplex one DMA buffer page.
 *
 * Each upacket word carries: stream index + 1 in bits 31:29 (0 means
 * no/invalid stream), a start-of-packet flag in bit 25, and 2-3 payload
 * bytes in bits 23:0.  63 upackets rebuild one 188-byte TS packet
 * (62 carry 3 bytes, the last carries 2); 21 packets fill the adapter's
 * page buffer, which is then pushed to the software demux.
 *
 * Returns 0 if the page is not yet complete (the hardware fills the
 * last word last, so a zero sentinel there means "still in flight"),
 * 1 after consuming the page and re-arming the sentinel.
 */
static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
{
    u32 upacket;
    int i;
    int index;
    struct pt1_adapter *adap;
    int offset;
    u8 *buf;
    /* last word still zero -> DMA into this page has not finished */
    if (!page->upackets[PT1_NR_UPACKETS - 1])
        return 0;
    for (i = 0; i < PT1_NR_UPACKETS; i++) {
        upacket = le32_to_cpu(page->upackets[i]);
        index = (upacket >> 29) - 1;
        if (index < 0 || index >= PT1_NR_ADAPS)
            continue;
        adap = pt1->adaps[index];
        /* bit 25 marks a packet boundary; resync on it */
        if (upacket >> 25 & 1)
            adap->upacket_count = 0;
        else if (!adap->upacket_count)
            continue;	/* wait for the next boundary before collecting */
        buf = adap->buf;
        offset = adap->packet_count * 188 + adap->upacket_count * 3;
        buf[offset] = upacket >> 16;
        buf[offset + 1] = upacket >> 8;
        /* the 63rd upacket contributes only 2 bytes (62*3+2 == 188) */
        if (adap->upacket_count != 62)
            buf[offset + 2] = upacket;
        if (++adap->upacket_count >= 63) {
            adap->upacket_count = 0;
            if (++adap->packet_count >= 21) {
                dvb_dmx_swfilter_packets(&adap->demux, buf, 21);
                adap->packet_count = 0;
            }
        }
    }
    /* re-arm the completion sentinel for the next pass over the ring */
    page->upackets[PT1_NR_UPACKETS - 1] = 0;
    return 1;
}
/*
 * Demux kthread: walk the table/buffer ring, filtering each completed
 * buffer page.  When a buffer is not yet complete, sleep ~1ms and retry
 * the same one.  After finishing a whole table, tell the hardware to
 * advance its own table counter and wrap the index as needed.
 */
static int pt1_thread(void *data)
{
    struct pt1 *pt1;
    int table_index;
    int buf_index;
    struct pt1_buffer_page *page;
    pt1 = data;
    set_freezable();
    table_index = 0;
    buf_index = 0;
    while (!kthread_should_stop()) {
        try_to_freeze();
        page = pt1->tables[table_index].bufs[buf_index].page;
        if (!pt1_filter(pt1, page)) {
            schedule_timeout_interruptible((HZ + 999) / 1000);
            continue;
        }
        if (++buf_index >= PT1_NR_BUFS) {
            pt1_increment_table_count(pt1);
            buf_index = 0;
            if (++table_index >= pt1_nr_tables)
                table_index = 0;
        }
    }
    return 0;
}
/* Release one coherent DMA page. */
static void pt1_free_page(struct pt1 *pt1, void *page, dma_addr_t addr)
{
    dma_free_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, page, addr);
}

/*
 * Allocate one coherent DMA page; returns its kernel VA and stores the
 * bus address in *addrp and the page-frame number (what the hardware
 * consumes) in *pfnp.  The PFN must fit in 32 bits.
 */
static void *pt1_alloc_page(struct pt1 *pt1, dma_addr_t *addrp, u32 *pfnp)
{
    void *page;
    dma_addr_t addr;
    page = dma_alloc_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, &addr,
                              GFP_KERNEL);
    if (page == NULL)
        return NULL;
    BUG_ON(addr & (PT1_PAGE_SIZE - 1));	/* must be page-aligned */
    BUG_ON(addr >> PT1_PAGE_SHIFT >> 31 >> 1);	/* PFN must fit in u32 */
    *addrp = addr;
    *pfnp = addr >> PT1_PAGE_SHIFT;
    return page;
}

static void pt1_cleanup_buffer(struct pt1 *pt1, struct pt1_buffer *buf)
{
    pt1_free_page(pt1, buf->page, buf->addr);
}

/*
 * Allocate one buffer page, clear its completion sentinel (last
 * upacket word) and record it in *buf; the PFN goes to *pfnp.
 */
static int
pt1_init_buffer(struct pt1 *pt1, struct pt1_buffer *buf, u32 *pfnp)
{
    struct pt1_buffer_page *page;
    dma_addr_t addr;
    page = pt1_alloc_page(pt1, &addr, pfnp);
    if (page == NULL)
        return -ENOMEM;
    page->upackets[PT1_NR_UPACKETS - 1] = 0;
    buf->page = page;
    buf->addr = addr;
    return 0;
}

/* Release a table's buffers, then the table page itself. */
static void pt1_cleanup_table(struct pt1 *pt1, struct pt1_table *table)
{
    int i;
    for (i = 0; i < PT1_NR_BUFS; i++)
        pt1_cleanup_buffer(pt1, &table->bufs[i]);
    pt1_free_page(pt1, table->page, table->addr);
}
/*
 * Allocate one table page plus its PT1_NR_BUFS buffer pages, writing
 * each buffer PFN into the table in hardware (little-endian) order.
 * On success the hardware's table counter is bumped.  On failure all
 * buffers allocated so far and the table page are released.
 * next_pfn is left for the caller to link the ring.
 */
static int
pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp)
{
    struct pt1_table_page *page;
    dma_addr_t addr;
    int i, ret;
    u32 buf_pfn;
    page = pt1_alloc_page(pt1, &addr, pfnp);
    if (page == NULL)
        return -ENOMEM;
    for (i = 0; i < PT1_NR_BUFS; i++) {
        ret = pt1_init_buffer(pt1, &table->bufs[i], &buf_pfn);
        if (ret < 0)
            goto err;
        page->buf_pfns[i] = cpu_to_le32(buf_pfn);
    }
    pt1_increment_table_count(pt1);
    table->page = page;
    table->addr = addr;
    return 0;
err:
    while (i--)
        pt1_cleanup_buffer(pt1, &table->bufs[i]);
    pt1_free_page(pt1, page, addr);
    return ret;
}
/*
 * Stop table DMA in the hardware, then free every table in the ring
 * and the ring array itself.
 */
static void pt1_cleanup_tables(struct pt1 *pt1)
{
    struct pt1_table *ring = pt1->tables;
    int idx = 0;

    pt1_unregister_tables(pt1);
    while (idx < pt1_nr_tables)
        pt1_cleanup_table(pt1, &ring[idx++]);
    vfree(ring);
}
/*
 * Build the circular DMA table ring (pt1_nr_tables entries), link each
 * table's next_pfn to its successor, close the ring back to the first
 * table, and hand the first PFN to the hardware.
 *
 * Fix: the original indexed tables[pt1_nr_tables - 1] (i.e. tables[-1])
 * and registered an uninitialized first_pfn when the nr_tables module
 * parameter was 0; bail out early in that case.
 */
static int pt1_init_tables(struct pt1 *pt1)
{
    struct pt1_table *tables;
    int i, ret;
    u32 first_pfn, pfn;

    if (!pt1_nr_tables)
        return 0;	/* DMA ring disabled via module parameter */
    tables = vmalloc(sizeof(struct pt1_table) * pt1_nr_tables);
    if (tables == NULL)
        return -ENOMEM;
    pt1_init_table_count(pt1);
    i = 0;
    ret = pt1_init_table(pt1, &tables[0], &first_pfn);
    if (ret)
        goto err;
    i++;
    while (i < pt1_nr_tables) {
        ret = pt1_init_table(pt1, &tables[i], &pfn);
        if (ret)
            goto err;
        /* link predecessor to this table */
        tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
        i++;
    }
    /* close the ring: last table points back at the first */
    tables[pt1_nr_tables - 1].page->next_pfn = cpu_to_le32(first_pfn);
    pt1_register_tables(pt1, first_pfn);
    pt1->tables = tables;
    return 0;
err:
    while (i--)
        pt1_cleanup_table(pt1, &tables[i]);
    vfree(tables);
    return ret;
}
/*
 * Demux feed callbacks: reference-count the feeds per adapter and turn
 * the hardware stream on at the first user, off at the last.
 */
static int pt1_start_feed(struct dvb_demux_feed *feed)
{
    struct pt1_adapter *adap;
    adap = container_of(feed->demux, struct pt1_adapter, demux);
    if (!adap->users++)
        pt1_set_stream(adap->pt1, adap->index, 1);
    return 0;
}

static int pt1_stop_feed(struct dvb_demux_feed *feed)
{
    struct pt1_adapter *adap;
    adap = container_of(feed->demux, struct pt1_adapter, demux);
    if (!--adap->users)
        pt1_set_stream(adap->pt1, adap->index, 0);
    return 0;
}
/*
 * Recompute and write the board's power/LNB register (register 1) from
 * the global power/reset state and each adapter's LNB voltage.  The
 * mutex serializes concurrent updates to the shared register.
 */
static void
pt1_update_power(struct pt1 *pt1)
{
    int bits;
    int i;
    struct pt1_adapter *adap;
    /* per-adapter sleep bit positions (see XXX note below) */
    static const int sleep_bits[] = {
        1 << 4,
        1 << 6 | 1 << 7,
        1 << 5,
        1 << 6 | 1 << 8,
    };
    bits = pt1->power | !pt1->reset << 3;
    mutex_lock(&pt1->lock);
    for (i = 0; i < PT1_NR_ADAPS; i++) {
        adap = pt1->adaps[i];
        switch (adap->voltage) {
        case SEC_VOLTAGE_13: /* actually 11V */
            bits |= 1 << 1;
            break;
        case SEC_VOLTAGE_18: /* actually 15V */
            bits |= 1 << 1 | 1 << 2;
            break;
        default:
            break;
        }
        /* XXX: The bits should be changed depending on adap->sleep. */
        bits |= sleep_bits[i];
    }
    pt1_write_reg(pt1, 1, bits);
    mutex_unlock(&pt1->lock);
}
/*
 * Frontend-op wrappers: record the requested state, refresh the shared
 * power register, then chain to the frontend's original handler if any.
 */
static int pt1_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
    struct pt1_adapter *adap;
    adap = container_of(fe->dvb, struct pt1_adapter, adap);
    adap->voltage = voltage;
    pt1_update_power(adap->pt1);
    if (adap->orig_set_voltage)
        return adap->orig_set_voltage(fe, voltage);
    else
        return 0;
}

static int pt1_sleep(struct dvb_frontend *fe)
{
    struct pt1_adapter *adap;
    adap = container_of(fe->dvb, struct pt1_adapter, adap);
    adap->sleep = 1;
    pt1_update_power(adap->pt1);
    if (adap->orig_sleep)
        return adap->orig_sleep(fe);
    else
        return 0;
}

/* Wake the adapter; the ~1ms sleep lets power settle before init. */
static int pt1_wakeup(struct dvb_frontend *fe)
{
    struct pt1_adapter *adap;
    adap = container_of(fe->dvb, struct pt1_adapter, adap);
    adap->sleep = 0;
    pt1_update_power(adap->pt1);
    schedule_timeout_uninterruptible((HZ + 999) / 1000);
    if (adap->orig_init)
        return adap->orig_init(fe);
    else
        return 0;
}
/*
 * Tear down one adapter in strict reverse order of pt1_alloc_adapter:
 * net, demux connection, dmxdev, demux, dvb adapter, TS buffer, struct.
 */
static void pt1_free_adapter(struct pt1_adapter *adap)
{
    dvb_net_release(&adap->net);
    adap->demux.dmx.close(&adap->demux.dmx);
    dvb_dmxdev_release(&adap->dmxdev);
    dvb_dmx_release(&adap->demux);
    dvb_unregister_adapter(&adap->adap);
    free_page((unsigned long)adap->buf);
    kfree(adap);
}
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/*
 * Allocate and register one DVB adapter: struct + one page of TS buffer,
 * then dvb adapter, software demux, dmxdev and dvb_net, unwinding with
 * gotos in reverse order on failure.  Returns ERR_PTR on error.
 */
static struct pt1_adapter *
pt1_alloc_adapter(struct pt1 *pt1)
{
    struct pt1_adapter *adap;
    void *buf;
    struct dvb_adapter *dvb_adap;
    struct dvb_demux *demux;
    struct dmxdev *dmxdev;
    int ret;
    adap = kzalloc(sizeof(struct pt1_adapter), GFP_KERNEL);
    if (!adap) {
        ret = -ENOMEM;
        goto err;
    }
    adap->pt1 = pt1;
    /* start powered down; pt1_wakeup/pt1_set_voltage flip these later */
    adap->voltage = SEC_VOLTAGE_OFF;
    adap->sleep = 1;
    /* one page holds 21 TS packets (21 * 188 <= 4096) */
    buf = (u8 *)__get_free_page(GFP_KERNEL);
    if (!buf) {
        ret = -ENOMEM;
        goto err_kfree;
    }
    adap->buf = buf;
    adap->upacket_count = 0;
    adap->packet_count = 0;
    dvb_adap = &adap->adap;
    dvb_adap->priv = adap;
    ret = dvb_register_adapter(dvb_adap, DRIVER_NAME, THIS_MODULE,
                               &pt1->pdev->dev, adapter_nr);
    if (ret < 0)
        goto err_free_page;
    demux = &adap->demux;
    demux->dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
    demux->priv = adap;
    demux->feednum = 256;
    demux->filternum = 256;
    demux->start_feed = pt1_start_feed;
    demux->stop_feed = pt1_stop_feed;
    demux->write_to_decoder = NULL;
    ret = dvb_dmx_init(demux);
    if (ret < 0)
        goto err_unregister_adapter;
    dmxdev = &adap->dmxdev;
    dmxdev->filternum = 256;
    dmxdev->demux = &demux->dmx;
    dmxdev->capabilities = 0;
    ret = dvb_dmxdev_init(dmxdev, dvb_adap);
    if (ret < 0)
        goto err_dmx_release;
    dvb_net_init(dvb_adap, &adap->net, &demux->dmx);
    return adap;
err_dmx_release:
    dvb_dmx_release(demux);
err_unregister_adapter:
    dvb_unregister_adapter(dvb_adap);
err_free_page:
    free_page((unsigned long)buf);
err_kfree:
    kfree(adap);
err:
    return ERR_PTR(ret);
}
/* Free all four adapters. */
static void pt1_cleanup_adapters(struct pt1 *pt1)
{
    int i;
    for (i = 0; i < PT1_NR_ADAPS; i++)
        pt1_free_adapter(pt1->adaps[i]);
}

/*
 * Allocate all four adapters; on failure free the ones already made
 * (indices below i) and return the error.
 */
static int pt1_init_adapters(struct pt1 *pt1)
{
    int i;
    struct pt1_adapter *adap;
    int ret;
    for (i = 0; i < PT1_NR_ADAPS; i++) {
        adap = pt1_alloc_adapter(pt1);
        if (IS_ERR(adap)) {
            ret = PTR_ERR(adap);
            goto err;
        }
        adap->index = i;
        pt1->adaps[i] = adap;
    }
    return 0;
err:
    while (i--)
        pt1_free_adapter(pt1->adaps[i]);
    return ret;
}
static void pt1_cleanup_frontend(struct pt1_adapter *adap)
{
    dvb_unregister_frontend(adap->fe);
}

/*
 * Register a frontend on an adapter, first interposing our power-aware
 * set_voltage/sleep/init wrappers while saving the originals so the
 * wrappers can chain to them.
 */
static int pt1_init_frontend(struct pt1_adapter *adap, struct dvb_frontend *fe)
{
    int ret;
    adap->orig_set_voltage = fe->ops.set_voltage;
    adap->orig_sleep = fe->ops.sleep;
    adap->orig_init = fe->ops.init;
    fe->ops.set_voltage = pt1_set_voltage;
    fe->ops.sleep = pt1_sleep;
    fe->ops.init = pt1_wakeup;
    ret = dvb_register_frontend(&adap->adap, fe);
    if (ret < 0)
        return ret;
    adap->fe = fe;
    return 0;
}

static void pt1_cleanup_frontends(struct pt1 *pt1)
{
    int i;
    for (i = 0; i < PT1_NR_ADAPS; i++)
        pt1_cleanup_frontend(pt1->adaps[i]);
}
/* One ISDB-S + ISDB-T demodulator pair; two pairs per board. */
struct pt1_config {
    struct va1j5jf8007s_config va1j5jf8007s_config;
    struct va1j5jf8007t_config va1j5jf8007t_config;
};

/* PT1 (device 0x211a): demodulators clocked at 20MHz. */
static const struct pt1_config pt1_configs[2] = {
    {
        {
            .demod_address = 0x1b,
            .frequency = VA1J5JF8007S_20MHZ,
        },
        {
            .demod_address = 0x1a,
            .frequency = VA1J5JF8007T_20MHZ,
        },
    }, {
        {
            .demod_address = 0x19,
            .frequency = VA1J5JF8007S_20MHZ,
        },
        {
            .demod_address = 0x18,
            .frequency = VA1J5JF8007T_20MHZ,
        },
    },
};

/* PT2: same I2C addresses, 25MHz clocking. */
static const struct pt1_config pt2_configs[2] = {
    {
        {
            .demod_address = 0x1b,
            .frequency = VA1J5JF8007S_25MHZ,
        },
        {
            .demod_address = 0x1a,
            .frequency = VA1J5JF8007T_25MHZ,
        },
    }, {
        {
            .demod_address = 0x19,
            .frequency = VA1J5JF8007S_25MHZ,
        },
        {
            .demod_address = 0x18,
            .frequency = VA1J5JF8007T_25MHZ,
        },
    },
};
/*
 * Attach and register all four frontends: two S/T demod pairs, one
 * config per pair.  `i` counts attached-but-unregistered frontends,
 * `j` counts registered ones; the error path releases the window
 * between them and unregisters the rest.
 */
static int pt1_init_frontends(struct pt1 *pt1)
{
    int i, j;
    struct i2c_adapter *i2c_adap;
    const struct pt1_config *configs, *config;
    struct dvb_frontend *fe[4];
    int ret;
    i = 0;
    j = 0;
    i2c_adap = &pt1->i2c_adap;
    configs = pt1->pdev->device == 0x211a ? pt1_configs : pt2_configs;
    do {
        /* frontends 2k/2k+1 share config k */
        config = &configs[i / 2];
        fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
                                    i2c_adap);
        if (!fe[i]) {
            ret = -ENODEV; /* This does not sound nice... */
            goto err;
        }
        i++;
        fe[i] = va1j5jf8007t_attach(&config->va1j5jf8007t_config,
                                    i2c_adap);
        if (!fe[i]) {
            ret = -ENODEV;
            goto err;
        }
        i++;
        ret = va1j5jf8007s_prepare(fe[i - 2]);
        if (ret < 0)
            goto err;
        ret = va1j5jf8007t_prepare(fe[i - 1]);
        if (ret < 0)
            goto err;
    } while (i < 4);
    do {
        ret = pt1_init_frontend(pt1->adaps[j], fe[j]);
        if (ret < 0)
            goto err;
    } while (++j < 4);
    return 0;
err:
    /* release attached-but-unregistered frontends ... */
    while (i-- > j)
        fe[i]->ops.release(fe[i]);
    /* ... then unregister the already-registered ones */
    while (j--)
        dvb_unregister_frontend(fe[j]);
    return ret;
}
/*
 * The board runs I2C from a small on-chip instruction memory.  Each
 * "emit" stores one microinstruction at @addr via register 4: the SCL
 * and SDA levels (note: inverted), a busy flag, a read-enable flag, and
 * the address of the next instruction.  The helpers below build bit-
 * banged I2C sequences out of these instructions, returning the next
 * free instruction address through *addrp.
 */
static void pt1_i2c_emit(struct pt1 *pt1, int addr, int busy, int read_enable,
                         int clock, int data, int next_addr)
{
    pt1_write_reg(pt1, 4, addr << 18 | busy << 13 | read_enable << 12 |
                  !clock << 11 | !data << 10 | next_addr);
}

/* Drive one data bit: SDA set, then SCL high pulse, then SCL low. */
static void pt1_i2c_write_bit(struct pt1 *pt1, int addr, int *addrp, int data)
{
    pt1_i2c_emit(pt1, addr, 1, 0, 0, data, addr + 1);
    pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, data, addr + 2);
    pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, data, addr + 3);
    *addrp = addr + 3;
}

/* Sample one bit: release SDA, pulse SCL, latch with read_enable. */
static void pt1_i2c_read_bit(struct pt1 *pt1, int addr, int *addrp)
{
    pt1_i2c_emit(pt1, addr, 1, 0, 0, 1, addr + 1);
    pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 1, addr + 2);
    pt1_i2c_emit(pt1, addr + 2, 1, 1, 1, 1, addr + 3);
    pt1_i2c_emit(pt1, addr + 3, 1, 0, 0, 1, addr + 4);
    *addrp = addr + 4;
}

/* Write a byte MSB-first, then release SDA for the slave's ACK slot. */
static void pt1_i2c_write_byte(struct pt1 *pt1, int addr, int *addrp, int data)
{
    int i;
    for (i = 0; i < 8; i++)
        pt1_i2c_write_bit(pt1, addr, &addr, data >> (7 - i) & 1);
    pt1_i2c_write_bit(pt1, addr, &addr, 1);
    *addrp = addr;
}

/* Read a byte MSB-first, then send ACK (0) or, if @last, NACK (1). */
static void pt1_i2c_read_byte(struct pt1 *pt1, int addr, int *addrp, int last)
{
    int i;
    for (i = 0; i < 8; i++)
        pt1_i2c_read_bit(pt1, addr, &addr);
    pt1_i2c_write_bit(pt1, addr, &addr, last);
    *addrp = addr;
}

/* Emit an I2C (repeated-)START condition: SDA falls while SCL high. */
static void pt1_i2c_prepare(struct pt1 *pt1, int addr, int *addrp)
{
    pt1_i2c_emit(pt1, addr, 1, 0, 1, 1, addr + 1);
    pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
    pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, 0, addr + 3);
    *addrp = addr + 3;
}
/* Emit a START, the 7-bit address with R/W=0, then the payload bytes. */
static void
pt1_i2c_write_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
    int i;
    pt1_i2c_prepare(pt1, addr, &addr);
    pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1);
    for (i = 0; i < msg->len; i++)
        pt1_i2c_write_byte(pt1, addr, &addr, msg->buf[i]);
    *addrp = addr;
}

/* Emit a START, the address with R/W=1, then reads (NACK on the last). */
static void
pt1_i2c_read_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
    int i;
    pt1_i2c_prepare(pt1, addr, &addr);
    pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1 | 1);
    for (i = 0; i < msg->len; i++)
        pt1_i2c_read_byte(pt1, addr, &addr, i == msg->len - 1);
    *addrp = addr;
}
/*
 * Emit a STOP condition, jump back to instruction 0, kick the engine
 * (command 0x04 to register 0) and wait for the busy flag (bit 7) to
 * clear, sleeping ~1ms per poll.  Returns -EINTR if interrupted.
 */
static int pt1_i2c_end(struct pt1 *pt1, int addr)
{
    pt1_i2c_emit(pt1, addr, 1, 0, 0, 0, addr + 1);
    pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
    pt1_i2c_emit(pt1, addr + 2, 1, 0, 1, 1, 0);
    pt1_write_reg(pt1, 0, 0x00000004);
    do {
        if (signal_pending(current))
            return -EINTR;
        schedule_timeout_interruptible((HZ + 999) / 1000);
    } while (pt1_read_reg(pt1, 0) & 0x00000080);
    return 0;
}

/*
 * Start a program at instruction 0 (an idle self-loop until patched by
 * subsequent emits).  On the very first transfer also emit an extra
 * STOP-like preamble to put the bus in a known state.
 */
static void pt1_i2c_begin(struct pt1 *pt1, int *addrp)
{
    int addr;
    addr = 0;
    pt1_i2c_emit(pt1, addr, 0, 0, 1, 1, addr /* itself */);
    addr = addr + 1;
    if (!pt1->i2c_running) {
        pt1_i2c_emit(pt1, addr, 1, 0, 1, 1, addr + 1);
        pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
        addr = addr + 2;
        pt1->i2c_running = 1;
    }
    *addrp = addr;
}
/*
 * i2c_algorithm master_xfer.  Supports plain writes, and write-then-read
 * pairs where the read part is at most 4 bytes (the result comes back
 * packed in 32-bit register 2, last byte in the low bits).  Standalone
 * read messages are rejected with -ENOTSUPP.
 */
static int pt1_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
    struct pt1 *pt1;
    int i;
    struct i2c_msg *msg, *next_msg;
    int addr, ret;
    u16 len;
    u32 word;
    pt1 = i2c_get_adapdata(adap);
    for (i = 0; i < num; i++) {
        msg = &msgs[i];
        /* a read not preceded by a write is unsupported */
        if (msg->flags & I2C_M_RD)
            return -ENOTSUPP;
        if (i + 1 < num)
            next_msg = &msgs[i + 1];
        else
            next_msg = NULL;
        if (next_msg && next_msg->flags & I2C_M_RD) {
            i++;	/* consume the read message too */
            len = next_msg->len;
            if (len > 4)
                return -ENOTSUPP;	/* result register is 32 bits */
            pt1_i2c_begin(pt1, &addr);
            pt1_i2c_write_msg(pt1, addr, &addr, msg);
            pt1_i2c_read_msg(pt1, addr, &addr, next_msg);
            ret = pt1_i2c_end(pt1, addr);
            if (ret < 0)
                return ret;
            /* unpack the read bytes, last byte in the low 8 bits */
            word = pt1_read_reg(pt1, 2);
            while (len--) {
                next_msg->buf[len] = word;
                word >>= 8;
            }
        } else {
            pt1_i2c_begin(pt1, &addr);
            pt1_i2c_write_msg(pt1, addr, &addr, msg);
            ret = pt1_i2c_end(pt1, addr);
            if (ret < 0)
                return ret;
        }
    }
    return num;
}
static u32 pt1_i2c_func(struct i2c_adapter *adap)
{
    return I2C_FUNC_I2C;
}

static const struct i2c_algorithm pt1_i2c_algo = {
    .master_xfer = pt1_i2c_xfer,
    .functionality = pt1_i2c_func,
};

/* Pad instruction 0 with idle slots to let the bus settle. */
static void pt1_i2c_wait(struct pt1 *pt1)
{
    int i;
    for (i = 0; i < 128; i++)
        pt1_i2c_emit(pt1, 0, 0, 0, 1, 1, 0);
}

/* Fill the whole instruction memory with idle instructions. */
static void pt1_i2c_init(struct pt1 *pt1)
{
    int i;
    for (i = 0; i < 1024; i++)
        pt1_i2c_emit(pt1, i, 0, 0, 1, 1, 0);
}
/*
 * Device removal: stop the demux thread first (it touches the DMA
 * tables), then unwind everything pt1_probe set up, in reverse order.
 */
static void __devexit pt1_remove(struct pci_dev *pdev)
{
    struct pt1 *pt1;
    void __iomem *regs;
    pt1 = pci_get_drvdata(pdev);
    regs = pt1->regs;	/* keep for iounmap after pt1 is freed */
    kthread_stop(pt1->kthread);
    pt1_cleanup_tables(pt1);
    pt1_cleanup_frontends(pt1);
    pt1_disable_ram(pt1);
    pt1->power = 0;
    pt1->reset = 1;
    pt1_update_power(pt1);
    pt1_cleanup_adapters(pt1);
    i2c_del_adapter(&pt1->i2c_adap);
    pci_set_drvdata(pdev, NULL);
    kfree(pt1);
    pci_iounmap(pdev, regs);
    pci_release_regions(pdev);
    pci_disable_device(pdev);
}
/*
 * Probe: map BAR0, bring up the board (sync, unlock, PCI/RAM reset,
 * RAM calibration), register the I2C bus, the DVB adapters, the
 * frontends and the DMA table ring, then start the demux thread.
 * Unwinds everything in reverse order on failure.
 *
 * Fix: the original called mutex_init(&pt1->lock) twice (once before
 * and once after pt1_init_adapters); re-initializing a live mutex is
 * incorrect, so only the first initialization is kept.
 */
static int __devinit
pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    int ret;
    void __iomem *regs;
    struct pt1 *pt1;
    struct i2c_adapter *i2c_adap;
    struct task_struct *kthread;

    ret = pci_enable_device(pdev);
    if (ret < 0)
        goto err;
    /* the board hands 32-bit PFNs to the hardware */
    ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
    if (ret < 0)
        goto err_pci_disable_device;
    pci_set_master(pdev);
    ret = pci_request_regions(pdev, DRIVER_NAME);
    if (ret < 0)
        goto err_pci_disable_device;
    regs = pci_iomap(pdev, 0, 0);
    if (!regs) {
        ret = -EIO;
        goto err_pci_release_regions;
    }
    pt1 = kzalloc(sizeof(struct pt1), GFP_KERNEL);
    if (!pt1) {
        ret = -ENOMEM;
        goto err_pci_iounmap;
    }
    mutex_init(&pt1->lock);
    pt1->pdev = pdev;
    pt1->regs = regs;
    pci_set_drvdata(pdev, pt1);
    ret = pt1_init_adapters(pt1);
    if (ret < 0)
        goto err_kfree;
    /* assert reset, power off, and latch that into the hardware */
    pt1->power = 0;
    pt1->reset = 1;
    pt1_update_power(pt1);
    i2c_adap = &pt1->i2c_adap;
    i2c_adap->algo = &pt1_i2c_algo;
    i2c_adap->algo_data = NULL;
    i2c_adap->dev.parent = &pdev->dev;
    strcpy(i2c_adap->name, DRIVER_NAME);
    i2c_set_adapdata(i2c_adap, pt1);
    ret = i2c_add_adapter(i2c_adap);
    if (ret < 0)
        goto err_pt1_cleanup_adapters;
    pt1_i2c_init(pt1);
    pt1_i2c_wait(pt1);
    ret = pt1_sync(pt1);
    if (ret < 0)
        goto err_i2c_del_adapter;
    pt1_identify(pt1);
    ret = pt1_unlock(pt1);
    if (ret < 0)
        goto err_i2c_del_adapter;
    ret = pt1_reset_pci(pt1);
    if (ret < 0)
        goto err_i2c_del_adapter;
    ret = pt1_reset_ram(pt1);
    if (ret < 0)
        goto err_i2c_del_adapter;
    ret = pt1_enable_ram(pt1);
    if (ret < 0)
        goto err_i2c_del_adapter;
    pt1_init_streams(pt1);
    /* power up, wait ~20ms, then release reset and wait ~1ms */
    pt1->power = 1;
    pt1_update_power(pt1);
    schedule_timeout_uninterruptible((HZ + 49) / 50);
    pt1->reset = 0;
    pt1_update_power(pt1);
    schedule_timeout_uninterruptible((HZ + 999) / 1000);
    ret = pt1_init_frontends(pt1);
    if (ret < 0)
        goto err_pt1_disable_ram;
    ret = pt1_init_tables(pt1);
    if (ret < 0)
        goto err_pt1_cleanup_frontends;
    kthread = kthread_run(pt1_thread, pt1, "pt1");
    if (IS_ERR(kthread)) {
        ret = PTR_ERR(kthread);
        goto err_pt1_cleanup_tables;
    }
    pt1->kthread = kthread;
    return 0;

err_pt1_cleanup_tables:
    pt1_cleanup_tables(pt1);
err_pt1_cleanup_frontends:
    pt1_cleanup_frontends(pt1);
err_pt1_disable_ram:
    pt1_disable_ram(pt1);
    pt1->power = 0;
    pt1->reset = 1;
    pt1_update_power(pt1);
err_i2c_del_adapter:
    i2c_del_adapter(i2c_adap);
err_pt1_cleanup_adapters:
    pt1_cleanup_adapters(pt1);
err_kfree:
    pci_set_drvdata(pdev, NULL);
    kfree(pt1);
err_pci_iounmap:
    pci_iounmap(pdev, regs);
err_pci_release_regions:
    pci_release_regions(pdev);
err_pci_disable_device:
    pci_disable_device(pdev);
err:
    return ret;
}
/* 0x211a = PT1, 0x222a = PT2 (both Xilinx vendor ID). */
static struct pci_device_id pt1_id_table[] = {
    { PCI_DEVICE(0x10ee, 0x211a) },
    { PCI_DEVICE(0x10ee, 0x222a) },
    { },
};
MODULE_DEVICE_TABLE(pci, pt1_id_table);

static struct pci_driver pt1_driver = {
    .name = DRIVER_NAME,
    .probe = pt1_probe,
    .remove = __devexit_p(pt1_remove),
    .id_table = pt1_id_table,
};

static int __init pt1_init(void)
{
    return pci_register_driver(&pt1_driver);
}

static void __exit pt1_cleanup(void)
{
    pci_unregister_driver(&pt1_driver);
}

module_init(pt1_init);
module_exit(pt1_cleanup);

MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
MODULE_DESCRIPTION("Earthsoft PT1/PT2 Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
wSedlacek/kernel_moto_shamu | drivers/media/platform/s5p-tv/mixer_reg.c | 3808 | 14917 | /*
* Samsung TV Mixer driver
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
*
* Tomasz Stanislawski, <t.stanislaws@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License,
* or (at your option) any later version
*/
#include "mixer.h"
#include "regs-mixer.h"
#include "regs-vp.h"
#include <linux/delay.h>
/* Register access subroutines */
/* Register access subroutines for the Video Processor (vp_*) and the
 * Mixer (mxr_*) register banks. */

static inline u32 vp_read(struct mxr_device *mdev, u32 reg_id)
{
    return readl(mdev->res.vp_regs + reg_id);
}

static inline void vp_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
    writel(val, mdev->res.vp_regs + reg_id);
}

/* Read-modify-write: only bits in @mask take the new value. */
static inline void vp_write_mask(struct mxr_device *mdev, u32 reg_id,
                                 u32 val, u32 mask)
{
    u32 old = vp_read(mdev, reg_id);
    val = (val & mask) | (old & ~mask);
    writel(val, mdev->res.vp_regs + reg_id);
}

static inline u32 mxr_read(struct mxr_device *mdev, u32 reg_id)
{
    return readl(mdev->res.mxr_regs + reg_id);
}

static inline void mxr_write(struct mxr_device *mdev, u32 reg_id, u32 val)
{
    writel(val, mdev->res.mxr_regs + reg_id);
}

/* Read-modify-write variant for the mixer bank. */
static inline void mxr_write_mask(struct mxr_device *mdev, u32 reg_id,
                                  u32 val, u32 mask)
{
    u32 old = mxr_read(mdev, reg_id);
    val = (val & mask) | (old & ~mask);
    writel(val, mdev->res.mxr_regs + reg_id);
}
/*
 * Gate shadow-register commit on vsync for both the mixer and the
 * video processor; disable while programming multiple registers so a
 * partially-written configuration is never latched.
 */
void mxr_vsync_set_update(struct mxr_device *mdev, int en)
{
    /* block update on vsync */
    mxr_write_mask(mdev, MXR_STATUS, en ? MXR_STATUS_SYNC_ENABLE : 0,
                   MXR_STATUS_SYNC_ENABLE);
    vp_write(mdev, VP_SHADOW_UPDATE, en ? VP_SHADOW_UPDATE_ENABLE : 0);
}
/*
 * Soft-reset the Video Processor and poll (up to 100 * 10ms) for the
 * PROCESSING bit to clear; WARN if it never does.
 *
 * Fix: the original initialized `tries` to 100 at declaration and then
 * immediately reassigned it in the for-init — a dead store.
 */
static void __mxr_reg_vp_reset(struct mxr_device *mdev)
{
    int tries;

    vp_write(mdev, VP_SRESET, VP_SRESET_PROCESSING);
    for (tries = 100; tries; --tries) {
        /* waiting until VP_SRESET_PROCESSING is 0 */
        if (~vp_read(mdev, VP_SRESET) & VP_SRESET_PROCESSING)
            break;
        mdelay(10);
    }
    WARN(tries == 0, "failed to reset Video Processor\n");
}
static void mxr_reg_vp_default_filter(struct mxr_device *mdev);

/*
 * Put the mixer and video processor into a known default state:
 * RGB888 output, 16-beat DMA bursts, default layer priority, dark-gray
 * background, opaque premultiplied graphic layers, VP reset + default
 * scaler filters, and all interrupts enabled.  All register writes are
 * batched under a disabled vsync-update window.
 */
void mxr_reg_reset(struct mxr_device *mdev)
{
    unsigned long flags;
    u32 val; /* value stored to register */

    spin_lock_irqsave(&mdev->reg_slock, flags);
    mxr_vsync_set_update(mdev, MXR_DISABLE);

    /* set output in RGB888 mode */
    mxr_write(mdev, MXR_CFG, MXR_CFG_OUT_RGB888);

    /* 16 beat burst in DMA */
    mxr_write_mask(mdev, MXR_STATUS, MXR_STATUS_16_BURST,
                   MXR_STATUS_BURST_MASK);

    /* setting default layer priority: layer1 > video > layer0
     * because typical usage scenario would be
     * layer0 - framebuffer
     * video - video overlay
     * layer1 - OSD
     */
    val = MXR_LAYER_CFG_GRP0_VAL(1);
    val |= MXR_LAYER_CFG_VP_VAL(2);
    val |= MXR_LAYER_CFG_GRP1_VAL(3);
    mxr_write(mdev, MXR_LAYER_CFG, val);

    /* use dark gray background color */
    mxr_write(mdev, MXR_BG_COLOR0, 0x808080);
    mxr_write(mdev, MXR_BG_COLOR1, 0x808080);
    mxr_write(mdev, MXR_BG_COLOR2, 0x808080);

    /* setting graphical layers */
    val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
    val |= MXR_GRP_CFG_BLEND_PRE_MUL; /* premul mode */
    val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */

    /* the same configuration for both layers */
    mxr_write(mdev, MXR_GRAPHIC_CFG(0), val);
    mxr_write(mdev, MXR_GRAPHIC_CFG(1), val);

    /* configuration of Video Processor Registers */
    __mxr_reg_vp_reset(mdev);
    mxr_reg_vp_default_filter(mdev);

    /* enable all interrupts */
    mxr_write_mask(mdev, MXR_INT_EN, ~0, MXR_INT_EN_ALL);

    mxr_vsync_set_update(mdev, MXR_ENABLE);
    spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/*
 * Program pixel format and geometry (span, size, scaling ratios,
 * source and destination offsets) for graphic layer @idx, batched
 * under a disabled vsync-update window.
 */
void mxr_reg_graph_format(struct mxr_device *mdev, int idx,
                          const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
    u32 val;
    unsigned long flags;

    spin_lock_irqsave(&mdev->reg_slock, flags);
    mxr_vsync_set_update(mdev, MXR_DISABLE);

    /* setup format */
    mxr_write_mask(mdev, MXR_GRAPHIC_CFG(idx),
                   MXR_GRP_CFG_FORMAT_VAL(fmt->cookie), MXR_GRP_CFG_FORMAT_MASK);

    /* setup geometry */
    mxr_write(mdev, MXR_GRAPHIC_SPAN(idx), geo->src.full_width);
    val = MXR_GRP_WH_WIDTH(geo->src.width);
    val |= MXR_GRP_WH_HEIGHT(geo->src.height);
    val |= MXR_GRP_WH_H_SCALE(geo->x_ratio);
    val |= MXR_GRP_WH_V_SCALE(geo->y_ratio);
    mxr_write(mdev, MXR_GRAPHIC_WH(idx), val);

    /* setup offsets in source image */
    val = MXR_GRP_SXY_SX(geo->src.x_offset);
    val |= MXR_GRP_SXY_SY(geo->src.y_offset);
    mxr_write(mdev, MXR_GRAPHIC_SXY(idx), val);

    /* setup offsets in display image */
    val = MXR_GRP_DXY_DX(geo->dst.x_offset);
    val |= MXR_GRP_DXY_DY(geo->dst.y_offset);
    mxr_write(mdev, MXR_GRAPHIC_DXY(idx), val);

    mxr_vsync_set_update(mdev, MXR_ENABLE);
    spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/*
 * Program the video-processor layer's format and geometry.  In
 * interlaced output the destination height/offset are halved because
 * the VP writes one field at a time.
 */
void mxr_reg_vp_format(struct mxr_device *mdev,
                       const struct mxr_format *fmt, const struct mxr_geometry *geo)
{
    unsigned long flags;

    spin_lock_irqsave(&mdev->reg_slock, flags);
    mxr_vsync_set_update(mdev, MXR_DISABLE);

    vp_write_mask(mdev, VP_MODE, fmt->cookie, VP_MODE_FMT_MASK);

    /* setting size of input image */
    vp_write(mdev, VP_IMG_SIZE_Y, VP_IMG_HSIZE(geo->src.full_width) |
             VP_IMG_VSIZE(geo->src.full_height));
    /* chroma height has to be reduced by 2 to avoid chroma distortions */
    vp_write(mdev, VP_IMG_SIZE_C, VP_IMG_HSIZE(geo->src.full_width) |
             VP_IMG_VSIZE(geo->src.full_height / 2));
    vp_write(mdev, VP_SRC_WIDTH, geo->src.width);
    vp_write(mdev, VP_SRC_HEIGHT, geo->src.height);
    vp_write(mdev, VP_SRC_H_POSITION,
             VP_SRC_H_POSITION_VAL(geo->src.x_offset));
    vp_write(mdev, VP_SRC_V_POSITION, geo->src.y_offset);

    vp_write(mdev, VP_DST_WIDTH, geo->dst.width);
    vp_write(mdev, VP_DST_H_POSITION, geo->dst.x_offset);
    if (geo->dst.field == V4L2_FIELD_INTERLACED) {
        /* per-field output: halve vertical size and position */
        vp_write(mdev, VP_DST_HEIGHT, geo->dst.height / 2);
        vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset / 2);
    } else {
        vp_write(mdev, VP_DST_HEIGHT, geo->dst.height);
        vp_write(mdev, VP_DST_V_POSITION, geo->dst.y_offset);
    }

    vp_write(mdev, VP_H_RATIO, geo->x_ratio);
    vp_write(mdev, VP_V_RATIO, geo->y_ratio);

    vp_write(mdev, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);

    mxr_vsync_set_update(mdev, MXR_ENABLE);
    spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/*
 * Set graphic layer @idx's DMA base address; a NULL (zero) address
 * also disables the layer, a non-zero one enables it.
 */
void mxr_reg_graph_buffer(struct mxr_device *mdev, int idx, dma_addr_t addr)
{
    u32 val = addr ? ~0 : 0;	/* enable mask iff a buffer is set */
    unsigned long flags;

    spin_lock_irqsave(&mdev->reg_slock, flags);
    mxr_vsync_set_update(mdev, MXR_DISABLE);

    if (idx == 0)
        mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
    else
        mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
    mxr_write(mdev, MXR_GRAPHIC_BASE(idx), addr);

    mxr_vsync_set_update(mdev, MXR_ENABLE);
    spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

/*
 * Set the video layer's luma/chroma addresses for both fields;
 * enables the layer and the VP iff the first luma address is set.
 */
void mxr_reg_vp_buffer(struct mxr_device *mdev,
                       dma_addr_t luma_addr[2], dma_addr_t chroma_addr[2])
{
    u32 val = luma_addr[0] ? ~0 : 0;
    unsigned long flags;

    spin_lock_irqsave(&mdev->reg_slock, flags);
    mxr_vsync_set_update(mdev, MXR_DISABLE);

    mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_VP_ENABLE);
    vp_write_mask(mdev, VP_ENABLE, val, VP_ENABLE_ON);
    /* TODO: fix tiled mode */
    vp_write(mdev, VP_TOP_Y_PTR, luma_addr[0]);
    vp_write(mdev, VP_TOP_C_PTR, chroma_addr[0]);
    vp_write(mdev, VP_BOT_Y_PTR, luma_addr[1]);
    vp_write(mdev, VP_BOT_C_PTR, chroma_addr[1]);

    mxr_vsync_set_update(mdev, MXR_ENABLE);
    spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/*
 * Per-layer vsync bookkeeping: the buffer previously shadowed is now
 * done, the update buffer becomes the shadow, and the next queued
 * buffer (if any) becomes the new update buffer.
 *
 * Fix: the original computed &layer->enq_list BEFORE the layer == NULL
 * check — taking the address of a member of a NULL pointer is undefined
 * behavior.  The NULL check now comes first.
 */
static void mxr_irq_layer_handle(struct mxr_layer *layer)
{
    struct list_head *head;
    struct mxr_buffer *done;

    /* skip non-existing layer */
    if (layer == NULL)
        return;
    head = &layer->enq_list;

    spin_lock(&layer->enq_slock);
    if (layer->state == MXR_LAYER_IDLE)
        goto done;

    /* the shadowed buffer finished scanning out */
    done = layer->shadow_buf;
    layer->shadow_buf = layer->update_buf;

    if (list_empty(head)) {
        if (layer->state != MXR_LAYER_STREAMING)
            layer->update_buf = NULL;
    } else {
        struct mxr_buffer *next;
        next = list_first_entry(head, struct mxr_buffer, list);
        list_del(&next->list);
        layer->update_buf = next;
    }

    layer->ops.buffer_set(layer, layer->update_buf);

    if (done && done != layer->shadow_buf)
        vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);

done:
    spin_unlock(&layer->enq_slock);
}
/*
 * Mixer interrupt: on vsync, signal waiters, track top/bottom field in
 * interlaced mode, then rotate each layer's buffers — but only on the
 * top field so both fields of a frame scan out from the same buffer.
 */
irqreturn_t mxr_irq_handler(int irq, void *dev_data)
{
    struct mxr_device *mdev = dev_data;
    u32 i, val;

    spin_lock(&mdev->reg_slock);
    val = mxr_read(mdev, MXR_INT_STATUS);

    /* wake up process waiting for VSYNC */
    if (val & MXR_INT_STATUS_VSYNC) {
        set_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
        /* toggle TOP field event if working in interlaced mode */
        if (~mxr_read(mdev, MXR_CFG) & MXR_CFG_SCAN_PROGRASSIVE)
            change_bit(MXR_EVENT_TOP, &mdev->event_flags);
        wake_up(&mdev->event_queue);
        /* vsync interrupt use different bit for read and clear */
        val &= ~MXR_INT_STATUS_VSYNC;
        val |= MXR_INT_CLEAR_VSYNC;
    }

    /* clear interrupts */
    mxr_write(mdev, MXR_INT_STATUS, val);

    spin_unlock(&mdev->reg_slock);
    /* leave on non-vsync event */
    if (~val & MXR_INT_CLEAR_VSYNC)
        return IRQ_HANDLED;
    /* skip layer update on bottom field */
    if (!test_bit(MXR_EVENT_TOP, &mdev->event_flags))
        return IRQ_HANDLED;
    for (i = 0; i < MXR_MAX_LAYERS; ++i)
        mxr_irq_layer_handle(mdev->layer[i]);
    return IRQ_HANDLED;
}
/* Route mixer output: cookie 0 selects SDO (analog), else HDMI. */
void mxr_reg_s_output(struct mxr_device *mdev, int cookie)
{
    u32 val;

    val = cookie == 0 ? MXR_CFG_DST_SDO : MXR_CFG_DST_HDMI;
    mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_DST_MASK);
}

/* Start the mixer; the first field after start is treated as TOP. */
void mxr_reg_streamon(struct mxr_device *mdev)
{
    unsigned long flags;

    spin_lock_irqsave(&mdev->reg_slock, flags);
    /* single write -> no need to block vsync update */

    /* start MIXER */
    mxr_write_mask(mdev, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
    set_bit(MXR_EVENT_TOP, &mdev->event_flags);

    spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

void mxr_reg_streamoff(struct mxr_device *mdev)
{
    unsigned long flags;

    spin_lock_irqsave(&mdev->reg_slock, flags);
    /* single write -> no need to block vsync update */

    /* stop MIXER */
    mxr_write_mask(mdev, MXR_STATUS, 0, MXR_STATUS_REG_RUN);

    spin_unlock_irqrestore(&mdev->reg_slock, flags);
}

/*
 * Block (up to 1s) until the IRQ handler reports a vsync.
 * Returns 0 on vsync, -ETIME on timeout, or a negative wait error.
 */
int mxr_reg_wait4vsync(struct mxr_device *mdev)
{
    int ret;

    clear_bit(MXR_EVENT_VSYNC, &mdev->event_flags);
    /* TODO: consider adding interruptible */
    ret = wait_event_timeout(mdev->event_queue,
                             test_bit(MXR_EVENT_VSYNC, &mdev->event_flags),
                             msecs_to_jiffies(1000));
    if (ret > 0)
        return 0;
    if (ret < 0)
        return ret;
    mxr_warn(mdev, "no vsync detected - timeout\n");
    return -ETIME;
}
/*
 * Configure the mixer for the output's media-bus format: colorspace
 * (YUV444 for JPEG colorspace, else RGB888), scan mode, and the
 * SD/HD standard inferred from the frame height.  The VP's line-skip
 * mode is enabled only for interlaced output.
 */
void mxr_reg_set_mbus_fmt(struct mxr_device *mdev,
                          struct v4l2_mbus_framefmt *fmt)
{
    u32 val = 0;
    unsigned long flags;

    spin_lock_irqsave(&mdev->reg_slock, flags);
    mxr_vsync_set_update(mdev, MXR_DISABLE);

    /* selecting colorspace accepted by output */
    if (fmt->colorspace == V4L2_COLORSPACE_JPEG)
        val |= MXR_CFG_OUT_YUV444;
    else
        val |= MXR_CFG_OUT_RGB888;

    /* choosing between interlace and progressive mode */
    if (fmt->field == V4L2_FIELD_INTERLACED)
        val |= MXR_CFG_SCAN_INTERLACE;
    else
        val |= MXR_CFG_SCAN_PROGRASSIVE;

    /* choosing between proper HD and SD mode */
    if (fmt->height == 480)
        val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
    else if (fmt->height == 576)
        val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
    else if (fmt->height == 720)
        val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
    else if (fmt->height == 1080)
        val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
    else
        WARN(1, "unrecognized mbus height %u!\n", fmt->height);

    mxr_write_mask(mdev, MXR_CFG, val, MXR_CFG_SCAN_MASK |
                   MXR_CFG_OUT_MASK);

    val = (fmt->field == V4L2_FIELD_INTERLACED) ? ~0 : 0;
    vp_write_mask(mdev, VP_MODE, val,
                  VP_MODE_LINE_SKIP | VP_MODE_FIELD_ID_AUTO_TOGGLING);

    mxr_vsync_set_update(mdev, MXR_ENABLE);
    spin_unlock_irqrestore(&mdev->reg_slock, flags);
}
/* Graphic-layer stream on/off hook; the hardware needs no per-layer action. */
void mxr_reg_graph_layer_stream(struct mxr_device *mdev, int idx, int en)
{
    /* no extra actions need to be done */
}
/* Video-processor layer stream on/off hook; nothing to do in hardware. */
void mxr_reg_vp_layer_stream(struct mxr_device *mdev, int en)
{
    /* no extra actions need to be done */
}
/*
 * Scaler filter coefficient tables written verbatim into the VP_POLY*
 * registers by mxr_reg_vp_default_filter().  Negative initializers are
 * deliberately stored as their two's-complement byte values in these
 * u8 arrays — the hardware interprets the coefficients as signed.
 */
/* 8-tap horizontal luma coefficients (per the VP_POLY8_Y* names) */
static const u8 filter_y_horiz_tap8[] = {
    0, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, 0, 0, 0,
    0, 2, 4, 5, 6, 6, 6, 6,
    6, 5, 5, 4, 3, 2, 1, 1,
    0, -6, -12, -16, -18, -20, -21, -20,
    -20, -18, -16, -13, -10, -8, -5, -2,
    127, 126, 125, 121, 114, 107, 99, 89,
    79, 68, 57, 46, 35, 25, 16, 8,
};
/* 4-tap vertical luma coefficients */
static const u8 filter_y_vert_tap4[] = {
    0, -3, -6, -8, -8, -8, -8, -7,
    -6, -5, -4, -3, -2, -1, -1, 0,
    127, 126, 124, 118, 111, 102, 92, 81,
    70, 59, 48, 37, 27, 19, 11, 5,
    0, 5, 11, 19, 27, 37, 48, 59,
    70, 81, 92, 102, 111, 118, 124, 126,
    0, 0, -1, -1, -2, -3, -4, -5,
    -6, -7, -8, -8, -8, -8, -6, -3,
};
/* 4-tap horizontal chroma coefficients */
static const u8 filter_cr_horiz_tap4[] = {
    0, -3, -6, -8, -8, -8, -8, -7,
    -6, -5, -4, -3, -2, -1, -1, 0,
    127, 126, 124, 118, 111, 102, 92, 81,
    70, 59, 48, 37, 27, 19, 11, 5,
};
static inline void mxr_reg_vp_filter_set(struct mxr_device *mdev,
int reg_id, const u8 *data, unsigned int size)
{
/* assure 4-byte align */
BUG_ON(size & 3);
for (; size; size -= 4, reg_id += 4, data += 4) {
u32 val = (data[0] << 24) | (data[1] << 16) |
(data[2] << 8) | data[3];
vp_write(mdev, reg_id, val);
}
}
/* Load the built-in default scaler coefficients into the video processor. */
static void mxr_reg_vp_default_filter(struct mxr_device *mdev)
{
    mxr_reg_vp_filter_set(mdev, VP_POLY8_Y0_LL,
        filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
    mxr_reg_vp_filter_set(mdev, VP_POLY4_Y0_LL,
        filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
    mxr_reg_vp_filter_set(mdev, VP_POLY4_C0_LL,
        filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
}
/* Dump the mixer register bank to the debug log (no-op unless debug on). */
static void mxr_reg_mxr_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
    mxr_dbg(mdev, #reg_id " = %08x\n", \
        (u32)readl(mdev->res.mxr_regs + reg_id)); \
} while (0)
    DUMPREG(MXR_STATUS);
    DUMPREG(MXR_CFG);
    DUMPREG(MXR_INT_EN);
    DUMPREG(MXR_INT_STATUS);
    DUMPREG(MXR_LAYER_CFG);
    DUMPREG(MXR_VIDEO_CFG);
    DUMPREG(MXR_GRAPHIC0_CFG);
    DUMPREG(MXR_GRAPHIC0_BASE);
    DUMPREG(MXR_GRAPHIC0_SPAN);
    DUMPREG(MXR_GRAPHIC0_WH);
    DUMPREG(MXR_GRAPHIC0_SXY);
    DUMPREG(MXR_GRAPHIC0_DXY);
    DUMPREG(MXR_GRAPHIC1_CFG);
    DUMPREG(MXR_GRAPHIC1_BASE);
    DUMPREG(MXR_GRAPHIC1_SPAN);
    DUMPREG(MXR_GRAPHIC1_WH);
    DUMPREG(MXR_GRAPHIC1_SXY);
    DUMPREG(MXR_GRAPHIC1_DXY);
#undef DUMPREG
}
/* Dump the video-processor register bank to the debug log. */
static void mxr_reg_vp_dump(struct mxr_device *mdev)
{
#define DUMPREG(reg_id) \
do { \
    mxr_dbg(mdev, #reg_id " = %08x\n", \
        (u32) readl(mdev->res.vp_regs + reg_id)); \
} while (0)
    DUMPREG(VP_ENABLE);
    DUMPREG(VP_SRESET);
    DUMPREG(VP_SHADOW_UPDATE);
    DUMPREG(VP_FIELD_ID);
    DUMPREG(VP_MODE);
    DUMPREG(VP_IMG_SIZE_Y);
    DUMPREG(VP_IMG_SIZE_C);
    DUMPREG(VP_PER_RATE_CTRL);
    DUMPREG(VP_TOP_Y_PTR);
    DUMPREG(VP_BOT_Y_PTR);
    DUMPREG(VP_TOP_C_PTR);
    DUMPREG(VP_BOT_C_PTR);
    DUMPREG(VP_ENDIAN_MODE);
    DUMPREG(VP_SRC_H_POSITION);
    DUMPREG(VP_SRC_V_POSITION);
    DUMPREG(VP_SRC_WIDTH);
    DUMPREG(VP_SRC_HEIGHT);
    DUMPREG(VP_DST_H_POSITION);
    DUMPREG(VP_DST_V_POSITION);
    DUMPREG(VP_DST_WIDTH);
    DUMPREG(VP_DST_HEIGHT);
    DUMPREG(VP_H_RATIO);
    DUMPREG(VP_V_RATIO);
#undef DUMPREG
}
/* Dump both register banks (mixer and video processor). */
void mxr_reg_dump(struct mxr_device *mdev)
{
    mxr_reg_mxr_dump(mdev);
    mxr_reg_vp_dump(mdev);
}
| gpl-2.0 |
uberlaggydarwin/bugfree-wookie | drivers/staging/iio/impedance-analyzer/ad5933.c | 4320 | 20309 | /*
* AD5933 AD5934 Impedance Converter, Network Analyzer
*
* Copyright 2011 Analog Devices Inc.
*
* Licensed under the GPL-2.
*/
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/i2c.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/div64.h>
#include "../iio.h"
#include "../sysfs.h"
#include "../buffer.h"
#include "../ring_sw.h"
#include "ad5933.h"
/* AD5933/AD5934 Registers */
#define AD5933_REG_CONTROL_HB 0x80 /* R/W, 2 bytes */
#define AD5933_REG_CONTROL_LB 0x81 /* R/W, 2 bytes */
#define AD5933_REG_FREQ_START 0x82 /* R/W, 3 bytes */
#define AD5933_REG_FREQ_INC 0x85 /* R/W, 3 bytes */
#define AD5933_REG_INC_NUM 0x88 /* R/W, 2 bytes, 9 bit */
#define AD5933_REG_SETTLING_CYCLES 0x8A /* R/W, 2 bytes */
#define AD5933_REG_STATUS 0x8F /* R, 1 byte */
#define AD5933_REG_TEMP_DATA 0x92 /* R, 2 bytes*/
#define AD5933_REG_REAL_DATA 0x94 /* R, 2 bytes*/
#define AD5933_REG_IMAG_DATA 0x96 /* R, 2 bytes*/
/* AD5933_REG_CONTROL_HB Bits */
#define AD5933_CTRL_INIT_START_FREQ (0x1 << 4)
#define AD5933_CTRL_START_SWEEP (0x2 << 4)
#define AD5933_CTRL_INC_FREQ (0x3 << 4)
#define AD5933_CTRL_REPEAT_FREQ (0x4 << 4)
#define AD5933_CTRL_MEASURE_TEMP (0x9 << 4)
#define AD5933_CTRL_POWER_DOWN (0xA << 4)
#define AD5933_CTRL_STANDBY (0xB << 4)
#define AD5933_CTRL_RANGE_2000mVpp (0x0 << 1)
#define AD5933_CTRL_RANGE_200mVpp (0x1 << 1)
#define AD5933_CTRL_RANGE_400mVpp (0x2 << 1)
#define AD5933_CTRL_RANGE_1000mVpp (0x3 << 1)
#define AD5933_CTRL_RANGE(x) ((x) << 1)
#define AD5933_CTRL_PGA_GAIN_1 (0x1 << 0)
#define AD5933_CTRL_PGA_GAIN_5 (0x0 << 0)
/* AD5933_REG_CONTROL_LB Bits */
#define AD5933_CTRL_RESET (0x1 << 4)
#define AD5933_CTRL_INT_SYSCLK (0x0 << 3)
#define AD5933_CTRL_EXT_SYSCLK (0x1 << 3)
/* AD5933_REG_STATUS Bits */
#define AD5933_STAT_TEMP_VALID (0x1 << 0)
#define AD5933_STAT_DATA_VALID (0x1 << 1)
#define AD5933_STAT_SWEEP_DONE (0x1 << 2)
/* I2C Block Commands */
#define AD5933_I2C_BLOCK_WRITE 0xA0
#define AD5933_I2C_BLOCK_READ 0xA1
#define AD5933_I2C_ADDR_POINTER 0xB0
/* Device Specs */
#define AD5933_INT_OSC_FREQ_Hz 16776000
#define AD5933_MAX_OUTPUT_FREQ_Hz 100000
#define AD5933_MAX_RETRIES 100
#define AD5933_OUT_RANGE 1
#define AD5933_OUT_RANGE_AVAIL 2
#define AD5933_OUT_SETTLING_CYCLES 3
#define AD5933_IN_PGA_GAIN 4
#define AD5933_IN_PGA_GAIN_AVAIL 5
#define AD5933_FREQ_POINTS 6
#define AD5933_POLL_TIME_ms 10
#define AD5933_INIT_EXCITATION_TIME_ms 100
/* Per-device driver state, stored as iio_priv() of the iio_dev. */
struct ad5933_state {
    struct i2c_client *client;      /* I2C device handle */
    struct regulator *reg;          /* optional "vcc" supply (may be IS_ERR) */
    struct ad5933_platform_data *pdata; /* board data or built-in default */
    struct delayed_work work;       /* sweep polling state machine */
    unsigned long mclk_hz;          /* clock used for frequency conversion */
    unsigned char ctrl_hb;          /* cached CONTROL register high byte */
    unsigned char ctrl_lb;          /* cached CONTROL register low byte */
    unsigned range_avail[4];        /* output ranges in mVpp, vref-scaled */
    unsigned short vref_mv;         /* reference voltage in millivolts */
    unsigned short settling_cycles; /* user-visible settling cycle count */
    unsigned short freq_points;     /* number of sweep frequency points */
    unsigned freq_start;            /* sweep start frequency in Hz */
    unsigned freq_inc;              /* sweep frequency increment in Hz */
    unsigned state;                 /* current sweep phase (a CTRL command) */
    unsigned poll_time_jiffies;     /* delay between status polls */
};
static struct ad5933_platform_data ad5933_default_pdata = {
.vref_mv = 3300,
};
static struct iio_chan_spec ad5933_channels[] = {
IIO_CHAN(IIO_TEMP, 0, 1, 1, NULL, 0, 0, 0,
0, AD5933_REG_TEMP_DATA, IIO_ST('s', 14, 16, 0), 0),
/* Ring Channels */
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "real_raw", 0, 0,
IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
AD5933_REG_REAL_DATA, 0, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "imag_raw", 0, 0,
IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
AD5933_REG_IMAG_DATA, 1, IIO_ST('s', 16, 16, 0), 0),
};
/*
 * Write @len consecutive 8-bit registers starting at @reg, one SMBus
 * byte-data transfer per register.  Returns 0 on success or the first
 * negative I2C error encountered.
 */
static int ad5933_i2c_write(struct i2c_client *client,
        u8 reg, u8 len, u8 *data)
{
    u8 i;

    for (i = 0; i < len; i++) {
        int ret = i2c_smbus_write_byte_data(client, reg + i, data[i]);

        if (ret < 0) {
            dev_err(&client->dev, "I2C write error\n");
            return ret;
        }
    }
    return 0;
}
/*
 * Read @len consecutive 8-bit registers starting at @reg into @data.
 * Returns 0 on success or the first negative I2C error encountered.
 */
static int ad5933_i2c_read(struct i2c_client *client,
        u8 reg, u8 len, u8 *data)
{
    u8 i;

    for (i = 0; i < len; i++) {
        int ret = i2c_smbus_read_byte_data(client, reg + i);

        if (ret < 0) {
            dev_err(&client->dev, "I2C read error\n");
            return ret;
        }
        data[i] = ret;
    }
    return 0;
}
/* Issue a control command by rewriting the cached CONTROL high byte. */
static int ad5933_cmd(struct ad5933_state *st, unsigned char cmd)
{
    unsigned char dat = st->ctrl_hb | cmd;
    return ad5933_i2c_write(st->client,
            AD5933_REG_CONTROL_HB, 1, &dat);
}
/* Pulse the RESET bit via the cached CONTROL low byte. */
static int ad5933_reset(struct ad5933_state *st)
{
    unsigned char dat = st->ctrl_lb | AD5933_CTRL_RESET;
    return ad5933_i2c_write(st->client,
            AD5933_REG_CONTROL_LB, 1, &dat);
}
/*
 * Poll the STATUS register (1 ms apart, up to AD5933_MAX_RETRIES) until
 * any bit in @event is set.  Returns the status value on success, a
 * negative I2C error on bus failure, or -EAGAIN after exhausting retries.
 */
static int ad5933_wait_busy(struct ad5933_state *st, unsigned char event)
{
    unsigned char val, timeout = AD5933_MAX_RETRIES;
    int ret;
    while (timeout--) {
        ret = ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &val);
        if (ret < 0)
            return ret;
        if (val & event)
            return val;
        cpu_relax();
        mdelay(1);
    }
    return -EAGAIN;
}
/*
 * Convert @freq (Hz) to the device's 24-bit frequency code,
 *     code = freq * 2^27 / (mclk / 4),
 * cache the value in the matching state field, and write the three
 * most significant payload bytes big-endian to @reg.
 * Returns 0 on success, -EINVAL for an unknown register.
 */
static int ad5933_set_freq(struct ad5933_state *st,
    unsigned reg, unsigned long freq)
{
    unsigned long long freqreg;
    /* overlay lets the be32 word be addressed as individual bytes */
    union {
        u32 d32;
        u8 d8[4];
    } dat;
    freqreg = (u64) freq * (u64) (1 << 27);
    do_div(freqreg, st->mclk_hz / 4);
    switch (reg) {
    case AD5933_REG_FREQ_START:
        st->freq_start = freq;
        break;
    case AD5933_REG_FREQ_INC:
        st->freq_inc = freq;
        break;
    default:
        return -EINVAL;
    }
    dat.d32 = cpu_to_be32(freqreg);
    /* skip d8[0]: only the low 24 bits of the code are transmitted */
    return ad5933_i2c_write(st->client, reg, 3, &dat.d8[1]);
}
/*
 * Reset the device and program sane defaults: 10 kHz start frequency,
 * 200 Hz increment, 10 settling cycles, 100 sweep points.
 * Returns 0 on success or a negative I2C error.
 */
static int ad5933_setup(struct ad5933_state *st)
{
    unsigned short dat;
    int ret;
    ret = ad5933_reset(st);
    if (ret < 0)
        return ret;
    ret = ad5933_set_freq(st, AD5933_REG_FREQ_START, 10000);
    if (ret < 0)
        return ret;
    ret = ad5933_set_freq(st, AD5933_REG_FREQ_INC, 200);
    if (ret < 0)
        return ret;
    st->settling_cycles = 10;
    dat = cpu_to_be16(st->settling_cycles);
    ret = ad5933_i2c_write(st->client,
            AD5933_REG_SETTLING_CYCLES, 2, (u8 *)&dat);
    if (ret < 0)
        return ret;
    st->freq_points = 100;
    dat = cpu_to_be16(st->freq_points);
    return ad5933_i2c_write(st->client, AD5933_REG_INC_NUM, 2, (u8 *)&dat);
}
/*
 * Scale the nominal 3.3 V output amplitudes (mVpp, indexed by the
 * CONTROL range code) to the actual reference voltage.
 */
static void ad5933_calc_out_ranges(struct ad5933_state *st)
{
    /* amplitudes measured at Vdd = 3.3 V, one entry per range code */
    static const unsigned normalized_3v3[4] = {1980, 198, 383, 970};
    int i;

    for (i = 0; i < 4; i++)
        st->range_avail[i] = normalized_3v3[i] * st->vref_mv / 3300;
}
/*
 * handles: AD5933_REG_FREQ_START and AD5933_REG_FREQ_INC
 *
 * Read the 24-bit big-endian frequency code from the register named by
 * the attribute and convert it back to Hz (inverse of ad5933_set_freq).
 */
static ssize_t ad5933_show_frequency(struct device *dev,
    struct device_attribute *attr,
    char *buf)
{
    struct iio_dev *indio_dev = dev_get_drvdata(dev);
    struct ad5933_state *st = iio_priv(indio_dev);
    struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
    int ret;
    unsigned long long freqreg;
    union {
        u32 d32;
        u8 d8[4];
    } dat;
    mutex_lock(&indio_dev->mlock);
    /* read 3 payload bytes into d8[1..3]; d8[0] stays undefined but is
     * masked off below */
    ret = ad5933_i2c_read(st->client, this_attr->address, 3, &dat.d8[1]);
    mutex_unlock(&indio_dev->mlock);
    if (ret < 0)
        return ret;
    freqreg = be32_to_cpu(dat.d32) & 0xFFFFFF;
    /* freq = code * (mclk/4) / 2^27 */
    freqreg = (u64) freqreg * (u64) (st->mclk_hz / 4);
    do_div(freqreg, 1 << 27);
    return sprintf(buf, "%d\n", (int) freqreg);
}
/*
 * Parse a decimal frequency in Hz and program it into the register
 * named by the attribute (start frequency or frequency increment).
 * Rejects values above AD5933_MAX_OUTPUT_FREQ_Hz.
 */
static ssize_t ad5933_store_frequency(struct device *dev,
    struct device_attribute *attr,
    const char *buf,
    size_t len)
{
    struct iio_dev *indio_dev = dev_get_drvdata(dev);
    struct ad5933_state *st = iio_priv(indio_dev);
    struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
    long val;
    int ret;
    ret = strict_strtoul(buf, 10, &val);
    if (ret)
        return ret;
    if (val > AD5933_MAX_OUTPUT_FREQ_Hz)
        return -EINVAL;
    mutex_lock(&indio_dev->mlock);
    ret = ad5933_set_freq(st, this_attr->address, val);
    mutex_unlock(&indio_dev->mlock);
    return ret ? ret : len;
}
static IIO_DEVICE_ATTR(out_voltage0_freq_start, S_IRUGO | S_IWUSR,
ad5933_show_frequency,
ad5933_store_frequency,
AD5933_REG_FREQ_START);
static IIO_DEVICE_ATTR(out_voltage0_freq_increment, S_IRUGO | S_IWUSR,
ad5933_show_frequency,
ad5933_store_frequency,
AD5933_REG_FREQ_INC);
/*
 * Common sysfs show handler; this_attr->address selects which control
 * value to print (output range, PGA gain, settling cycles, ...).
 */
static ssize_t ad5933_show(struct device *dev,
    struct device_attribute *attr,
    char *buf)
{
    struct iio_dev *indio_dev = dev_get_drvdata(dev);
    struct ad5933_state *st = iio_priv(indio_dev);
    struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
    int ret = 0, len = 0;
    mutex_lock(&indio_dev->mlock);
    switch ((u32) this_attr->address) {
    case AD5933_OUT_RANGE:
        /* current range code lives in CONTROL bits [2:1] */
        len = sprintf(buf, "%d\n",
            st->range_avail[(st->ctrl_hb >> 1) & 0x3]);
        break;
    case AD5933_OUT_RANGE_AVAIL:
        /* index order 0,3,2,1 lists the ranges largest-first */
        len = sprintf(buf, "%d %d %d %d\n", st->range_avail[0],
            st->range_avail[3], st->range_avail[2],
            st->range_avail[1]);
        break;
    case AD5933_OUT_SETTLING_CYCLES:
        len = sprintf(buf, "%d\n", st->settling_cycles);
        break;
    case AD5933_IN_PGA_GAIN:
        len = sprintf(buf, "%s\n",
            (st->ctrl_hb & AD5933_CTRL_PGA_GAIN_1) ?
            "1" : "0.2");
        break;
    case AD5933_IN_PGA_GAIN_AVAIL:
        len = sprintf(buf, "1 0.2\n");
        break;
    case AD5933_FREQ_POINTS:
        len = sprintf(buf, "%d\n", st->freq_points);
        break;
    default:
        ret = -EINVAL;
    }
    mutex_unlock(&indio_dev->mlock);
    return ret ? ret : len;
}
/*
 * Common sysfs store handler; this_attr->address selects the control
 * being written (output range, PGA gain, settling cycles or number of
 * frequency points).  Returns @len on success, negative errno on error.
 */
static ssize_t ad5933_store(struct device *dev,
    struct device_attribute *attr,
    const char *buf,
    size_t len)
{
    struct iio_dev *indio_dev = dev_get_drvdata(dev);
    struct ad5933_state *st = iio_priv(indio_dev);
    struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
    long val;
    int i, ret = 0;
    unsigned short dat;

    /* PGA gain takes the strings "1"/"0.2"; everything else a number */
    if (this_attr->address != AD5933_IN_PGA_GAIN) {
        ret = strict_strtol(buf, 10, &val);
        if (ret)
            return ret;
    }

    mutex_lock(&indio_dev->mlock);
    switch ((u32) this_attr->address) {
    case AD5933_OUT_RANGE:
        /*
         * Bug fix: preset the error code *before* the search loop.
         * The original assigned ret = -EINVAL after the loop, which
         * clobbered the ad5933_cmd() result on a successful match.
         */
        ret = -EINVAL;
        for (i = 0; i < 4; i++)
            if (val == st->range_avail[i]) {
                st->ctrl_hb &= ~AD5933_CTRL_RANGE(0x3);
                st->ctrl_hb |= AD5933_CTRL_RANGE(i);
                ret = ad5933_cmd(st, 0);
                break;
            }
        break;
    case AD5933_IN_PGA_GAIN:
        if (sysfs_streq(buf, "1")) {
            st->ctrl_hb |= AD5933_CTRL_PGA_GAIN_1;
        } else if (sysfs_streq(buf, "0.2")) {
            st->ctrl_hb &= ~AD5933_CTRL_PGA_GAIN_1;
        } else {
            ret = -EINVAL;
            break;
        }
        ret = ad5933_cmd(st, 0);
        break;
    case AD5933_OUT_SETTLING_CYCLES:
        val = clamp(val, 0L, 0x7FFL);
        st->settling_cycles = val;
        /*
         * 2x/4x multiplier encoding, see datasheet.
         * Bug fix: test the larger threshold first; with the original
         * ordering ("val > 511" before "val > 1022") the 4x branch
         * could never be reached.
         */
        if (val > 1022)
            val = (val >> 2) | (3 << 9);
        else if (val > 511)
            val = (val >> 1) | (1 << 9);
        dat = cpu_to_be16(val);
        ret = ad5933_i2c_write(st->client,
                AD5933_REG_SETTLING_CYCLES, 2, (u8 *)&dat);
        break;
    case AD5933_FREQ_POINTS:
        val = clamp(val, 0L, 511L);
        st->freq_points = val;
        dat = cpu_to_be16(val);
        ret = ad5933_i2c_write(st->client, AD5933_REG_INC_NUM, 2,
                (u8 *)&dat);
        break;
    default:
        ret = -EINVAL;
    }
    mutex_unlock(&indio_dev->mlock);
    return ret ? ret : len;
}
static IIO_DEVICE_ATTR(out_voltage0_scale, S_IRUGO | S_IWUSR,
ad5933_show,
ad5933_store,
AD5933_OUT_RANGE);
static IIO_DEVICE_ATTR(out_voltage0_scale_available, S_IRUGO,
ad5933_show,
NULL,
AD5933_OUT_RANGE_AVAIL);
static IIO_DEVICE_ATTR(in_voltage0_scale, S_IRUGO | S_IWUSR,
ad5933_show,
ad5933_store,
AD5933_IN_PGA_GAIN);
static IIO_DEVICE_ATTR(in_voltage0_scale_available, S_IRUGO,
ad5933_show,
NULL,
AD5933_IN_PGA_GAIN_AVAIL);
static IIO_DEVICE_ATTR(out_voltage0_freq_points, S_IRUGO | S_IWUSR,
ad5933_show,
ad5933_store,
AD5933_FREQ_POINTS);
static IIO_DEVICE_ATTR(out_voltage0_settling_cycles, S_IRUGO | S_IWUSR,
ad5933_show,
ad5933_store,
AD5933_OUT_SETTLING_CYCLES);
/* note:
* ideally we would handle the scale attributes via the iio_info
* (read|write)_raw methods, however this part is a untypical since we
* don't create dedicated sysfs channel attributes for out0 and in0.
*/
static struct attribute *ad5933_attributes[] = {
&iio_dev_attr_out_voltage0_scale.dev_attr.attr,
&iio_dev_attr_out_voltage0_scale_available.dev_attr.attr,
&iio_dev_attr_out_voltage0_freq_start.dev_attr.attr,
&iio_dev_attr_out_voltage0_freq_increment.dev_attr.attr,
&iio_dev_attr_out_voltage0_freq_points.dev_attr.attr,
&iio_dev_attr_out_voltage0_settling_cycles.dev_attr.attr,
&iio_dev_attr_in_voltage0_scale.dev_attr.attr,
&iio_dev_attr_in_voltage0_scale_available.dev_attr.attr,
NULL
};
static const struct attribute_group ad5933_attribute_group = {
.attrs = ad5933_attributes,
};
/*
 * Raw channel read; only mask 0 (the temperature channel) is handled.
 * Triggers a temperature conversion, waits for TEMP_VALID and converts
 * the 14-bit two's-complement result (32 LSB per degree Celsius) to
 * milli-degrees.  Returns IIO_VAL_INT or a negative errno.
 */
static int ad5933_read_raw(struct iio_dev *indio_dev,
    struct iio_chan_spec const *chan,
    int *val,
    int *val2,
    long m)
{
    struct ad5933_state *st = iio_priv(indio_dev);
    unsigned short dat;
    int ret = -EINVAL;
    mutex_lock(&indio_dev->mlock);
    switch (m) {
    case 0:
        /* a running buffered sweep owns the device */
        if (iio_buffer_enabled(indio_dev)) {
            ret = -EBUSY;
            goto out;
        }
        ret = ad5933_cmd(st, AD5933_CTRL_MEASURE_TEMP);
        if (ret < 0)
            goto out;
        ret = ad5933_wait_busy(st, AD5933_STAT_TEMP_VALID);
        if (ret < 0)
            goto out;
        ret = ad5933_i2c_read(st->client,
                AD5933_REG_TEMP_DATA, 2,
                (u8 *)&dat);
        if (ret < 0)
            goto out;
        mutex_unlock(&indio_dev->mlock);
        ret = be16_to_cpu(dat);
        /* Temp in Milli degrees Celsius */
        if (ret < 8192)
            *val = ret * 1000 / 32;
        else
            /* value >= 2^13: negative in 14-bit two's complement */
            *val = (ret - 16384) * 1000 / 32;
        return IIO_VAL_INT;
    }
out:
    mutex_unlock(&indio_dev->mlock);
    return ret;
}
static const struct iio_info ad5933_info = {
.read_raw = &ad5933_read_raw,
.attrs = &ad5933_attribute_group,
.driver_module = THIS_MODULE,
};
/*
 * Buffer pre-enable: size the ring datum from the active scan mask,
 * then reset the device, place it in standby and start exciting the
 * programmed start frequency (no measurement yet).
 */
static int ad5933_ring_preenable(struct iio_dev *indio_dev)
{
    struct ad5933_state *st = iio_priv(indio_dev);
    size_t d_size;
    int ret;
    if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
        return -EINVAL;
    /* bytes per datum = enabled channels * 16-bit storage */
    d_size = bitmap_weight(indio_dev->active_scan_mask,
            indio_dev->masklength) *
        ad5933_channels[1].scan_type.storagebits / 8;
    if (indio_dev->buffer->access->set_bytes_per_datum)
        indio_dev->buffer->access->
            set_bytes_per_datum(indio_dev->buffer, d_size);
    ret = ad5933_reset(st);
    if (ret < 0)
        return ret;
    ret = ad5933_cmd(st, AD5933_CTRL_STANDBY);
    if (ret < 0)
        return ret;
    ret = ad5933_cmd(st, AD5933_CTRL_INIT_START_FREQ);
    if (ret < 0)
        return ret;
    /* remember the phase so ad5933_work() knows to start the sweep */
    st->state = AD5933_CTRL_INIT_START_FREQ;
    return 0;
}
/*
 * Buffer post-enable: give the excitation signal time to settle before
 * the work handler switches the device into sweep mode.
 */
static int ad5933_ring_postenable(struct iio_dev *indio_dev)
{
    struct ad5933_state *st = iio_priv(indio_dev);
    /* AD5933_CTRL_INIT_START_FREQ:
     * High Q complex circuits require a long time to reach steady state.
     * To facilitate the measurement of such impedances, this mode allows
     * the user full control of the settling time requirement before
     * entering start frequency sweep mode where the impedance measurement
     * takes place. In this mode the impedance is excited with the
     * programmed start frequency (ad5933_ring_preenable),
     * but no measurement takes place.
     */
    schedule_delayed_work(&st->work,
            msecs_to_jiffies(AD5933_INIT_EXCITATION_TIME_ms));
    return 0;
}
/* Buffer post-disable: stop the poll work and power the device down. */
static int ad5933_ring_postdisable(struct iio_dev *indio_dev)
{
    struct ad5933_state *st = iio_priv(indio_dev);
    cancel_delayed_work_sync(&st->work);
    return ad5933_cmd(st, AD5933_CTRL_POWER_DOWN);
}
static const struct iio_buffer_setup_ops ad5933_ring_setup_ops = {
.preenable = &ad5933_ring_preenable,
.postenable = &ad5933_ring_postenable,
.postdisable = &ad5933_ring_postdisable,
};
static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
if (!indio_dev->buffer)
return -ENOMEM;
/* Ring buffer functions - here trigger setup related */
indio_dev->setup_ops = &ad5933_ring_setup_ops;
indio_dev->modes |= INDIO_BUFFER_HARDWARE;
return 0;
}
/*
 * Delayed-work handler driving the sweep state machine: after the
 * initial settling period it starts the sweep, then on each poll it
 * reads one data point into the ring buffer and either advances to the
 * next frequency, reschedules, or powers down when the sweep is done.
 */
static void ad5933_work(struct work_struct *work)
{
    struct ad5933_state *st = container_of(work,
        struct ad5933_state, work.work);
    struct iio_dev *indio_dev = i2c_get_clientdata(st->client);
    struct iio_buffer *ring = indio_dev->buffer;
    signed short buf[2];
    unsigned char status;
    mutex_lock(&indio_dev->mlock);
    if (st->state == AD5933_CTRL_INIT_START_FREQ) {
        /* start sweep */
        ad5933_cmd(st, AD5933_CTRL_START_SWEEP);
        st->state = AD5933_CTRL_START_SWEEP;
        schedule_delayed_work(&st->work, st->poll_time_jiffies);
        mutex_unlock(&indio_dev->mlock);
        return;
    }
    ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
    if (status & AD5933_STAT_DATA_VALID) {
        int scan_count = bitmap_weight(indio_dev->active_scan_mask,
                indio_dev->masklength);
        /*
         * NOTE(review): this ternary looks inverted for single-channel
         * masks — with only the real channel (bit 0) enabled it reads
         * IMAG_DATA, with only the imaginary channel (bit 1) it reads
         * REAL_DATA.  Testing bit 0 instead would pick the register
         * matching the enabled channel in all cases; confirm intent.
         */
        ad5933_i2c_read(st->client,
                test_bit(1, indio_dev->active_scan_mask) ?
                AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
                scan_count * 2, (u8 *)buf);
        if (scan_count == 2) {
            buf[0] = be16_to_cpu(buf[0]);
            buf[1] = be16_to_cpu(buf[1]);
        } else {
            buf[0] = be16_to_cpu(buf[0]);
        }
        /* save datum to the ring */
        ring->access->store_to(ring, (u8 *)buf, iio_get_time_ns());
    } else {
        /* no data available - try again later */
        schedule_delayed_work(&st->work, st->poll_time_jiffies);
        mutex_unlock(&indio_dev->mlock);
        return;
    }
    if (status & AD5933_STAT_SWEEP_DONE) {
        /* last sample received - power down do nothing until
         * the ring enable is toggled */
        ad5933_cmd(st, AD5933_CTRL_POWER_DOWN);
    } else {
        /* we just received a valid datum, move on to the next */
        ad5933_cmd(st, AD5933_CTRL_INC_FREQ);
        schedule_delayed_work(&st->work, st->poll_time_jiffies);
    }
    mutex_unlock(&indio_dev->mlock);
}
/*
 * Probe: allocate the iio_dev, pick vref from the (optional) "vcc"
 * regulator or platform data, select internal/external system clock,
 * set up the ring buffer and register the device.  Unwinds via the
 * goto ladder on failure.
 */
static int __devinit ad5933_probe(struct i2c_client *client,
    const struct i2c_device_id *id)
{
    int ret, voltage_uv = 0;
    struct ad5933_platform_data *pdata = client->dev.platform_data;
    struct ad5933_state *st;
    struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));
    if (indio_dev == NULL)
        return -ENOMEM;
    st = iio_priv(indio_dev);
    i2c_set_clientdata(client, indio_dev);
    st->client = client;
    if (!pdata)
        st->pdata = &ad5933_default_pdata;
    else
        st->pdata = pdata;
    /* regulator is optional; IS_ERR(st->reg) marks "not available" */
    st->reg = regulator_get(&client->dev, "vcc");
    if (!IS_ERR(st->reg)) {
        ret = regulator_enable(st->reg);
        if (ret)
            goto error_put_reg;
        voltage_uv = regulator_get_voltage(st->reg);
    }
    if (voltage_uv)
        st->vref_mv = voltage_uv / 1000;
    else
        st->vref_mv = st->pdata->vref_mv;
    if (st->pdata->ext_clk_Hz) {
        st->mclk_hz = st->pdata->ext_clk_Hz;
        st->ctrl_lb = AD5933_CTRL_EXT_SYSCLK;
    } else {
        st->mclk_hz = AD5933_INT_OSC_FREQ_Hz;
        st->ctrl_lb = AD5933_CTRL_INT_SYSCLK;
    }
    ad5933_calc_out_ranges(st);
    INIT_DELAYED_WORK(&st->work, ad5933_work);
    st->poll_time_jiffies = msecs_to_jiffies(AD5933_POLL_TIME_ms);
    indio_dev->dev.parent = &client->dev;
    indio_dev->info = &ad5933_info;
    indio_dev->name = id->name;
    indio_dev->modes = INDIO_DIRECT_MODE;
    indio_dev->channels = ad5933_channels;
    indio_dev->num_channels = 1; /* only register temp0_input */
    ret = ad5933_register_ring_funcs_and_init(indio_dev);
    if (ret)
        goto error_disable_reg;
    /* skip temp0_input, register in0_(real|imag)_raw */
    ret = iio_buffer_register(indio_dev, &ad5933_channels[1], 2);
    if (ret)
        goto error_unreg_ring;
    /* enable both REAL and IMAG channels by default */
    iio_scan_mask_set(indio_dev, indio_dev->buffer, 0);
    iio_scan_mask_set(indio_dev, indio_dev->buffer, 1);
    ret = ad5933_setup(st);
    if (ret)
        goto error_uninitialize_ring;
    ret = iio_device_register(indio_dev);
    if (ret)
        goto error_uninitialize_ring;
    return 0;
error_uninitialize_ring:
    iio_buffer_unregister(indio_dev);
error_unreg_ring:
    iio_sw_rb_free(indio_dev->buffer);
error_disable_reg:
    if (!IS_ERR(st->reg))
        regulator_disable(st->reg);
error_put_reg:
    if (!IS_ERR(st->reg))
        regulator_put(st->reg);
    iio_free_device(indio_dev);
    return ret;
}
/* Remove: tear down in reverse order of probe. */
static __devexit int ad5933_remove(struct i2c_client *client)
{
    struct iio_dev *indio_dev = i2c_get_clientdata(client);
    struct ad5933_state *st = iio_priv(indio_dev);
    iio_device_unregister(indio_dev);
    iio_buffer_unregister(indio_dev);
    iio_sw_rb_free(indio_dev->buffer);
    if (!IS_ERR(st->reg)) {
        regulator_disable(st->reg);
        regulator_put(st->reg);
    }
    iio_free_device(indio_dev);
    return 0;
}
static const struct i2c_device_id ad5933_id[] = {
{ "ad5933", 0 },
{ "ad5934", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, ad5933_id);
static struct i2c_driver ad5933_driver = {
.driver = {
.name = "ad5933",
},
.probe = ad5933_probe,
.remove = __devexit_p(ad5933_remove),
.id_table = ad5933_id,
};
module_i2c_driver(ad5933_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5933 Impedance Conv. Network Analyzer");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
kbc-developers/padfone2_kernel | arch/arm/mach-imx/mach-cpuimx51.c | 4832 | 8429 | /*
*
* Copyright (C) 2010 Eric Bénard <eric@eukrea.com>
*
* based on board-mx51_babbage.c which is
* Copyright 2009 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright (C) 2009-2010 Amit Kucheria <amit.kucheria@canonical.com>
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <mach/eukrea-baseboards.h>
#include <mach/common.h>
#include <mach/hardware.h>
#include <mach/iomux-mx51.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include "devices-imx51.h"
#define CPUIMX51_USBH1_STP IMX_GPIO_NR(1, 27)
#define CPUIMX51_QUARTA_GPIO IMX_GPIO_NR(3, 28)
#define CPUIMX51_QUARTB_GPIO IMX_GPIO_NR(3, 25)
#define CPUIMX51_QUARTC_GPIO IMX_GPIO_NR(3, 26)
#define CPUIMX51_QUARTD_GPIO IMX_GPIO_NR(3, 27)
#define CPUIMX51_QUART_XTAL 14745600
#define CPUIMX51_QUART_REGSHIFT 17
/* USB_CTRL_1 */
#define MX51_USB_CTRL_1_OFFSET 0x10
#define MX51_USB_CTRL_UH1_EXT_CLK_EN (1 << 25)
#define MX51_USB_PLLDIV_12_MHZ 0x00
#define MX51_USB_PLL_DIV_19_2_MHZ 0x01
#define MX51_USB_PLL_DIV_24_MHZ 0x02
static struct plat_serial8250_port serial_platform_data[] = {
{
.mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x400000),
.irq = IMX_GPIO_TO_IRQ(CPUIMX51_QUARTA_GPIO),
.irqflags = IRQF_TRIGGER_HIGH,
.uartclk = CPUIMX51_QUART_XTAL,
.regshift = CPUIMX51_QUART_REGSHIFT,
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
}, {
.mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x800000),
.irq = IMX_GPIO_TO_IRQ(CPUIMX51_QUARTB_GPIO),
.irqflags = IRQF_TRIGGER_HIGH,
.uartclk = CPUIMX51_QUART_XTAL,
.regshift = CPUIMX51_QUART_REGSHIFT,
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
}, {
.mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x1000000),
.irq = IMX_GPIO_TO_IRQ(CPUIMX51_QUARTC_GPIO),
.irqflags = IRQF_TRIGGER_HIGH,
.uartclk = CPUIMX51_QUART_XTAL,
.regshift = CPUIMX51_QUART_REGSHIFT,
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
}, {
.mapbase = (unsigned long)(MX51_CS1_BASE_ADDR + 0x2000000),
.irq = IMX_GPIO_TO_IRQ(CPUIMX51_QUARTD_GPIO),
.irqflags = IRQF_TRIGGER_HIGH,
.uartclk = CPUIMX51_QUART_XTAL,
.regshift = CPUIMX51_QUART_REGSHIFT,
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
}, {
}
};
static struct platform_device serial_device = {
.name = "serial8250",
.id = 0,
.dev = {
.platform_data = serial_platform_data,
},
};
static struct platform_device *devices[] __initdata = {
&serial_device,
};
static iomux_v3_cfg_t eukrea_cpuimx51_pads[] = {
/* UART1 */
MX51_PAD_UART1_RXD__UART1_RXD,
MX51_PAD_UART1_TXD__UART1_TXD,
MX51_PAD_UART1_RTS__UART1_RTS,
MX51_PAD_UART1_CTS__UART1_CTS,
/* I2C2 */
MX51_PAD_GPIO1_2__I2C2_SCL,
MX51_PAD_GPIO1_3__I2C2_SDA,
MX51_PAD_NANDF_D10__GPIO3_30,
/* QUART IRQ */
MX51_PAD_NANDF_D15__GPIO3_25,
MX51_PAD_NANDF_D14__GPIO3_26,
MX51_PAD_NANDF_D13__GPIO3_27,
MX51_PAD_NANDF_D12__GPIO3_28,
/* USB HOST1 */
MX51_PAD_USBH1_CLK__USBH1_CLK,
MX51_PAD_USBH1_DIR__USBH1_DIR,
MX51_PAD_USBH1_NXT__USBH1_NXT,
MX51_PAD_USBH1_DATA0__USBH1_DATA0,
MX51_PAD_USBH1_DATA1__USBH1_DATA1,
MX51_PAD_USBH1_DATA2__USBH1_DATA2,
MX51_PAD_USBH1_DATA3__USBH1_DATA3,
MX51_PAD_USBH1_DATA4__USBH1_DATA4,
MX51_PAD_USBH1_DATA5__USBH1_DATA5,
MX51_PAD_USBH1_DATA6__USBH1_DATA6,
MX51_PAD_USBH1_DATA7__USBH1_DATA7,
MX51_PAD_USBH1_STP__USBH1_STP,
};
static const struct mxc_nand_platform_data
eukrea_cpuimx51_nand_board_info __initconst = {
.width = 1,
.hw_ecc = 1,
.flash_bbt = 1,
};
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
static const
struct imxi2c_platform_data eukrea_cpuimx51_i2c_data __initconst = {
.bitrate = 100000,
};
static struct i2c_board_info eukrea_cpuimx51_i2c_devices[] = {
{
I2C_BOARD_INFO("pcf8563", 0x51),
},
};
/* This function is board specific as the bit mask for the plldiv will also
be different for other Freescale SoCs, thus a common bitmask is not
possible and cannot get place in /plat-mxc/ehci.c.*/
/*
 * Set the OTG PHY clock divider to 19.2 MHz and hand the port to the
 * generic MX51 USB setup with the internal PHY.
 * Returns 0 on success or a negative errno (-ENOMEM if the register
 * window cannot be mapped).
 */
static int initialize_otg_port(struct platform_device *pdev)
{
    u32 v;
    void __iomem *usb_base;
    void __iomem *usbother_base;
    usb_base = ioremap(MX51_USB_OTG_BASE_ADDR, SZ_4K);
    if (!usb_base)
        return -ENOMEM;
    usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET;
    /* Set the PHY clock to 19.2MHz */
    v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
    v &= ~MX5_USB_UTMI_PHYCTRL1_PLLDIV_MASK;
    v |= MX51_USB_PLL_DIV_19_2_MHZ;
    __raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC2_OFFSET);
    /* mapping only needed for this one-off configuration */
    iounmap(usb_base);
    mdelay(10);
    return mx51_initialize_usb_hw(0, MXC_EHCI_INTERNAL_PHY);
}
/*
 * Enable the external clock for the USBH1 ULPI port and hand it to the
 * generic MX51 USB setup.  Returns 0 on success or a negative errno.
 */
static int initialize_usbh1_port(struct platform_device *pdev)
{
    u32 v;
    void __iomem *usb_base;
    void __iomem *usbother_base;
    usb_base = ioremap(MX51_USB_OTG_BASE_ADDR, SZ_4K);
    if (!usb_base)
        return -ENOMEM;
    usbother_base = usb_base + MX5_USBOTHER_REGS_OFFSET;
    /* The clock for the USBH1 ULPI port will come externally from the PHY. */
    v = __raw_readl(usbother_base + MX51_USB_CTRL_1_OFFSET);
    __raw_writel(v | MX51_USB_CTRL_UH1_EXT_CLK_EN, usbother_base + MX51_USB_CTRL_1_OFFSET);
    iounmap(usb_base);
    mdelay(10);
    return mx51_initialize_usb_hw(1, MXC_EHCI_POWER_PINS_ENABLED |
            MXC_EHCI_ITC_NO_THRESHOLD);
}
static const struct mxc_usbh_platform_data dr_utmi_config __initconst = {
.init = initialize_otg_port,
.portsc = MXC_EHCI_UTMI_16BIT,
};
static const struct fsl_usb2_platform_data usb_pdata __initconst = {
.operating_mode = FSL_USB2_DR_DEVICE,
.phy_mode = FSL_USB2_PHY_UTMI_WIDE,
};
static const struct mxc_usbh_platform_data usbh1_config __initconst = {
.init = initialize_usbh1_port,
.portsc = MXC_EHCI_MODE_ULPI,
};
static int otg_mode_host;
/*
 * Kernel command line handler for "otg_mode=host|device".
 * Unrecognized values leave the default (device) and log a notice.
 */
static int __init eukrea_cpuimx51_otg_mode(char *options)
{
    if (strcmp(options, "host") == 0) {
        otg_mode_host = 1;
    } else if (strcmp(options, "device") == 0) {
        otg_mode_host = 0;
    } else {
        pr_info("otg_mode neither \"host\" nor \"device\". "
            "Defaulting to device\n");
    }
    return 0;
}
/*
* Board specific initialization.
*/
static void __init eukrea_cpuimx51_init(void)
{
imx51_soc_init();
mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51_pads,
ARRAY_SIZE(eukrea_cpuimx51_pads));
imx51_add_imx_uart(0, &uart_pdata);
imx51_add_mxc_nand(&eukrea_cpuimx51_nand_board_info);
gpio_request(CPUIMX51_QUARTA_GPIO, "quarta_irq");
gpio_direction_input(CPUIMX51_QUARTA_GPIO);
gpio_free(CPUIMX51_QUARTA_GPIO);
gpio_request(CPUIMX51_QUARTB_GPIO, "quartb_irq");
gpio_direction_input(CPUIMX51_QUARTB_GPIO);
gpio_free(CPUIMX51_QUARTB_GPIO);
gpio_request(CPUIMX51_QUARTC_GPIO, "quartc_irq");
gpio_direction_input(CPUIMX51_QUARTC_GPIO);
gpio_free(CPUIMX51_QUARTC_GPIO);
gpio_request(CPUIMX51_QUARTD_GPIO, "quartd_irq");
gpio_direction_input(CPUIMX51_QUARTD_GPIO);
gpio_free(CPUIMX51_QUARTD_GPIO);
imx51_add_fec(NULL);
platform_add_devices(devices, ARRAY_SIZE(devices));
imx51_add_imx_i2c(1, &eukrea_cpuimx51_i2c_data);
i2c_register_board_info(1, eukrea_cpuimx51_i2c_devices,
ARRAY_SIZE(eukrea_cpuimx51_i2c_devices));
if (otg_mode_host)
imx51_add_mxc_ehci_otg(&dr_utmi_config);
else {
initialize_otg_port(NULL);
imx51_add_fsl_usb2_udc(&usb_pdata);
}
imx51_add_mxc_ehci_hs(1, &usbh1_config);
#ifdef CONFIG_MACH_EUKREA_MBIMX51_BASEBOARD
eukrea_mbimx51_baseboard_init();
#endif
}
/* Clock tree setup: 32.768 kHz, 24 MHz and 22.5792 MHz reference inputs. */
static void __init eukrea_cpuimx51_timer_init(void)
{
    mx51_clocks_init(32768, 24000000, 22579200, 0);
}
static struct sys_timer mxc_timer = {
    .init = eukrea_cpuimx51_timer_init,
};
/* Machine descriptor tying together the board's boot-time callbacks. */
MACHINE_START(EUKREA_CPUIMX51, "Eukrea CPUIMX51 Module")
	/* Maintainer: Eric Bénard <eric@eukrea.com> */
	.atag_offset = 0x100,
	.map_io = mx51_map_io,
	.init_early = imx51_init_early,
	.init_irq = mx51_init_irq,
	.handle_irq = imx51_handle_irq,
	.timer = &mxc_timer,
	.init_machine = eukrea_cpuimx51_init,
	.restart = mxc_restart,
MACHINE_END
| gpl-2.0 |
Droid-Concepts/kernel_samsung_jf | net/sunrpc/xprtrdma/rpc_rdma.c | 5088 | 28365 | /*
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the BSD-type
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* Neither the name of the Network Appliance, Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* rpc_rdma.c
*
* This file contains the guts of the RPC RDMA protocol, and
* does marshaling/unmarshaling, etc. It is also where interfacing
* to the Linux RPC framework lives.
*/
#include "xprt_rdma.h"
#include <linux/highmem.h>
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_TRANS
#endif
/* Transfer strategies a request or reply can be marshaled with. */
enum rpcrdma_chunktype {
	rpcrdma_noch = 0,	/* no chunks: everything fits inline */
	rpcrdma_readch,		/* some argument payload via RDMA Read */
	rpcrdma_areadch,	/* entire request via RDMA Read */
	rpcrdma_writech,	/* some result payload via RDMA Write */
	rpcrdma_replych		/* entire reply via RDMA Write */
};

#ifdef RPC_DEBUG
/* Human-readable names for the chunk types above, for debug output only. */
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif
/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Translate an xdr_buf (head iov, page list, tail iov) into up to
 * 'nsegs' rpcrdma_mr_seg elements starting at 'seg'. Segments are
 * coalesced later, when registered, if the memreg mode permits.
 *
 * Returns the number of segments produced, or 0 if the buffer does
 * not fit into 'nsegs' segments. Never called when the connection's
 * memory registration strategy is 0 (bounce buffers).
 */
static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	struct page **pagelist;
	int remaining, pg_off, pg_idx;
	int count = 0;

	/* The head iov becomes the first segment, unless a nonzero
	 * position indicates the head is being sent inline. */
	if (pos == 0 && xdrbuf->head[0].iov_len) {
		seg[count].mr_page = NULL;
		seg[count].mr_offset = xdrbuf->head[0].iov_base;
		seg[count].mr_len = xdrbuf->head[0].iov_len;
		count++;
	}

	/* One segment per (partial) page of the page list. */
	remaining = xdrbuf->page_len;
	pagelist = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	pg_off = xdrbuf->page_base & ~PAGE_MASK;
	for (pg_idx = 0; remaining && count < nsegs; pg_idx++) {
		seg[count].mr_page = pagelist[pg_idx];
		seg[count].mr_offset = (void *)(unsigned long) pg_off;
		seg[count].mr_len = min_t(u32, PAGE_SIZE - pg_off, remaining);
		BUG_ON(seg[count].mr_len > PAGE_SIZE);
		remaining -= seg[count].mr_len;
		count++;
		pg_off = 0;	/* page offset only applies to first page */
	}

	/* Page data remains but the seg array is exhausted. */
	if (remaining && count == nsegs)
		return 0;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return count;
		if (count == nsegs)
			/* Tail remains, but we're out of segments */
			return 0;
		seg[count].mr_page = NULL;
		seg[count].mr_offset = xdrbuf->tail[0].iov_base;
		seg[count].mr_len = xdrbuf->tail[0].iov_len;
		count++;
	}

	return count;
}
/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 * Assume check against THRESHOLD has been done, and chunks are required.
 * Assume only encoding one list entry for read|write chunks. The NFSv3
 * protocol is simple enough to allow this as it only has a single "bulk
 * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment, however they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 * Read chunklist (a linked list):
 *  N elements, position P (same P for all chunks of same arg!):
 *   1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Write chunklist (a list of (one) counted array):
 *  N elements:
 *   1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Reply chunk (a counted array):
 *  N elements:
 *   1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns the marshaled header length in bytes, or 0 on failure
 * (conversion or registration error); on failure all registrations
 * made so far have been undone.
 */
static unsigned int
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt);
	int nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	/* Position 0 means "the whole message"; otherwise the payload
	 * starts after the inline head iov. */
	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs == 0)
		return 0;

	do {
		/* bind/register the memory, then build chunk from result. */
		int n = rpcrdma_register_external(seg, nsegs,
						cur_wchunk != NULL, r_xprt);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = htonl(pos);
			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {	/* write/reply */
			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;	/* registration may have coalesced n segments */
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;
	BUG_ON(nchunks == 0);
	BUG_ON((r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
	       && (nchunks > 3));

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = htonl(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero;	/* finish the write chunk list */
			*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	/* error path: undo every registration made above; 'pos' is
	 * reused here as a segment index, not a chunk position */
	for (pos = 0; nchunks--;)
		pos += rpcrdma_deregister_external(
				&req->rl_segments[pos], r_xprt, NULL);
	return 0;
}
/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 *
 * Returns the number of pad bytes to insert before the payload
 * (0 when padding was disabled or does not apply). Side effect:
 * rq_svec[0].iov_len grows to cover the pulled-up data.
 */
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	/* Destination is the free space right after the current head iov. */
	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;
	/*
	 * Do optional padding where it makes sense. Alignment of write
	 * payload can help the server, if our setting is accurate.
	 */
	pad -= (curlen + 36/*sizeof(struct rpcrdma_msg_padded)*/);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	/* Move the tail into place first; it lands after the page data. */
	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC: %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	/* Then copy the page list, one (partial) page at a time. */
	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp+page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;	/* offset only applies to the first page */
	}
	/* header now contains entire send message */
	return pad;
}
/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 *
 * Returns 0 on success and fills in req->rl_send_iov/req->rl_niovs;
 * returns -1 when the request cannot be marshaled.
 */
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t hdrlen, rpclen, padlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	/* build RDMA header in private area at front */
	headerp = (struct rpcrdma_msg *) req->rl_base;
	/* don't htonl XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = xdr_one;
	headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = htonl(RDMA_MSG);

	/*
	 * Chunks needed for results?
	 *
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline (but see later).
	 * o Large non-read ops return as a single reply chunk.
	 * o Large read ops return data as write chunk(s), header as inline.
	 *
	 * Note: the NFS code sending down multiple result segments implies
	 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
	 */

	/*
	 * This code can handle read chunks, write chunks OR reply
	 * chunks -- only one type. If the request is too big to fit
	 * inline, then we will choose read chunks. If the request is
	 * a READ, then use write chunks to separate the file data
	 * into pages; otherwise use reply chunks.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 *
	 * Note: the NFS code sending down multiple argument segments
	 * implies the op is a write.
	 * TBD check NFSv4 setacl
	 */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		rtype = rpcrdma_areadch;
	else
		rtype = rpcrdma_readch;

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	BUG_ON(rtype != rpcrdma_noch && wtype != rpcrdma_noch);

	/* Bounce buffers cannot carry chunks at all: refuse the request. */
	if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS &&
	    (rtype != rpcrdma_noch || wtype != rpcrdma_noch)) {
		/* forced to "pure inline"? */
		dprintk("RPC: %s: too much data (%d/%d) for inline\n",
			__func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len);
		return -1;
	}

	hdrlen = 28; /*sizeof *headerp;*/
	padlen = 0;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		padlen = rpcrdma_inline_pullup(rqst,
						RPCRDMA_INLINE_PAD_VALUE(rqst));

		if (padlen) {
			/* Padded inline: switch to the RDMA_MSGP layout. */
			headerp->rm_type = htonl(RDMA_MSGP);
			headerp->rm_body.rm_padded.rm_align =
				htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
			headerp->rm_body.rm_padded.rm_thresh =
				htonl(RPCRDMA_INLINE_PAD_THRESH);
			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
			hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
			BUG_ON(wtype != rpcrdma_noch);

		} else {
			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
			/* new length after pullup */
			rpclen = rqst->rq_svec[0].iov_len;
			/*
			 * Currently we try to not actually use read inline.
			 * Reply chunks have the desirable property that
			 * they land, packed, directly in the target buffers
			 * without headers, so they require no fixup. The
			 * additional RDMA Write op sends the same amount
			 * of data, streams on-the-wire and adds no overhead
			 * on receive. Therefore, we request a reply chunk
			 * for non-writes wherever feasible and efficient.
			 */
			if (wtype == rpcrdma_noch &&
			    r_xprt->rx_ia.ri_memreg_strategy > RPCRDMA_REGISTER)
				wtype = rpcrdma_replych;
		}
	}

	/*
	 * Marshal chunks. This routine will return the header length
	 * consumed by marshaling.
	 */
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_snd_buf, headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_rcv_buf, headerp, wtype);
	}

	/* 0 from rpcrdma_create_chunks() means marshaling failed. */
	if (hdrlen == 0)
		return -1;

	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
		headerp, base, req->rl_iov.lkey);

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = req->rl_iov.addr;
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = req->rl_iov.lkey;

	req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = req->rl_iov.lkey;

	req->rl_niovs = 2;

	if (padlen) {
		struct rpcrdma_ep *ep = &r_xprt->rx_ep;

		req->rl_send_iov[2].addr = ep->rep_pad.addr;
		req->rl_send_iov[2].length = padlen;
		req->rl_send_iov[2].lkey = ep->rep_pad.lkey;

		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
		req->rl_send_iov[3].lkey = req->rl_iov.lkey;

		req->rl_niovs = 4;
	}

	return 0;
}
/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 *
 * Walks the counted chunk array pointed at by *iptrp, sums the
 * advertised segment lengths, and advances *iptrp past the array
 * (and, for a write chunk list, past its zero terminator).
 * Returns the total length, or -1 on a malformed list.
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	struct rpcrdma_write_chunk *chunk;
	unsigned int nchunks, k, bytes;

	/* The array is preceded by its element count. */
	nchunks = ntohl(**iptrp);
	if (nchunks > max)
		return -1;

	chunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	bytes = 0;
	for (k = 0; k < nchunks; k++, chunk++) {
		struct rpcrdma_segment *seg = &chunk->wc_target;

		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				ntohl(seg->rs_length),
				(unsigned long long)off,
				ntohl(seg->rs_handle));
		}
		bytes += ntohl(seg->rs_length);
	}

	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *term = (__be32 *) chunk;

		if (*term++ != xdr_zero)
			return -1;
		chunk = (struct rpcrdma_write_chunk *) term;
	}

	/* Never walk past the end of the received buffer. */
	if ((char *) chunk > rep->rr_base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) chunk;
	return bytes;
}
/*
 * Scatter inline received data back into provided iov's.
 *
 * 'srcp' points at 'copy_len' bytes of inline reply data; the head iov
 * is re-pointed at it, the page list and tail are copied in place, and
 * 'pad' zero bytes of implicit padding are appended to the tail.
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;	/* remember length for accounting below */
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
				rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;	/* offset applies to first page only */
		}
		rqst->rq_rcv_buf.page_len = olen - copy_len;
	} else
		rqst->rq_rcv_buf.page_len = 0;

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		/* tail may already alias the source; only move if needed */
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC: %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}
/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down and wake
 * pending RPC tasks; the rpc timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	struct rpc_xprt *xprt = ep->rep_xprt;

	spin_lock_bh(&xprt->transport_lock);

	/* Bump the connect cookie, skipping 0 (a reserved value). */
	xprt->connect_cookie++;
	if (xprt->connect_cookie == 0)
		xprt->connect_cookie++;

	if (ep->rep_connected > 0) {
		if (!xprt_test_and_set_connected(xprt))
			xprt_wake_pending_tasks(xprt, 0);
	} else if (xprt_test_and_clear_connected(xprt)) {
		xprt_wake_pending_tasks(xprt, -ENOTCONN);
	}

	spin_unlock_bh(&xprt->transport_lock);
}
/*
 * This function is called when memory window unbind which we are waiting
 * for completes. Just use rr_func (zeroed by upcall) to signal completion.
 */
static void
rpcrdma_unbind_func(struct rpcrdma_rep *rep)
{
	/* wake the waiter in the deregister path */
	wake_up(&rep->rr_unbind);
}
/*
 * Called as a tasklet to do req/reply match and complete a request
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 *
 * Note on locking: the 'repost' label is reached from several early
 * error paths; none of them holds xprt->transport_lock (the two paths
 * that take it drop it before jumping).
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int i, rdmalen, status;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	/* 28 bytes is the fixed RPC/RDMA header size used throughout */
	if (rep->rr_len < 28) {
		dprintk("RPC: %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = (struct rpcrdma_msg *) rep->rr_base;
	if (headerp->rm_vers != xdr_one) {
		dprintk("RPC: %s: invalid version %d\n",
			__func__, ntohl(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, headerp->rm_xid, rep->rr_len);
repost:
		/* count the bad reply and re-arm the receive buffer */
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);
		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: duplicate reply 0x%p to RPC "
			"request 0x%p: xid 0x%08x\n", __func__, rep, req,
			headerp->rm_xid);
		goto repost;
	}

	dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
		" RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst, headerp->rm_xid);

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case htonl(RDMA_MSG):
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp + 28);
			rep->rr_len -= 28; /*sizeof *headerp;*/
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case htonl(RDMA_NOMSG):
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp + 28);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, ntohl(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	/* If using mw bind, start the deregister process now. */
	/* (Note: if mr_free(), cannot perform it here, in tasklet context) */
	if (req->rl_nchunks) switch (r_xprt->rx_ia.ri_memreg_strategy) {
	case RPCRDMA_MEMWINDOWS:
		/* unbind all but the last chunk synchronously; the last one
		 * carries 'rep' so its completion signals rpcrdma_unbind_func */
		for (i = 0; req->rl_nchunks-- > 1;)
			i += rpcrdma_deregister_external(
				&req->rl_segments[i], r_xprt, NULL);
		/* Optionally wait (not here) for unbinds to complete */
		rep->rr_func = rpcrdma_unbind_func;
		(void) rpcrdma_deregister_external(&req->rl_segments[i],
			r_xprt, rep);
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
		for (i = 0; req->rl_nchunks--;)
			i += rpcrdma_deregister_external(&req->rl_segments[i],
				r_xprt, NULL);
		break;
	default:
		break;
	}

	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}
| gpl-2.0 |
TeamGlide/android_kernel_htc_msm7x30 | drivers/scsi/NCR5380.c | 7648 | 93330 | /*
* NCR 5380 generic driver routines. These should make it *trivial*
* to implement 5380 SCSI drivers under Linux with a non-trantor
* architecture.
*
* Note that these routines also work with NR53c400 family chips.
*
* Copyright 1993, Drew Eckhardt
* Visionary Computing
* (Unix and Linux consulting and custom programming)
* drew@colorado.edu
* +1 (303) 666-5836
*
* DISTRIBUTION RELEASE 6.
*
* For more information, please consult
*
* NCR 5380 Family
* SCSI Protocol Controller
* Databook
*
* NCR Microelectronics
* 1635 Aeroplaza Drive
* Colorado Springs, CO 80916
* 1+ (719) 578-3400
* 1+ (800) 334-5454
*/
/*
* $Log: NCR5380.c,v $
* Revision 1.10 1998/9/2 Alan Cox
* (alan@lxorguk.ukuu.org.uk)
* Fixed up the timer lockups reported so far. Things still suck. Looking
* forward to 2.3 and per device request queues. Then it'll be possible to
* SMP thread this beast and improve life no end.
* Revision 1.9 1997/7/27 Ronald van Cuijlenborg
* (ronald.van.cuijlenborg@tip.nl or nutty@dds.nl)
* (hopefully) fixed and enhanced USLEEP
* added support for DTC3181E card (for Mustek scanner)
*
* Revision 1.8 Ingmar Baumgart
* (ingmar@gonzo.schwaben.de)
* added support for NCR53C400a card
*
* Revision 1.7 1996/3/2 Ray Van Tassle (rayvt@comm.mot.com)
* added proc_info
* added support needed for DTC 3180/3280
* fixed a couple of bugs
*
* Revision 1.5 1994/01/19 09:14:57 drew
* Fixed udelay() hack that was being used on DATAOUT phases
* instead of a proper wait for the final handshake.
*
* Revision 1.4 1994/01/19 06:44:25 drew
* *** empty log message ***
*
* Revision 1.3 1994/01/19 05:24:40 drew
* Added support for TCR LAST_BYTE_SENT bit.
*
* Revision 1.2 1994/01/15 06:14:11 drew
* REAL DMA support, bug fixes.
*
* Revision 1.1 1994/01/15 06:00:54 drew
* Initial revision
*
*/
/*
* Further development / testing that should be done :
* 1. Cleanup the NCR5380_transfer_dma function and DMA operation complete
* code so that everything does the same thing that's done at the
* end of a pseudo-DMA read operation.
*
* 2. Fix REAL_DMA (interrupt driven, polled works fine) -
* basically, transfer size needs to be reduced by one
* and the last byte read as is done with PSEUDO_DMA.
*
* 4. Test SCSI-II tagged queueing (I have no devices which support
* tagged queueing)
*
* 5. Test linked command handling code after Eric is ready with
* the high level code.
*/
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>
/* Debugging defaults to fully off unless the wrapper driver defined it. */
#ifndef NDEBUG
#define NDEBUG 0
#endif
#ifndef NDEBUG_ABORT
#define NDEBUG_ABORT 0
#endif

#if (NDEBUG & NDEBUG_LISTS)
/* Command-queue list tracing: log every add/remove; the udelay(5) when an
 * element would link to itself slows things enough to catch corruption. */
#define LIST(x,y) {printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
#define REMOVE(w,x,y,z) {printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
#else
#define LIST(x,y)
#define REMOVE(w,x,y,z)
#endif

/* Linked commands and (interrupt-driven) REAL_DMA are unfinished features;
 * force them off unless "notyet" is defined. */
#ifndef notyet
#undef LINKED
#undef REAL_DMA
#endif

#ifdef REAL_DMA_POLL
#undef READ_OVERRUNS
#define READ_OVERRUNS
#endif

/* Delay between consecutive port accesses; some boards need none. */
#ifdef BOARD_REQUIRES_NO_DELAY
#define io_recovery_delay(x)
#else
#define io_recovery_delay(x) udelay(x)
#endif
/*
* Design
*
* This is a generic 5380 driver. To use it on a different platform,
* one simply writes appropriate system specific macros (ie, data
* transfer - some PC's will use the I/O bus, 68K's must use
* memory mapped) and drops this file in their 'C' wrapper.
*
* (Note from hch: unfortunately it was not enough for the different
* m68k folks and instead of improving this driver they copied it
* and hacked it up for their needs. As a consequence they lost
* most updates to this driver. Maybe someone will fix all these
* drivers to use a common core one day..)
*
* As far as command queueing, two queues are maintained for
* each 5380 in the system - commands that haven't been issued yet,
* and commands that are currently executing. This means that an
* unlimited number of commands may be queued, letting
* more commands propagate from the higher driver levels giving higher
* throughput. Note that both I_T_L and I_T_L_Q nexuses are supported,
* allowing multiple commands to propagate all the way to a SCSI-II device
* while a command is already executing.
*
*
* Issues specific to the NCR5380 :
*
* When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead
* piece of hardware that requires you to sit in a loop polling for
* the REQ signal as long as you are connected. Some devices are
* brain dead (ie, many TEXEL CD ROM drives) and won't disconnect
* while doing long seek operations.
*
* The workaround for this is to keep track of devices that have
* disconnected. If the device hasn't disconnected, for commands that
* should disconnect, we do something like
*
* while (!REQ is asserted) { sleep for N usecs; poll for M usecs }
*
* Some tweaking of N and M needs to be done. An algorithm based
* on "time to data" would give the best results as long as short time
* to datas (ie, on the same track) were considered, however these
* broken devices are the exception rather than the rule and I'd rather
* spend my time optimizing for the normal case.
*
* Architecture :
*
* At the heart of the design is a coroutine, NCR5380_main,
* which is started from a workqueue for each NCR5380 host in the
* system. It attempts to establish I_T_L or I_T_L_Q nexuses by
* removing the commands from the issue queue and calling
* NCR5380_select() if a nexus is not established.
*
* Once a nexus is established, the NCR5380_information_transfer()
* phase goes through the various phases as instructed by the target.
* if the target goes into MSG IN and sends a DISCONNECT message,
* the command structure is placed into the per instance disconnected
* queue, and NCR5380_main tries to find more work. If the target is
* idle for too long, the system will try to sleep.
*
* If a command has disconnected, eventually an interrupt will trigger,
* calling NCR5380_intr() which will in turn call NCR5380_reselect
* to reestablish a nexus. This will run main if necessary.
*
* On command termination, the done function will be called as
* appropriate.
*
* SCSI pointers are maintained in the SCp field of SCSI command
* structures, being initialized after the command is connected
* in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
* Note that in violation of the standard, an implicit SAVE POINTERS operation
* is done, since some BROKEN disks fail to issue an explicit SAVE POINTERS.
*/
/*
* Using this file :
* This file a skeleton Linux SCSI driver for the NCR 5380 series
* of chips. To use it, you write an architecture specific functions
* and macros and include this file in your driver.
*
* These macros control options :
* AUTOPROBE_IRQ - if defined, the NCR5380_probe_irq() function will be
* defined.
*
* AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
* for commands that return with a CHECK CONDITION status.
*
* DIFFERENTIAL - if defined, NCR53c81 chips will use external differential
* transceivers.
*
* DONT_USE_INTR - if defined, never use interrupts, even if we probe or
* override-configure an IRQ.
*
* LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512
* bytes at a time. Since interrupts are disabled by default during
* these transfers, we might need this to give reasonable interrupt
* service time if the transfer size gets too large.
*
* LINKED - if defined, linked commands are supported.
*
* PSEUDO_DMA - if defined, PSEUDO DMA is used during the data transfer phases.
*
* REAL_DMA - if defined, REAL DMA is used during the data transfer phases.
*
* REAL_DMA_POLL - if defined, REAL DMA is used but the driver doesn't
* rely on phase mismatch and EOP interrupts to determine end
* of phase.
*
* UNSAFE - leave interrupts enabled during pseudo-DMA transfers. You
* only really want to use this if you're having a problem with
* dropped characters during high speed communications, and even
* then, you're going to be better off twiddling with transfersize
* in the high level code.
*
* Defaults for these will be provided although the user may want to adjust
* these to allocate CPU resources to the SCSI driver or "real" code.
*
* USLEEP_SLEEP - amount of time, in jiffies, to sleep
*
* USLEEP_POLL - amount of time, in jiffies, to poll
*
* These macros MUST be defined :
* NCR5380_local_declare() - declare any local variables needed for your
* transfer routines.
*
* NCR5380_setup(instance) - initialize any local variables needed from a given
* instance of the host adapter for NCR5380_{read,write,pread,pwrite}
*
* NCR5380_read(register) - read from the specified register
*
* NCR5380_write(register, value) - write to the specific register
*
* NCR5380_implementation_fields - additional fields needed for this
* specific implementation of the NCR5380
*
* Either real DMA *or* pseudo DMA may be implemented
* REAL functions :
* NCR5380_REAL_DMA should be defined if real DMA is to be used.
* Note that the DMA setup functions should return the number of bytes
* that they were able to program the controller for.
*
* Also note that generic i386/PC versions of these macros are
* available as NCR5380_i386_dma_write_setup,
* NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
*
* NCR5380_dma_write_setup(instance, src, count) - initialize
* NCR5380_dma_read_setup(instance, dst, count) - initialize
* NCR5380_dma_residual(instance); - residual count
*
* PSEUDO functions :
* NCR5380_pwrite(instance, src, count)
* NCR5380_pread(instance, dst, count);
*
* The generic driver is initialized by calling NCR5380_init(instance),
* after setting the appropriate host specific fields and ID. If the
* driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
* possible) function may be used.
*/
static int do_abort(struct Scsi_Host *host);
static void do_reset(struct Scsi_Host *host);
/*
* initialize_SCp - init the scsi pointer field
* @cmd: command block to set up
*
* Set up the internal fields in the SCSI command.
*/
static __inline__ void initialize_SCp(Scsi_Cmnd * cmd)
{
	/*
	 * Prime the cmd->SCp scatter/gather cursor so that the transfer
	 * routines can walk the data buffers of this command.
	 */
	if (!scsi_bufflen(cmd)) {
		/* No data phase: leave every cursor field empty. */
		cmd->SCp.buffer = NULL;
		cmd->SCp.buffers_residual = 0;
		cmd->SCp.ptr = NULL;
		cmd->SCp.this_residual = 0;
		return;
	}

	/* Point at the first scatterlist entry and remember how many follow. */
	cmd->SCp.buffer = scsi_sglist(cmd);
	cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
	cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
	cmd->SCp.this_residual = cmd->SCp.buffer->length;
}
/**
* NCR5380_poll_politely - wait for NCR5380 status bits
* @instance: controller to poll
* @reg: 5380 register to poll
* @bit: Bitmask to check
* @val: Value required to exit
*
* Polls the NCR5380 in a reasonably efficient manner waiting for
* an event to occur, after a short quick poll we begin giving the
* CPU back in non IRQ contexts
*
* Returns the value of the register or a negative error code.
*/
static int NCR5380_poll_politely(struct Scsi_Host *instance, int reg, int bit, int val, int t)
{
	NCR5380_local_declare();
	unsigned long deadline = jiffies + t;
	int spins;
	int r;

	NCR5380_setup(instance);

	/*
	 * First a short busy-wait burst (roughly 500 register reads, about
	 * 8uS per cpu access) to catch fast devices without sleeping.
	 */
	for (spins = 500; spins > 0; spins--) {
		r = NCR5380_read(reg);
		if ((r & bit) == val)
			return 0;
		cpu_relax();
	}

	/* Still not there: poll until the deadline, yielding when we may. */
	while (time_before(jiffies, deadline)) {
		r = NCR5380_read(reg);
		if ((r & bit) == val)
			return 0;
		if (in_interrupt())
			cpu_relax();
		else
			cond_resched();
	}
	return -ETIMEDOUT;
}
/*
 * Table mapping SCSI information-transfer phase codes to printable
 * names; scanned by the debug code (PHASE_UNKNOWN is the sentinel).
 */
static struct {
unsigned char value;
const char *name;
} phases[] __maybe_unused = {
{PHASE_DATAOUT, "DATAOUT"},
{PHASE_DATAIN, "DATAIN"},
{PHASE_CMDOUT, "CMDOUT"},
{PHASE_STATIN, "STATIN"},
{PHASE_MSGOUT, "MSGOUT"},
{PHASE_MSGIN, "MSGIN"},
{PHASE_UNKNOWN, "UNKNOWN"}
};
#if NDEBUG
/*
 * Bit-to-name tables used by NCR5380_print() to decode the chip's
 * registers; each table ends with a {0, NULL} sentinel.
 */
static struct {
unsigned char mask;
const char *name;
} signals[] = {
/* STATUS_REG bits */
{SR_DBP, "PARITY"},
{SR_RST, "RST"},
{SR_BSY, "BSY"},
{SR_REQ, "REQ"},
{SR_MSG, "MSG"},
{SR_CD, "CD"},
{SR_IO, "IO"},
{SR_SEL, "SEL"},
{0, NULL}
},
/* BUS_AND_STATUS_REG bits */
basrs[] = {
{BASR_ATN, "ATN"},
{BASR_ACK, "ACK"},
{0, NULL}
},
/* INITIATOR_COMMAND_REG bits */
icrs[] = {
{ICR_ASSERT_RST, "ASSERT RST"},
{ICR_ASSERT_ACK, "ASSERT ACK"},
{ICR_ASSERT_BSY, "ASSERT BSY"},
{ICR_ASSERT_SEL, "ASSERT SEL"},
{ICR_ASSERT_ATN, "ASSERT ATN"},
{ICR_ASSERT_DATA, "ASSERT DATA"},
{0, NULL}
},
/* MODE_REG bits */
mrs[] = {
{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"},
{MR_TARGET, "MODE TARGET"},
{MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"},
{MR_ENABLE_PAR_INTR, "MODE PARITY INTR"},
{MR_MONITOR_BSY, "MODE MONITOR BSY"},
{MR_DMA_MODE, "MODE DMA"},
{MR_ARBITRATE, "MODE ARBITRATION"},
{0, NULL}
};
/**
* NCR5380_print - print scsi bus signals
* @instance: adapter state to dump
*
* Print the SCSI bus signals for debugging purposes
*
* Locks: caller holds hostdata lock (not essential)
*/
static void NCR5380_print(struct Scsi_Host *instance)
{
NCR5380_local_declare();
unsigned char status, data, basr, mr, icr, i;
NCR5380_setup(instance);
/* Snapshot the interesting registers before decoding them. */
data = NCR5380_read(CURRENT_SCSI_DATA_REG);
status = NCR5380_read(STATUS_REG);
mr = NCR5380_read(MODE_REG);
icr = NCR5380_read(INITIATOR_COMMAND_REG);
basr = NCR5380_read(BUS_AND_STATUS_REG);
/* NOTE(review): 'data' is read but never printed; the read itself
 * looks intentional (sampling the bus) -- confirm before removing. */
printk("STATUS_REG: %02x ", status);
for (i = 0; signals[i].mask; ++i)
if (status & signals[i].mask)
printk(",%s", signals[i].name);
printk("\nBASR: %02x ", basr);
for (i = 0; basrs[i].mask; ++i)
if (basr & basrs[i].mask)
printk(",%s", basrs[i].name);
printk("\nICR: %02x ", icr);
for (i = 0; icrs[i].mask; ++i)
if (icr & icrs[i].mask)
printk(",%s", icrs[i].name);
printk("\nMODE: %02x ", mr);
for (i = 0; mrs[i].mask; ++i)
if (mr & mrs[i].mask)
printk(",%s", mrs[i].name);
printk("\n");
}
/*
* NCR5380_print_phase - show SCSI phase
* @instance: adapter to dump
*
* Print the current SCSI phase for debugging purposes
*
* Locks: none
*/
static void NCR5380_print_phase(struct Scsi_Host *instance)
{
NCR5380_local_declare();
unsigned char status;
int i;
NCR5380_setup(instance);
status = NCR5380_read(STATUS_REG);
/* Phase lines are only meaningful while the target asserts REQ. */
if (!(status & SR_REQ))
printk("scsi%d : REQ not asserted, phase unknown.\n", instance->host_no);
else {
/* Scan phases[]; stops on a match or on the PHASE_UNKNOWN sentinel. */
for (i = 0; (phases[i].value != PHASE_UNKNOWN) && (phases[i].value != (status & PHASE_MASK)); ++i);
printk("scsi%d : phase %s\n", instance->host_no, phases[i].name);
}
}
#endif
/*
* These need tweaking, and would probably work best as per-device
* flags initialized differently for disk, tape, cd, etc devices.
* People with broken devices are free to experiment as to what gives
* the best results for them.
*
* USLEEP_SLEEP should be a minimum seek time.
*
* USLEEP_POLL should be a maximum rotational latency.
*/
#ifndef USLEEP_SLEEP
/* 20 ms (reasonable hard disk speed) */
#define USLEEP_SLEEP (20*HZ/1000)
#endif
/* 300 RPM (floppy speed) */
#ifndef USLEEP_POLL
#define USLEEP_POLL (200*HZ/1000)
#endif
#ifndef USLEEP_WAITLONG
/* RvC: (reasonable time to wait on select error) */
#define USLEEP_WAITLONG USLEEP_SLEEP
#endif
/*
* Function : int should_disconnect (unsigned char cmd)
*
* Purpose : decide whether a command would normally disconnect or
* not, since if it won't disconnect we should go to sleep.
*
* Input : cmd - opcode of SCSI command
*
* Returns : DISCONNECT_LONG if we should disconnect for a really long
* time (ie always, sleep, look for REQ active, sleep),
* DISCONNECT_TIME_TO_DATA if we would only disconnect for a normal
* time-to-data delay, DISCONNECT_NONE if this command would return
* immediately.
*
* Future sleep algorithms based on time to data can exploit
* something like this so they can differentiate between "normal"
* (ie, read, write, seek) and unusual commands (ie, * format).
*
* Note : We don't deal with commands that handle an immediate disconnect,
*
*/
static int should_disconnect(unsigned char cmd)
{
	/*
	 * Classify a SCSI opcode by how long the target is expected to
	 * stay disconnected while executing it, so the sleep logic can
	 * pick an appropriate polling strategy.
	 */
	switch (cmd) {
	case FORMAT_UNIT:
	case SEARCH_HIGH:
	case SEARCH_LOW:
	case SEARCH_EQUAL:
		/* Potentially very slow operations: sleep for a long time. */
		return DISCONNECT_LONG;
	case READ_6:
	case WRITE_6:
	case SEEK_6:
	case READ_10:
	case WRITE_10:
	case SEEK_10:
		/* Ordinary media access: only a time-to-data delay. */
		return DISCONNECT_TIME_TO_DATA;
	default:
		/* Everything else is expected to complete immediately. */
		return DISCONNECT_NONE;
	}
}
/*
 * NCR5380_set_timer - schedule a deferred run of the driver coroutine
 * @hostdata: adapter private data
 * @timeout: delay in jiffies before NCR5380_main() should run again
 */
static void NCR5380_set_timer(struct NCR5380_hostdata *hostdata, unsigned long timeout)
{
/* Record when the delay elapses; NCR5380_main() tests time_expires. */
hostdata->time_expires = jiffies + timeout;
schedule_delayed_work(&hostdata->coroutine, timeout);
}
static int probe_irq __initdata = 0;
/**
* probe_intr - helper for IRQ autoprobe
* @irq: interrupt number
* @dev_id: unused
* @regs: unused
*
* Set a flag to indicate the IRQ in question was received. This is
* used by the IRQ probe code.
*/
static irqreturn_t __init probe_intr(int irq, void *dev_id)
{
/* Record which line fired; read back by NCR5380_probe_irq(). */
probe_irq = irq;
return IRQ_HANDLED;
}
/**
* NCR5380_probe_irq - find the IRQ of an NCR5380
* @instance: NCR5380 controller
* @possible: bitmask of ISA IRQ lines
*
* Autoprobe for the IRQ line used by the NCR5380 by triggering an IRQ
* and then looking to see what interrupt actually turned up.
*
* Locks: none, irqs must be enabled on entry
*/
static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
int possible)
{
NCR5380_local_declare();
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
unsigned long timeout;
int trying_irqs, i, mask;
NCR5380_setup(instance);
/* Claim every ISA IRQ line (0..15) permitted by the 'possible' mask. */
for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1)
if ((mask & possible) && (request_irq(i, &probe_intr, IRQF_DISABLED, "NCR-probe", NULL) == 0))
trying_irqs |= mask;
/* Give the interrupt up to 250 ms to arrive. */
timeout = jiffies + (250 * HZ / 1000);
probe_irq = SCSI_IRQ_NONE;
/*
 * A interrupt is triggered whenever BSY = false, SEL = true
 * and a bit set in the SELECT_ENABLE_REG is asserted on the
 * SCSI bus.
 *
 * Note that the bus is only driven when the phase control signals
 * (I/O, C/D, and MSG) match those in the TCR, so we must reset that
 * to zero.
 */
NCR5380_write(TARGET_COMMAND_REG, 0);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
/* Select our own ID: this provokes the interrupt we are probing for. */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL);
while (probe_irq == SCSI_IRQ_NONE && time_before(jiffies, timeout))
schedule_timeout_uninterruptible(1);
/* Quiesce the chip and release every IRQ line we claimed. */
NCR5380_write(SELECT_ENABLE_REG, 0);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
for (i = 0, mask = 1; i < 16; ++i, mask <<= 1)
if (trying_irqs & mask)
free_irq(i, NULL);
return probe_irq;
}
/**
* NCR58380_print_options - show options
* @instance: unused for now
*
* Called by probe code indicating the NCR5380 driver options that
* were selected. At some point this will switch to runtime options
* read from the adapter in question
*
* Locks: none
*/
static void __init __maybe_unused
NCR5380_print_options(struct Scsi_Host *instance)
{
/* Each compile-time option contributes one literal to the banner;
 * the string pieces are concatenated by the preprocessor. */
printk(" generic options"
#ifdef AUTOPROBE_IRQ
" AUTOPROBE_IRQ"
#endif
#ifdef AUTOSENSE
" AUTOSENSE"
#endif
#ifdef DIFFERENTIAL
" DIFFERENTIAL"
#endif
#ifdef REAL_DMA
" REAL DMA"
#endif
#ifdef REAL_DMA_POLL
" REAL DMA POLL"
#endif
#ifdef PARITY
" PARITY"
#endif
#ifdef PSEUDO_DMA
" PSEUDO DMA"
#endif
#ifdef UNSAFE
" UNSAFE "
#endif
);
printk(" USLEEP, USLEEP_POLL=%d USLEEP_SLEEP=%d", USLEEP_POLL, USLEEP_SLEEP);
printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
/* Mention the 53C400 core only when the flag says we drive one. */
if (((struct NCR5380_hostdata *) instance->hostdata)->flags & FLAG_NCR53C400) {
printk(" ncr53c400 release=%d", NCR53C400_PUBLIC_RELEASE);
}
}
/**
* NCR5380_print_status - dump controller info
* @instance: controller to dump
*
* Print commands in the various queues, called from NCR5380_abort
* and NCR5380_debug to aid debugging.
*
* Locks: called functions disable irqs
*/
static void NCR5380_print_status(struct Scsi_Host *instance)
{
/* Both dprint macros compile to no-ops unless NDEBUG enables them. */
NCR5380_dprint(NDEBUG_ANY, instance);
NCR5380_dprint_phase(NDEBUG_ANY, instance);
}
/******************************************/
/*
* /proc/scsi/[dtc pas16 t128 generic]/[0-ASC_NUM_BOARD_SUPPORTED]
*
* *buffer: I/O buffer
* **start: if inout == FALSE pointer into buffer where user read should start
* offset: current offset
* length: length of buffer
* hostno: Scsi_Host host_no
* inout: TRUE - user is writing; FALSE - user is reading
*
* Return the number of bytes read from or written
*/
#undef SPRINTF
#define SPRINTF(args...) do { if(pos < buffer + length-80) pos += sprintf(pos, ## args); } while(0)
static
char *lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, char *pos, char *buffer, int length);
static
char *lprint_command(unsigned char *cmd, char *pos, char *buffer, int len);
static
char *lprint_opcode(int opcode, char *pos, char *buffer, int length);
static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
char *buffer, char **start, off_t offset, int length, int inout)
{
/* SPRINTF expands in terms of 'pos', 'buffer' and 'length' -- do not
 * rename these locals/parameters. */
char *pos = buffer;
struct NCR5380_hostdata *hostdata;
Scsi_Cmnd *ptr;
hostdata = (struct NCR5380_hostdata *) instance->hostdata;
if (inout) { /* Has data been written to the file ? */
/* A write only resets the per-board highwater statistics. */
#ifdef DTC_PUBLIC_RELEASE
dtc_wmaxi = dtc_maxi = 0;
#endif
#ifdef PAS16_PUBLIC_RELEASE
pas_wmaxi = pas_maxi = 0;
#endif
return (-ENOSYS); /* Currently this is a no-op */
}
SPRINTF("NCR5380 core release=%d. ", NCR5380_PUBLIC_RELEASE);
if (((struct NCR5380_hostdata *) instance->hostdata)->flags & FLAG_NCR53C400)
SPRINTF("ncr53c400 release=%d. ", NCR53C400_PUBLIC_RELEASE);
#ifdef DTC_PUBLIC_RELEASE
SPRINTF("DTC 3180/3280 release %d", DTC_PUBLIC_RELEASE);
#endif
#ifdef T128_PUBLIC_RELEASE
SPRINTF("T128 release %d", T128_PUBLIC_RELEASE);
#endif
#ifdef GENERIC_NCR5380_PUBLIC_RELEASE
SPRINTF("Generic5380 release %d", GENERIC_NCR5380_PUBLIC_RELEASE);
#endif
#ifdef PAS16_PUBLIC_RELEASE
SPRINTF("PAS16 release=%d", PAS16_PUBLIC_RELEASE);
#endif
SPRINTF("\nBase Addr: 0x%05lX ", (long) instance->base);
SPRINTF("io_port: %04x ", (int) instance->io_port);
if (instance->irq == SCSI_IRQ_NONE)
SPRINTF("IRQ: None.\n");
else
SPRINTF("IRQ: %d.\n", instance->irq);
#ifdef DTC_PUBLIC_RELEASE
SPRINTF("Highwater I/O busy_spin_counts -- write: %d read: %d\n", dtc_wmaxi, dtc_maxi);
#endif
#ifdef PAS16_PUBLIC_RELEASE
SPRINTF("Highwater I/O busy_spin_counts -- write: %d read: %d\n", pas_wmaxi, pas_maxi);
#endif
/* Take the host lock so the queues do not change while we walk them. */
spin_lock_irq(instance->host_lock);
if (!hostdata->connected)
SPRINTF("scsi%d: no currently connected command\n", instance->host_no);
else
pos = lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, pos, buffer, length);
SPRINTF("scsi%d: issue_queue\n", instance->host_no);
for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length);
SPRINTF("scsi%d: disconnected_queue\n", instance->host_no);
for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; ptr = (Scsi_Cmnd *) ptr->host_scribble)
pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length);
spin_unlock_irq(instance->host_lock);
/* Classic procfs read protocol: report how much of [offset, offset+length)
 * of the generated text is available in the buffer. */
*start = buffer;
if (pos - buffer < offset)
return 0;
else if (pos - buffer - offset < length)
return pos - buffer - offset;
return length;
}
/* Append a one-command summary (target/lun plus CDB) to the proc buffer.
 * SPRINTF captures 'pos'/'buffer'/'length' by name -- keep these names. */
static char *lprint_Scsi_Cmnd(Scsi_Cmnd * cmd, char *pos, char *buffer, int length)
{
SPRINTF("scsi%d : destination target %d, lun %d\n", cmd->device->host->host_no, cmd->device->id, cmd->device->lun);
SPRINTF(" command = ");
pos = lprint_command(cmd->cmnd, pos, buffer, length);
return (pos);
}
/* Append the opcode name and the remaining CDB bytes in hex.
 * SPRINTF captures 'pos'/'buffer'/'length' by name -- keep these names. */
static char *lprint_command(unsigned char *command, char *pos, char *buffer, int length)
{
int i, s;
pos = lprint_opcode(command[0], pos, buffer, length);
/* COMMAND_SIZE() gives the CDB length implied by the opcode group. */
for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i)
SPRINTF("%02x ", command[i]);
SPRINTF("\n");
return (pos);
}
/* Append a SCSI opcode in decimal and hex form to the proc buffer. */
static char *lprint_opcode(int opcode, char *pos, char *buffer, int length)
{
SPRINTF("%2d (0x%02x)", opcode, opcode);
return (pos);
}
/**
* NCR5380_init - initialise an NCR5380
* @instance: adapter to configure
* @flags: control flags
*
* Initializes *instance and corresponding 5380 chip,
* with flags OR'd into the initial flags value.
*
* Notes : I assume that the host, hostno, and id bits have been
* set correctly. I don't care about the irq and other fields.
*
* Returns 0 for success
*
* Locks: interrupts must be enabled when we are called
*/
static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags)
{
	NCR5380_local_declare();
	int i, pass;
	struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;

	/* We touch the hardware and may sleep; complain if misused. */
	if (in_interrupt())
		printk(KERN_ERR "NCR5380_init called with interrupts off!\n");
	/*
	 * On NCR53C400 boards, NCR5380 registers are mapped 8 past
	 * the base address.
	 */
#ifdef NCR53C400
	if (flags & FLAG_NCR53C400)
		instance->NCR5380_instance_name += NCR53C400_address_adjust;
#endif
	NCR5380_setup(instance);
	hostdata->aborted = 0;
	hostdata->id_mask = 1 << instance->this_id;
	/* Mask of IDs with higher arbitration priority than our own. */
	for (i = hostdata->id_mask; i <= 0x80; i <<= 1)
		if (i > hostdata->id_mask)
			hostdata->id_higher_mask |= i;
	/* No target/lun is busy yet. */
	for (i = 0; i < 8; ++i)
		hostdata->busy[i] = 0;
#ifdef REAL_DMA
	hostdata->dmalen = 0;
#endif
	hostdata->targets_present = 0;
	hostdata->connected = NULL;
	hostdata->issue_queue = NULL;
	hostdata->disconnected_queue = NULL;
	INIT_DELAYED_WORK(&hostdata->coroutine, NCR5380_main);
#ifdef NCR5380_STATS
	for (i = 0; i < 8; ++i) {
		hostdata->time_read[i] = 0;
		hostdata->time_write[i] = 0;
		hostdata->bytes_read[i] = 0;
		hostdata->bytes_write[i] = 0;
	}
	hostdata->timebase = 0;
	hostdata->pendingw = 0;
	hostdata->pendingr = 0;
#endif
	/* The CHECK code seems to break the 53C400. Will check it later maybe */
	if (flags & FLAG_NCR53C400)
		hostdata->flags = FLAG_HAS_LAST_BYTE_SENT | flags;
	else
		hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT | flags;
	hostdata->host = instance;
	hostdata->time_expires = 0;
#ifndef AUTOSENSE
	if ((instance->cmd_per_lun > 1) || instance->can_queue > 1)
		printk(KERN_WARNING "scsi%d : WARNING : support for multiple outstanding commands enabled\n" " without AUTOSENSE option, contingent allegiance conditions may\n"
		       " be incorrectly cleared.\n", instance->host_no);
#endif /* def AUTOSENSE */
	/* Put the chip into a known idle state. */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(TARGET_COMMAND_REG, 0);
	NCR5380_write(SELECT_ENABLE_REG, 0);
#ifdef NCR53C400
	if (hostdata->flags & FLAG_NCR53C400) {
		NCR5380_write(C400_CONTROL_STATUS_REG, CSR_BASE);
	}
#endif
	/*
	 * Detect and correct bus wedge problems.
	 *
	 * If the system crashed, it may have crashed in a state
	 * where a SCSI command was still executing, and the
	 * SCSI bus is not in a BUS FREE STATE.
	 *
	 * If this is the case, we'll try to abort the currently
	 * established nexus which we know nothing about, and that
	 * failing, do a hard reset of the SCSI bus
	 */
	for (pass = 1; (NCR5380_read(STATUS_REG) & SR_BSY) && pass <= 6; ++pass) {
		switch (pass) {
		case 1:
		case 3:
		case 5:
			printk(KERN_INFO "scsi%d: SCSI bus busy, waiting up to five seconds\n", instance->host_no);
			/*
			 * NCR5380_poll_politely() enforces the 5 second limit
			 * itself; the old dead store "timeout = jiffies + 5*HZ"
			 * (and its unused declaration) has been removed.
			 */
			NCR5380_poll_politely(instance, STATUS_REG, SR_BSY, 0, 5*HZ);
			break;
		case 2:
			printk(KERN_WARNING "scsi%d: bus busy, attempting abort\n", instance->host_no);
			do_abort(instance);
			break;
		case 4:
			printk(KERN_WARNING "scsi%d: bus busy, attempting reset\n", instance->host_no);
			do_reset(instance);
			break;
		case 6:
			printk(KERN_ERR "scsi%d: bus locked solid or invalid override\n", instance->host_no);
			return -ENXIO;
		}
	}
	return 0;
}
/**
* NCR5380_exit - remove an NCR5380
* @instance: adapter to remove
*/
static void NCR5380_exit(struct Scsi_Host *instance)
{
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
/* Wait for any queued/running NCR5380_main work before teardown. */
cancel_delayed_work_sync(&hostdata->coroutine);
}
/**
* NCR5380_queue_command - queue a command
* @cmd: SCSI command
* @done: completion handler
*
* cmd is added to the per instance issue_queue, with minor
* twiddling done to the host specific fields of cmd. If the
* main coroutine is not running, it is restarted.
*
* Locks: host lock taken by caller
*/
static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
{
struct Scsi_Host *instance = cmd->device->host;
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
Scsi_Cmnd *tmp;
#if (NDEBUG & NDEBUG_NO_WRITE)
/* Debug aid: fail all writes so test media cannot be modified. */
switch (cmd->cmnd[0]) {
case WRITE_6:
case WRITE_10:
printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n", instance->host_no);
cmd->result = (DID_ERROR << 16);
done(cmd);
return 0;
}
#endif /* (NDEBUG & NDEBUG_NO_WRITE) */
#ifdef NCR5380_STATS
/* Start timing; collect_stats() adds jiffies back at completion. */
switch (cmd->cmnd[0]) {
case WRITE:
case WRITE_6:
case WRITE_10:
hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase);
hostdata->bytes_write[cmd->device->id] += scsi_bufflen(cmd);
hostdata->pendingw++;
break;
case READ:
case READ_6:
case READ_10:
hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase);
hostdata->bytes_read[cmd->device->id] += scsi_bufflen(cmd);
hostdata->pendingr++;
break;
}
#endif
/*
 * We use the host_scribble field as a pointer to the next command
 * in a queue
 */
cmd->host_scribble = NULL;
cmd->scsi_done = done;
cmd->result = 0;
/*
 * Insert the cmd into the issue queue. Note that REQUEST SENSE
 * commands are added to the head of the queue since any command will
 * clear the contingent allegiance condition that exists and the
 * sense data is only guaranteed to be valid while the condition exists.
 */
if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
LIST(cmd, hostdata->issue_queue);
cmd->host_scribble = (unsigned char *) hostdata->issue_queue;
hostdata->issue_queue = cmd;
} else {
/* Walk to the tail of the singly-linked issue queue and append. */
for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble);
LIST(cmd, tmp);
tmp->host_scribble = (unsigned char *) cmd;
}
dprintk(NDEBUG_QUEUES, ("scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"));
/* Run the coroutine if it isn't already running. */
/* Kick off command processing */
schedule_delayed_work(&hostdata->coroutine, 0);
return 0;
}
static DEF_SCSI_QCMD(NCR5380_queue_command)
/**
* NCR5380_main - NCR state machines
*
* NCR5380_main is a coroutine that runs as long as more work can
* be done on the NCR5380 host adapters in a system. Both
* NCR5380_queue_command() and NCR5380_intr() will try to start it
* in case it is not running.
*
* Locks: called as its own thread with no locks held. Takes the
* host lock and called routines may take the isa dma lock.
*/
static void NCR5380_main(struct work_struct *work)
{
struct NCR5380_hostdata *hostdata =
container_of(work, struct NCR5380_hostdata, coroutine.work);
struct Scsi_Host *instance = hostdata->host;
Scsi_Cmnd *tmp, *prev;
int done;
/* The whole state machine runs under the host lock; NCR5380_select()
 * may drop and retake it internally. */
spin_lock_irq(instance->host_lock);
do {
/* Lock held here */
done = 1;
if (!hostdata->connected && !hostdata->selecting) {
dprintk(NDEBUG_MAIN, ("scsi%d : not connected\n", instance->host_no));
/*
 * Search through the issue_queue for a command destined
 * for a target that's not busy.
 */
for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)
{
if (prev != tmp)
dprintk(NDEBUG_LISTS, ("MAIN tmp=%p target=%d busy=%d lun=%d\n", tmp, tmp->target, hostdata->busy[tmp->target], tmp->lun));
/* When we find one, remove it from the issue queue. */
/* NOTE(review): the busy test below assumes lun < 8 (one bit
 * per lun in a per-target byte) -- confirm against callers. */
if (!(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))) {
if (prev) {
REMOVE(prev, prev->host_scribble, tmp, tmp->host_scribble);
prev->host_scribble = tmp->host_scribble;
} else {
REMOVE(-1, hostdata->issue_queue, tmp, tmp->host_scribble);
hostdata->issue_queue = (Scsi_Cmnd *) tmp->host_scribble;
}
tmp->host_scribble = NULL;
/*
 * Attempt to establish an I_T_L nexus here.
 * On success, instance->hostdata->connected is set.
 * On failure, we must add the command back to the
 * issue queue so we can keep trying.
 */
dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->target, tmp->lun));
/*
 * A successful selection is defined as one that
 * leaves us with the command connected and
 * in hostdata->connected, OR has terminated the
 * command.
 *
 * With successful commands, we fall through
 * and see if we can do an information transfer,
 * with failures we will restart.
 */
hostdata->selecting = NULL;
/* RvC: have to preset this to indicate a new command is being performed */
if (!NCR5380_select(instance, tmp,
/*
 * REQUEST SENSE commands are issued without tagged
 * queueing, even on SCSI-II devices because the
 * contingent allegiance condition exists for the
 * entire unit.
 */
(tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : TAG_NEXT)) {
break;
} else {
/* Selection failed: put the command back at the head. */
LIST(tmp, hostdata->issue_queue);
tmp->host_scribble = (unsigned char *) hostdata->issue_queue;
hostdata->issue_queue = tmp;
done = 0;
dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no));
}
/* lock held here still */
} /* if target/lun is not busy */
} /* for */
/* exited locked */
} /* if (!hostdata->connected) */
if (hostdata->selecting) {
tmp = (Scsi_Cmnd *) hostdata->selecting;
/* Selection will drop and retake the lock */
if (!NCR5380_select(instance, tmp, (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : TAG_NEXT)) {
/* Ok ?? */
} else {
/* RvC: device failed, so we wait a long time
this is needed for Mustek scanners, that
do not respond to commands immediately
after a scan */
printk(KERN_DEBUG "scsi%d: device %d did not respond in time\n", instance->host_no, tmp->device->id);
LIST(tmp, hostdata->issue_queue);
tmp->host_scribble = (unsigned char *) hostdata->issue_queue;
hostdata->issue_queue = tmp;
NCR5380_set_timer(hostdata, USLEEP_WAITLONG);
}
} /* if hostdata->selecting */
/* Service the connected command unless a DMA transfer is in flight
 * or a back-off delay has not yet expired. */
if (hostdata->connected
#ifdef REAL_DMA
&& !hostdata->dmalen
#endif
&& (!hostdata->time_expires || time_before_eq(hostdata->time_expires, jiffies))
) {
dprintk(NDEBUG_MAIN, ("scsi%d : main() : performing information transfer\n", instance->host_no));
NCR5380_information_transfer(instance);
dprintk(NDEBUG_MAIN, ("scsi%d : main() : done set false\n", instance->host_no));
done = 0;
} else
break;
} while (!done);
spin_unlock_irq(instance->host_lock);
}
#ifndef DONT_USE_INTR
/**
* NCR5380_intr - generic NCR5380 irq handler
* @irq: interrupt number
* @dev_id: device info
*
* Handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses
* from the disconnected queue, and restarting NCR5380_main()
* as required.
*
* Locks: takes the needed instance locks
*/
static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
{
	NCR5380_local_declare();
	struct Scsi_Host *instance = dev_id;
	struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
	int done;
	unsigned char basr;
	unsigned long flags;

	dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n",
	    instance->irq));
	do {
		done = 1;
		spin_lock_irqsave(instance->host_lock, flags);
		/* Look for pending interrupts */
		NCR5380_setup(instance);
		basr = NCR5380_read(BUS_AND_STATUS_REG);
		/* XXX dispatch to appropriate routine if found and done=0 */
		if (basr & BASR_IRQ) {
			NCR5380_dprint(NDEBUG_INTR, instance);
			if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
				/* A target is reselecting us: re-establish the nexus
				 * and let main() continue the command. */
				done = 0;
				dprintk(NDEBUG_INTR, ("scsi%d : SEL interrupt\n", instance->host_no));
				NCR5380_reselect(instance);
				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
			} else if (basr & BASR_PARITY_ERROR) {
				dprintk(NDEBUG_INTR, ("scsi%d : PARITY interrupt\n", instance->host_no));
				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
			} else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
				dprintk(NDEBUG_INTR, ("scsi%d : RESET interrupt\n", instance->host_no));
				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
			} else {
#if defined(REAL_DMA)
				/*
				 * We should only get PHASE MISMATCH and EOP interrupts
				 * if we have DMA enabled, so do a sanity check based on
				 * the current setting of the MODE register.
				 */
				if ((NCR5380_read(MODE_REG) & MR_DMA) && ((basr & BASR_END_DMA_TRANSFER) || !(basr & BASR_PHASE_MATCH))) {
					int transferred;
					if (!hostdata->connected)
						/* BUGFIX: was instance->hostno -- struct Scsi_Host
						 * has no such member; the field is host_no. */
						panic("scsi%d : received end of DMA interrupt with no connected cmd\n", instance->host_no);
					transferred = (hostdata->dmalen - NCR5380_dma_residual(instance));
					hostdata->connected->SCp.this_residual -= transferred;
					hostdata->connected->SCp.ptr += transferred;
					hostdata->dmalen = 0;
					(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
					/* FIXME: we need to poll briefly then defer a workqueue task ! */
					/* BUGFIX: NCR5380_poll_politely() takes the Scsi_Host,
					 * not the hostdata pointer. */
					NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, BASR_ACK, 0, 2*HZ);
					NCR5380_write(MODE_REG, MR_BASE);
					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
				}
#else
				dprintk(NDEBUG_INTR, ("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)));
				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
#endif
			}
		}	/* if BASR_IRQ */
		spin_unlock_irqrestore(instance->host_lock, flags);
		if (!done)
			schedule_delayed_work(&hostdata->coroutine, 0);
	} while (!done);
	return IRQ_HANDLED;
}
#endif
/**
* collect_stats - collect stats on a scsi command
* @hostdata: adapter
* @cmd: command being issued
*
* Update the statistical data by parsing the command in question
*/
static void collect_stats(struct NCR5380_hostdata *hostdata, Scsi_Cmnd * cmd)
{
#ifdef NCR5380_STATS
	/*
	 * Classify the finished command by its opcode and fold the elapsed
	 * time since hostdata->timebase into the per-target read/write
	 * accounting, decrementing the matching pending counter.
	 * Compiled out entirely unless NCR5380_STATS is defined.
	 */
	unsigned char opcode = cmd->cmnd[0];

	if (opcode == WRITE || opcode == WRITE_6 || opcode == WRITE_10) {
		hostdata->time_write[scmd_id(cmd)] += (jiffies - hostdata->timebase);
		hostdata->pendingw--;
	} else if (opcode == READ || opcode == READ_6 || opcode == READ_10) {
		hostdata->time_read[scmd_id(cmd)] += (jiffies - hostdata->timebase);
		hostdata->pendingr--;
	}
#endif
}
/*
* Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd,
* int tag);
*
* Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
* including ARBITRATION, SELECTION, and initial message out for
* IDENTIFY and queue messages.
*
* Inputs : instance - instantiation of the 5380 driver on which this
* target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for
* new tag, TAG_NONE for untagged queueing, otherwise set to the tag for
* the command that is presently connected.
*
* Returns : -1 if selection could not execute for some reason,
* 0 if selection succeeded or failed because the target
* did not respond.
*
* Side effects :
* If bus busy, arbitration failed, etc, NCR5380_select() will exit
* with registers as they should have been on entry - ie
* SELECT_ENABLE will be set appropriately, the NCR5380
* will cease to drive any SCSI bus signals.
*
* If successful : I_T_L or I_T_L_Q nexus will be established,
* instance->connected will be set to cmd.
* SELECT interrupt will be disabled.
*
* If failed (no target) : cmd->scsi_done() will be called, and the
* cmd->result host byte set to DID_BAD_TARGET.
*
* Locks: caller holds hostdata lock in IRQ mode
*/
static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
{
NCR5380_local_declare();
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
unsigned char tmp[3], phase;
unsigned char *data;
int len;
unsigned long timeout;
unsigned char value;
int err;
NCR5380_setup(instance);
/*
 * Re-entry point: a previous call started selection and returned 0 with
 * hostdata->selecting set, parking until the target responds.  In that
 * case skip arbitration entirely and resume polling at part2.
 */
if (hostdata->selecting)
goto part2;
hostdata->restart_select = 0;
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dprintk(NDEBUG_ARBITRATION, ("scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id));
/*
 * Set the phase bits to 0, otherwise the NCR5380 won't drive the
 * data bus during SELECTION.
 */
NCR5380_write(TARGET_COMMAND_REG, 0);
/*
 * Start arbitration: put our ID on the bus and set the arbitrate bit.
 */
NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask);
NCR5380_write(MODE_REG, MR_ARBITRATE);
/* We can be relaxed here, interrupts are on, we are
in workqueue context, the birds are singing in the trees */
spin_unlock_irq(instance->host_lock);
err = NCR5380_poll_politely(instance, INITIATOR_COMMAND_REG, ICR_ARBITRATION_PROGRESS, ICR_ARBITRATION_PROGRESS, 5*HZ);
spin_lock_irq(instance->host_lock);
if (err < 0) {
printk(KERN_DEBUG "scsi: arbitration timeout at %d\n", __LINE__);
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
goto failed;
}
dprintk(NDEBUG_ARBITRATION, ("scsi%d : arbitration complete\n", instance->host_no));
/*
 * The arbitration delay is 2.2us, but this is a minimum and there is
 * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate
 * the integral nature of udelay().
 *
 */
udelay(3);
/* Check for lost arbitration */
/*
 * The initiator-command register is read both before and after the
 * data-bus check; this looks like a deliberate re-check for a late
 * ICR_ARBITRATION_LOST assertion -- confirm against the 5380 datasheet.
 */
if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
NCR5380_write(MODE_REG, MR_BASE);
dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no));
goto failed;
}
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL);
if (!(hostdata->flags & FLAG_DTC3181E) &&
/* RvC: DTC3181E has some trouble with this
 * so we simply removed it. Seems to work with
 * only Mustek scanner attached
 */
(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no));
goto failed;
}
/*
 * Again, bus clear + bus settle time is 1.2us, however, this is
 * a minimum so we'll udelay ceil(1.2)
 */
udelay(2);
dprintk(NDEBUG_ARBITRATION, ("scsi%d : won arbitration\n", instance->host_no));
/*
 * Now that we have won arbitration, start Selection process, asserting
 * the host and target ID's on the SCSI bus.
 */
NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << scmd_id(cmd))));
/*
 * Raise ATN while SEL is true before BSY goes false from arbitration,
 * since this is the only way to guarantee that we'll get a MESSAGE OUT
 * phase immediately after selection.
 */
NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL));
NCR5380_write(MODE_REG, MR_BASE);
/*
 * Reselect interrupts must be turned off prior to the dropping of BSY,
 * otherwise we will trigger an interrupt.
 */
NCR5380_write(SELECT_ENABLE_REG, 0);
/*
 * The initiator shall then wait at least two deskew delays and release
 * the BSY signal.
 */
udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */
/* Reset BSY */
NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL));
/*
 * Something weird happens when we cease to drive BSY - looks
 * like the board/chip is letting us do another read before the
 * appropriate propagation delay has expired, and we're confusing
 * a BSY signal from ourselves as the target's response to SELECTION.
 *
 * A small delay (the 'C++' frontend breaks the pipeline with an
 * unnecessary jump, making it work on my 386-33/Trantor T128, the
 * tighter 'C' code breaks and requires this) solves the problem -
 * the 1 us delay is arbitrary, and only used because this delay will
 * be the same on other platforms and since it works here, it should
 * work there.
 *
 * wingel suggests that this could be due to failing to wait
 * one deskew delay.
 */
udelay(1);
dprintk(NDEBUG_SELECTION, ("scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd)));
/*
 * The SCSI specification calls for a 250 ms timeout for the actual
 * selection.
 */
timeout = jiffies + (250 * HZ / 1000);
/*
 * XXX very interesting - we're seeing a bounce where the BSY we
 * asserted is being reflected / still asserted (propagation delay?)
 * and it's detecting as true. Sigh.
 */
hostdata->select_time = 0; /* we count the clock ticks at which we polled */
hostdata->selecting = cmd;
part2:
/* RvC: here we enter after a sleeping period, or immediately after
execution of part 1
we poll only once ech clock tick */
value = NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO);
if (!value && (hostdata->select_time < HZ/4)) {
/* RvC: we still must wait for a device response */
hostdata->select_time++; /* after 25 ticks the device has failed */
NCR5380_set_timer(hostdata, 1);
return 0; /* RvC: we return here with hostdata->selecting set,
to go to sleep */
}
hostdata->selecting = NULL;/* clear this pointer, because we passed the
waiting period */
/* SEL+IO together means a target is reselecting us: back off and
 * let the reselection logic take over. */
if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_reselect(instance);
printk("scsi%d : reselection after won arbitration?\n", instance->host_no);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return -1;
}
/*
 * No less than two deskew delays after the initiator detects the
 * BSY signal is true, it shall release the SEL signal and may
 * change the DATA BUS. -wingel
 */
udelay(1);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
/* No BSY: the target did not respond to selection */
if (!(NCR5380_read(STATUS_REG) & SR_BSY)) {
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
if (hostdata->targets_present & (1 << scmd_id(cmd))) {
/* Target answered before but not now -- suspicious */
printk(KERN_DEBUG "scsi%d : weirdness\n", instance->host_no);
if (hostdata->restart_select)
printk(KERN_DEBUG "\trestart select\n");
NCR5380_dprint(NDEBUG_SELECTION, instance);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return -1;
}
/* Never-seen target: fail the command with DID_BAD_TARGET */
cmd->result = DID_BAD_TARGET << 16;
collect_stats(hostdata, cmd);
cmd->scsi_done(cmd);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
dprintk(NDEBUG_SELECTION, ("scsi%d : target did not respond within 250ms\n", instance->host_no));
/* NOTE(review): SELECT_ENABLE_REG was already rewritten two lines up;
 * this second write looks redundant -- confirm before removing */
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return 0;
}
hostdata->targets_present |= (1 << scmd_id(cmd));
/*
 * Since we followed the SCSI spec, and raised ATN while SEL
 * was true but before BSY was false during selection, the information
 * transfer phase should be a MESSAGE OUT phase so that we can send the
 * IDENTIFY message.
 *
 * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG
 * message (2 bytes) with a tag ID that we increment with every command
 * until it wraps back to 0.
 *
 * XXX - it turns out that there are some broken SCSI-II devices,
 * which claim to support tagged queuing but fail when more than
 * some number of commands are issued at once.
 */
/* Wait for start of REQ/ACK handshake */
spin_unlock_irq(instance->host_lock);
err = NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, HZ);
spin_lock_irq(instance->host_lock);
if(err) {
printk(KERN_ERR "scsi%d: timeout at NCR5380.c:%d\n", instance->host_no, __LINE__);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
goto failed;
}
dprintk(NDEBUG_SELECTION, ("scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id));
tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 0 : 1), cmd->device->lun);
len = 1;
cmd->tag = 0;
/* Send message(s) */
data = tmp;
phase = PHASE_MSGOUT;
NCR5380_transfer_pio(instance, &phase, &len, &data);
dprintk(NDEBUG_SELECTION, ("scsi%d : nexus established.\n", instance->host_no));
/* XXX need to handle errors here */
/* Nexus established: record the connected command and mark the LUN busy */
hostdata->connected = cmd;
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
initialize_SCp(cmd);
return 0;
/* Selection failed */
failed:
return -1;
}
/*
* Function : int NCR5380_transfer_pio (struct Scsi_Host *instance,
* unsigned char *phase, int *count, unsigned char **data)
*
* Purpose : transfers data in given phase using polled I/O
*
* Inputs : instance - instance of driver, *phase - pointer to
* what phase is expected, *count - pointer to number of
* bytes to transfer, **data - pointer to data pointer.
*
* Returns : -1 when different phase is entered without transferring
* maximum number of bytes, 0 if all bytes or transferred or exit
* is in same phase.
*
* Also, *phase, *count, *data are modified in place.
*
* XXX Note : handling for bus free may be useful.
*/
/*
* Note : this code is not as quick as it could be, however it
* IS 100% reliable, and for the actual data transfer where speed
* counts, we will always do a pseudo DMA or DMA transfer.
*/
static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data) {
NCR5380_local_declare();
unsigned char p = *phase, tmp;
int c = *count;
unsigned char *d = *data;
/*
 * RvC: some administrative data to process polling time
 */
int break_allowed = 0;
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
NCR5380_setup(instance);
/* SR_IO set in the phase means the target drives the data (a read) */
if (!(p & SR_IO))
dprintk(NDEBUG_PIO, ("scsi%d : pio write %d bytes\n", instance->host_no, c));
else
dprintk(NDEBUG_PIO, ("scsi%d : pio read %d bytes\n", instance->host_no, c));
/*
 * The NCR5380 chip will only drive the SCSI bus when the
 * phase specified in the appropriate bits of the TARGET COMMAND
 * REGISTER match the STATUS REGISTER
 */
NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
/* RvC: don't know if this is necessary, but other SCSI I/O is short
 * so breaks are not necessary there
 */
if ((p == PHASE_DATAIN) || (p == PHASE_DATAOUT)) {
break_allowed = 1;
}
do {
/*
 * Wait for assertion of REQ, after which the phase bits will be
 * valid
 */
/* RvC: we simply poll once, after that we stop temporarily
 * and let the device buffer fill up
 * if breaking is not allowed, we keep polling as long as needed
 */
/* FIXME */
/* With break_allowed set, this reads STATUS_REG exactly once;
 * otherwise it spins until REQ is asserted. */
while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ) && !break_allowed);
if (!(tmp & SR_REQ)) {
/* timeout condition */
NCR5380_set_timer(hostdata, USLEEP_SLEEP);
break;
}
dprintk(NDEBUG_HANDSHAKE, ("scsi%d : REQ detected\n", instance->host_no));
/* Check for phase mismatch */
if ((tmp & PHASE_MASK) != p) {
dprintk(NDEBUG_HANDSHAKE, ("scsi%d : phase mismatch\n", instance->host_no));
NCR5380_dprint_phase(NDEBUG_HANDSHAKE, instance);
break;
}
/* Do actual transfer from SCSI bus to / from memory */
if (!(p & SR_IO))
NCR5380_write(OUTPUT_DATA_REG, *d);
else
*d = NCR5380_read(CURRENT_SCSI_DATA_REG);
++d;
/*
 * The SCSI standard suggests that in MSGOUT phase, the initiator
 * should drop ATN on the last byte of the message phase
 * after REQ has been asserted for the handshake but before
 * the initiator raises ACK.
 */
if (!(p & SR_IO)) {
if (!((p & SR_MSG) && c > 1)) {
/* Last (or only) byte of a write: assert ACK without ATN */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
NCR5380_dprint(NDEBUG_PIO, instance);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ACK);
} else {
/* More message bytes follow: keep ATN asserted through ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN);
NCR5380_dprint(NDEBUG_PIO, instance);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
}
} else {
NCR5380_dprint(NDEBUG_PIO, instance);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
}
/* FIXME - if this fails bus reset ?? */
/* Wait for the target to drop REQ, completing the handshake */
NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ);
dprintk(NDEBUG_HANDSHAKE, ("scsi%d : req false, handshake complete\n", instance->host_no));
/*
 * We have several special cases to consider during REQ/ACK handshaking :
 * 1. We were in MSGOUT phase, and we are on the last byte of the
 * message. ATN must be dropped as ACK is dropped.
 *
 * 2. We are in a MSGIN phase, and we are on the last byte of the
 * message. We must exit with ACK asserted, so that the calling
 * code may raise ATN before dropping ACK to reject the message.
 *
 * 3. ACK and ATN are clear and the target may proceed as normal.
 */
if (!(p == PHASE_MSGIN && c == 1)) {
if (p == PHASE_MSGOUT && c > 1)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
else
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
}
} while (--c);
dprintk(NDEBUG_PIO, ("scsi%d : residual %d\n", instance->host_no, c));
/* Report residual count, data pointer, and the final bus phase */
*count = c;
*data = d;
tmp = NCR5380_read(STATUS_REG);
if (tmp & SR_REQ)
*phase = tmp & PHASE_MASK;
else
*phase = PHASE_UNKNOWN;
/* 0 = all bytes moved or still in the expected phase; -1 otherwise */
if (!c || (*phase == p))
return 0;
else
return -1;
}
/**
* do_reset - issue a reset command
* @host: adapter to reset
*
* Issue a reset sequence to the NCR5380 and try and get the bus
* back into sane shape.
*
* Locks: caller holds queue lock
*/
static void do_reset(struct Scsi_Host *host) {
NCR5380_local_declare();
NCR5380_setup(host);
/* Mirror the current bus phase into the TCR before asserting RST */
NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK));
/* Assert RST and hold it for 25 us (the spec's minimum reset pulse
 * is 25 us -- TODO confirm exact figure against the SCSI standard) */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST);
udelay(25);
/* Release RST (and everything else the ICR was driving) */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
}
/*
* Function : do_abort (Scsi_Host *host)
*
* Purpose : abort the currently established nexus. Should only be
* called from a routine which can drop into a
*
* Returns : 0 on success, -1 on failure.
*
* Locks: queue lock held by caller
* FIXME: sort this out and get new_eh running
*/
static int do_abort(struct Scsi_Host *host)
{
	NCR5380_local_declare();
	unsigned char *msgptr, phase, tmp;
	int len;
	int rc;
	NCR5380_setup(host);

	/* Request message out phase */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);

	/*
	 * Wait for the target to indicate a valid phase by asserting
	 * REQ. Once this happens, we'll have either a MSGOUT phase
	 * and can immediately send the ABORT message, or we'll have some
	 * other phase and will have to source/sink data.
	 *
	 * We really don't care what value was on the bus or what value
	 * the target sees, so we just handshake.
	 */
	rc = NCR5380_poll_politely(host, STATUS_REG, SR_REQ, SR_REQ, 60 * HZ);
	if (rc < 0)
		return -1;

	/*
	 * BUGFIX: the old code did "tmp = (unsigned char)rc", but
	 * NCR5380_poll_politely() returns 0 on success (see the error
	 * checks at its other call sites), so tmp was always 0 and the
	 * MSGOUT test below could never match.  Read the actual bus
	 * phase from the status register instead.
	 */
	tmp = NCR5380_read(STATUS_REG) & PHASE_MASK;

	NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));

	if (tmp != PHASE_MSGOUT) {
		/*
		 * Not MSGOUT yet: handshake one byte through the current
		 * phase (keeping ATN asserted) to move the target along.
		 */
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
		rc = NCR5380_poll_politely(host, STATUS_REG, SR_REQ, 0, 3*HZ);
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
		/* was "rc == -1": made consistent with the rc < 0 check above */
		if (rc < 0)
			return -1;
	}

	/* Send the single-byte ABORT message */
	tmp = ABORT;
	msgptr = &tmp;
	len = 1;
	phase = PHASE_MSGOUT;
	NCR5380_transfer_pio(host, &phase, &len, &msgptr);

	/*
	 * If we got here, and the command completed successfully,
	 * we're about to go into bus free state.
	 */
	return len ? -1 : 0;
}
#if defined(REAL_DMA) || defined(PSEUDO_DMA) || defined (REAL_DMA_POLL)
/*
* Function : int NCR5380_transfer_dma (struct Scsi_Host *instance,
* unsigned char *phase, int *count, unsigned char **data)
*
* Purpose : transfers data in given phase using either real
* or pseudo DMA.
*
* Inputs : instance - instance of driver, *phase - pointer to
* what phase is expected, *count - pointer to number of
* bytes to transfer, **data - pointer to data pointer.
*
* Returns : -1 when different phase is entered without transferring
* maximum number of bytes, 0 if all bytes or transferred or exit
* is in same phase.
*
* Also, *phase, *count, *data are modified in place.
*
* Locks: io_request lock held by caller
*/
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data) {
NCR5380_local_declare();
register int c = *count;
register unsigned char p = *phase;
register unsigned char *d = *data;
unsigned char tmp;
int foo;
#if defined(REAL_DMA_POLL)
int cnt, toPIO;
unsigned char saved_data = 0, overrun = 0, residue;
#endif
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
NCR5380_setup(instance);
/* Bail out immediately on a phase mismatch, reporting the new phase */
if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) {
*phase = tmp;
return -1;
}
#if defined(REAL_DMA) || defined(REAL_DMA_POLL)
#ifdef READ_OVERRUNS
/* Reads transfer 2 fewer bytes by DMA; they are fetched by PIO at the
 * end to work around the 5380's read-overrun behaviour (see the long
 * comment below). */
if (p & SR_IO) {
c -= 2;
}
#endif
dprintk(NDEBUG_DMA, ("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d));
hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c);
#endif
NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p));
#ifdef REAL_DMA
NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
#elif defined(REAL_DMA_POLL)
NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
#else
/*
 * Note : on my sample board, watch-dog timeouts occurred when interrupts
 * were not disabled for the duration of a single DMA transfer, from
 * before the setting of DMA mode to after transfer of the last byte.
 */
#if defined(PSEUDO_DMA) && defined(UNSAFE)
spin_unlock_irq(instance->host_lock);
#endif
/* KLL May need eop and parity in 53c400 */
if (hostdata->flags & FLAG_NCR53C400)
NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE |
MR_ENABLE_PAR_CHECK | MR_ENABLE_PAR_INTR |
MR_ENABLE_EOP_INTR | MR_MONITOR_BSY);
else
NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
#endif /* def REAL_DMA */
dprintk(NDEBUG_DMA, ("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG)));
/*
 * On the PAS16 at least I/O recovery delays are not needed here.
 * Everyone else seems to want them.
 */
if (p & SR_IO) {
io_recovery_delay(1);
NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0);
} else {
io_recovery_delay(1);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
io_recovery_delay(1);
NCR5380_write(START_DMA_SEND_REG, 0);
io_recovery_delay(1);
}
#if defined(REAL_DMA_POLL)
/* Busy-wait until the transfer ends, the phase changes, or BSY drops */
do {
tmp = NCR5380_read(BUS_AND_STATUS_REG);
} while ((tmp & BASR_PHASE_MATCH) && !(tmp & (BASR_BUSY_ERROR | BASR_END_DMA_TRANSFER)));
/*
At this point, either we've completed DMA, or we have a phase mismatch,
or we've unexpectedly lost BUSY (which is a real error).
For write DMAs, we want to wait until the last byte has been
transferred out over the bus before we turn off DMA mode. Alas, there
seems to be no terribly good way of doing this on a 5380 under all
conditions. For non-scatter-gather operations, we can wait until REQ
and ACK both go false, or until a phase mismatch occurs. Gather-writes
are nastier, since the device will be expecting more data than we
are prepared to send it, and REQ will remain asserted. On a 53C8[01] we
could test LAST BIT SENT to assure transfer (I imagine this is precisely
why this signal was added to the newer chips) but on the older 538[01]
this signal does not exist. The workaround for this lack is a watchdog;
we bail out of the wait-loop after a modest amount of wait-time if
the usual exit conditions are not met. Not a terribly clean or
correct solution :-%
Reads are equally tricky due to a nasty characteristic of the NCR5380.
If the chip is in DMA mode for an READ, it will respond to a target's
REQ by latching the SCSI data into the INPUT DATA register and asserting
ACK, even if it has _already_ been notified by the DMA controller that
the current DMA transfer has completed! If the NCR5380 is then taken
out of DMA mode, this already-acknowledged byte is lost.
This is not a problem for "one DMA transfer per command" reads, because
the situation will never arise... either all of the data is DMA'ed
properly, or the target switches to MESSAGE IN phase to signal a
disconnection (either operation bringing the DMA to a clean halt).
However, in order to handle scatter-reads, we must work around the
problem. The chosen fix is to DMA N-2 bytes, then check for the
condition before taking the NCR5380 out of DMA mode. One or two extra
bytes are transferred via PIO as necessary to fill out the original
request.
*/
if (p & SR_IO) {
#ifdef READ_OVERRUNS
udelay(10);
if (((NCR5380_read(BUS_AND_STATUS_REG) & (BASR_PHASE_MATCH | BASR_ACK)) == (BASR_PHASE_MATCH | BASR_ACK))) {
/* NOTE(review): other read sites in this function use
 * INPUT_DATA_REG; confirm INPUT_DATA_REGISTER is defined in the
 * register header (this path only compiles with READ_OVERRUNS
 * and REAL_DMA_POLL both enabled). */
saved_data = NCR5380_read(INPUT_DATA_REGISTER);
overrun = 1;
}
#endif
} else {
/* Write: wait (bounded) for ACK/REQ to settle before leaving DMA mode */
int limit = 100;
while (((tmp = NCR5380_read(BUS_AND_STATUS_REG)) & BASR_ACK) || (NCR5380_read(STATUS_REG) & SR_REQ)) {
if (!(tmp & BASR_PHASE_MATCH))
break;
if (--limit < 0)
break;
}
}
dprintk(NDEBUG_DMA, ("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG)));
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
/* Account for bytes the DMA engine did not move */
residue = NCR5380_dma_residual(instance);
c -= residue;
*count -= c;
*data += c;
*phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
#ifdef READ_OVERRUNS
if (*phase == p && (p & SR_IO) && residue == 0) {
if (overrun) {
dprintk(NDEBUG_DMA, ("Got an input overrun, using saved byte\n"));
**data = saved_data;
*data += 1;
*count -= 1;
cnt = toPIO = 1;
} else {
printk("No overrun??\n");
cnt = toPIO = 2;
}
dprintk(NDEBUG_DMA, ("Doing %d-byte PIO to 0x%X\n", cnt, *data));
NCR5380_transfer_pio(instance, phase, &cnt, data);
*count -= toPIO - cnt;
}
#endif
dprintk(NDEBUG_DMA, ("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count)));
return 0;
#elif defined(REAL_DMA)
return 0;
#else /* defined(REAL_DMA_POLL) */
/* Pseudo-DMA path: NCR5380_pread/pwrite do the transfer */
if (p & SR_IO) {
#ifdef DMA_WORKS_RIGHT
foo = NCR5380_pread(instance, d, c);
#else
int diff = 1;
if (hostdata->flags & FLAG_NCR53C400) {
diff = 0;
}
if (!(foo = NCR5380_pread(instance, d, c - diff))) {
/*
 * We can't disable DMA mode after successfully transferring
 * what we plan to be the last byte, since that would open up
 * a race condition where if the target asserted REQ before
 * we got the DMA mode reset, the NCR5380 would have latched
 * an additional byte into the INPUT DATA register and we'd
 * have dropped it.
 *
 * The workaround was to transfer one fewer bytes than we
 * intended to with the pseudo-DMA read function, wait for
 * the chip to latch the last byte, read it, and then disable
 * pseudo-DMA mode.
 *
 * After REQ is asserted, the NCR5380 asserts DRQ and ACK.
 * REQ is deasserted when ACK is asserted, and not reasserted
 * until ACK goes false. Since the NCR5380 won't lower ACK
 * until DACK is asserted, which won't happen unless we twiddle
 * the DMA port or we take the NCR5380 out of DMA mode, we
 * can guarantee that we won't handshake another extra
 * byte.
 */
if (!(hostdata->flags & FLAG_NCR53C400)) {
while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ));
/* Wait for clean handshake */
while (NCR5380_read(STATUS_REG) & SR_REQ);
d[c - 1] = NCR5380_read(INPUT_DATA_REG);
}
}
#endif
} else {
#ifdef DMA_WORKS_RIGHT
foo = NCR5380_pwrite(instance, d, c);
#else
int timeout;
dprintk(NDEBUG_C400_PWRITE, ("About to pwrite %d bytes\n", c));
if (!(foo = NCR5380_pwrite(instance, d, c))) {
/*
 * Wait for the last byte to be sent. If REQ is being asserted for
 * the byte we're interested, we'll ACK it and it will go false.
 */
if (!(hostdata->flags & FLAG_HAS_LAST_BYTE_SENT)) {
timeout = 20000;
/* NOTE(review): timeout is initialized but never decremented in
 * the loop below, so the "!timeout" report can never fire --
 * TODO confirm whether a --timeout was intended in the condition */
while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH));
if (!timeout)
dprintk(NDEBUG_LAST_BYTE_SENT, ("scsi%d : timed out on last byte\n", instance->host_no));
/* Probe once whether this chip implements LAST BYTE SENT */
if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) {
hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT;
if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) {
hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT;
dprintk(NDEBUG_LAST_WRITE_SENT, ("scsi%d : last bit sent works\n", instance->host_no));
}
}
} else {
dprintk(NDEBUG_C400_PWRITE, ("Waiting for LASTBYTE\n"));
while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT));
dprintk(NDEBUG_C400_PWRITE, ("Got LASTBYTE\n"));
}
}
#endif
}
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) {
dprintk(NDEBUG_C400_PWRITE, ("53C400w: Checking for IRQ\n"));
if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) {
dprintk(NDEBUG_C400_PWRITE, ("53C400w: got it, reading reset interrupt reg\n"));
NCR5380_read(RESET_PARITY_INTERRUPT_REG);
} else {
printk("53C400w: IRQ NOT THERE!\n");
}
}
/* Pseudo-DMA always consumes the full count; report the new phase */
*data = d + c;
*count = 0;
*phase = NCR5380_read(STATUS_REG) & PHASE_MASK;
#if defined(PSEUDO_DMA) && defined(UNSAFE)
spin_lock_irq(instance->host_lock);
#endif /* defined(PSEUDO_DMA) && defined(UNSAFE) */
return foo;
#endif /* defined(REAL_DMA_POLL) */
}
#endif /* defined(REAL_DMA) | defined(PSEUDO_DMA) */
/*
* Function : NCR5380_information_transfer (struct Scsi_Host *instance)
*
* Purpose : run through the various SCSI phases and do as the target
* directs us to. Operates on the currently connected command,
* instance->connected.
*
* Inputs : instance, instance for which we are doing commands
*
* Side effects : SCSI things happen, the disconnected queue will be
* modified if a command disconnects, *instance->connected will
* change.
*
* XXX Note : we need to watch for bus free or a reset condition here
* to recover from an unexpected bus free condition.
*
* Locks: io_request_lock held by caller in IRQ mode
*/
static void NCR5380_information_transfer(struct Scsi_Host *instance) {
NCR5380_local_declare();
struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)instance->hostdata;
unsigned char msgout = NOP;
int sink = 0;
int len;
#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
int transfersize;
#endif
unsigned char *data;
unsigned char phase, tmp, extended_msg[10], old_phase = 0xff;
Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected;
/* RvC: we need to set the end of the polling time */
unsigned long poll_time = jiffies + USLEEP_POLL;
NCR5380_setup(instance);
while (1) {
tmp = NCR5380_read(STATUS_REG);
/* We only have a valid SCSI phase when REQ is asserted */
if (tmp & SR_REQ) {
phase = (tmp & PHASE_MASK);
if (phase != old_phase) {
old_phase = phase;
NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
}
if (sink && (phase != PHASE_MSGOUT)) {
NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
while (NCR5380_read(STATUS_REG) & SR_REQ);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
sink = 0;
continue;
}
switch (phase) {
case PHASE_DATAIN:
case PHASE_DATAOUT:
#if (NDEBUG & NDEBUG_NO_DATAOUT)
printk("scsi%d : NDEBUG_NO_DATAOUT set, attempted DATAOUT aborted\n", instance->host_no);
sink = 1;
do_abort(instance);
cmd->result = DID_ERROR << 16;
cmd->scsi_done(cmd);
return;
#endif
/*
* If there is no room left in the current buffer in the
* scatter-gather list, move onto the next one.
*/
if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
++cmd->SCp.buffer;
--cmd->SCp.buffers_residual;
cmd->SCp.this_residual = cmd->SCp.buffer->length;
cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
}
/*
* The preferred transfer method is going to be
* PSEUDO-DMA for systems that are strictly PIO,
* since we can let the hardware do the handshaking.
*
* For this to work, we need to know the transfersize
* ahead of time, since the pseudo-DMA code will sit
* in an unconditional loop.
*/
#if defined(PSEUDO_DMA) || defined(REAL_DMA_POLL)
/* KLL
* PSEUDO_DMA is defined here. If this is the g_NCR5380
* driver then it will always be defined, so the
* FLAG_NO_PSEUDO_DMA is used to inhibit PDMA in the base
* NCR5380 case. I think this is a fairly clean solution.
* We supplement these 2 if's with the flag.
*/
#ifdef NCR5380_dma_xfer_len
if (!cmd->device->borken && !(hostdata->flags & FLAG_NO_PSEUDO_DMA) && (transfersize = NCR5380_dma_xfer_len(instance, cmd)) != 0) {
#else
transfersize = cmd->transfersize;
#ifdef LIMIT_TRANSFERSIZE /* If we have problems with interrupt service */
if (transfersize > 512)
transfersize = 512;
#endif /* LIMIT_TRANSFERSIZE */
if (!cmd->device->borken && transfersize && !(hostdata->flags & FLAG_NO_PSEUDO_DMA) && cmd->SCp.this_residual && !(cmd->SCp.this_residual % transfersize)) {
/* Limit transfers to 32K, for xx400 & xx406
* pseudoDMA that transfers in 128 bytes blocks. */
if (transfersize > 32 * 1024)
transfersize = 32 * 1024;
#endif
len = transfersize;
if (NCR5380_transfer_dma(instance, &phase, &len, (unsigned char **) &cmd->SCp.ptr)) {
/*
* If the watchdog timer fires, all future accesses to this
* device will use the polled-IO.
*/
scmd_printk(KERN_INFO, cmd,
"switching to slow handshake\n");
cmd->device->borken = 1;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
sink = 1;
do_abort(instance);
cmd->result = DID_ERROR << 16;
cmd->scsi_done(cmd);
/* XXX - need to source or sink data here, as appropriate */
} else
cmd->SCp.this_residual -= transfersize - len;
} else
#endif /* defined(PSEUDO_DMA) || defined(REAL_DMA_POLL) */
NCR5380_transfer_pio(instance, &phase, (int *) &cmd->SCp.this_residual, (unsigned char **)
&cmd->SCp.ptr);
break;
case PHASE_MSGIN:
len = 1;
data = &tmp;
NCR5380_transfer_pio(instance, &phase, &len, &data);
cmd->SCp.Message = tmp;
switch (tmp) {
/*
* Linking lets us reduce the time required to get the
* next command out to the device, hopefully this will
* mean we don't waste another revolution due to the delays
* required by ARBITRATION and another SELECTION.
*
* In the current implementation proposal, low level drivers
* merely have to start the next command, pointed to by
* next_link, done() is called as with unlinked commands.
*/
#ifdef LINKED
case LINKED_CMD_COMPLETE:
case LINKED_FLG_CMD_COMPLETE:
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun));
/*
* Sanity check : A linked command should only terminate with
* one of these messages if there are more linked commands
* available.
*/
if (!cmd->next_link) {
printk("scsi%d : target %d lun %d linked command complete, no next_link\n" instance->host_no, cmd->device->id, cmd->device->lun);
sink = 1;
do_abort(instance);
return;
}
initialize_SCp(cmd->next_link);
/* The next command is still part of this process */
cmd->next_link->tag = cmd->tag;
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun));
collect_stats(hostdata, cmd);
cmd->scsi_done(cmd);
cmd = hostdata->connected;
break;
#endif /* def LINKED */
case ABORT:
case COMMAND_COMPLETE:
/* Accept message by clearing ACK */
sink = 1;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
hostdata->connected = NULL;
dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun));
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
/*
* I'm not sure what the correct thing to do here is :
*
* If the command that just executed is NOT a request
* sense, the obvious thing to do is to set the result
* code to the values of the stored parameters.
*
* If it was a REQUEST SENSE command, we need some way
* to differentiate between the failure code of the original
* and the failure code of the REQUEST sense - the obvious
* case is success, where we fall through and leave the result
* code unchanged.
*
* The non-obvious place is where the REQUEST SENSE failed
*/
if (cmd->cmnd[0] != REQUEST_SENSE)
cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
else if (status_byte(cmd->SCp.Status) != GOOD)
cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
#ifdef AUTOSENSE
if ((cmd->cmnd[0] == REQUEST_SENSE) &&
hostdata->ses.cmd_len) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
hostdata->ses.cmd_len = 0 ;
}
if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
dprintk(NDEBUG_AUTOSENSE, ("scsi%d : performing request sense\n", instance->host_no));
LIST(cmd, hostdata->issue_queue);
cmd->host_scribble = (unsigned char *)
hostdata->issue_queue;
hostdata->issue_queue = (Scsi_Cmnd *) cmd;
dprintk(NDEBUG_QUEUES, ("scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no));
} else
#endif /* def AUTOSENSE */
{
collect_stats(hostdata, cmd);
cmd->scsi_done(cmd);
}
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
/*
* Restore phase bits to 0 so an interrupted selection,
* arbitration can resume.
*/
NCR5380_write(TARGET_COMMAND_REG, 0);
while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
barrier();
return;
case MESSAGE_REJECT:
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
switch (hostdata->last_message) {
case HEAD_OF_QUEUE_TAG:
case ORDERED_QUEUE_TAG:
case SIMPLE_QUEUE_TAG:
cmd->device->simple_tags = 0;
hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
break;
default:
break;
}
case DISCONNECT:{
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
cmd->device->disconnect = 1;
LIST(cmd, hostdata->disconnected_queue);
cmd->host_scribble = (unsigned char *)
hostdata->disconnected_queue;
hostdata->connected = NULL;
hostdata->disconnected_queue = cmd;
dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d lun %d was moved from connected to" " the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun));
/*
* Restore phase bits to 0 so an interrupted selection,
* arbitration can resume.
*/
NCR5380_write(TARGET_COMMAND_REG, 0);
/* Enable reselect interrupts */
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
/* Wait for bus free to avoid nasty timeouts - FIXME timeout !*/
/* NCR538_poll_politely(instance, STATUS_REG, SR_BSY, 0, 30 * HZ); */
while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected)
barrier();
return;
}
/*
* The SCSI data pointer is *IMPLICITLY* saved on a disconnect
* operation, in violation of the SCSI spec so we can safely
* ignore SAVE/RESTORE pointers calls.
*
* Unfortunately, some disks violate the SCSI spec and
* don't issue the required SAVE_POINTERS message before
* disconnecting, and we have to break spec to remain
* compatible.
*/
case SAVE_POINTERS:
case RESTORE_POINTERS:
/* Accept message by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
break;
case EXTENDED_MESSAGE:
/*
* Extended messages are sent in the following format :
* Byte
* 0 EXTENDED_MESSAGE == 1
* 1 length (includes one byte for code, doesn't
* include first two bytes)
* 2 code
* 3..length+1 arguments
*
* Start the extended message buffer with the EXTENDED_MESSAGE
* byte, since spi_print_msg() wants the whole thing.
*/
extended_msg[0] = EXTENDED_MESSAGE;
/* Accept first byte by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
dprintk(NDEBUG_EXTENDED, ("scsi%d : receiving extended message\n", instance->host_no));
len = 2;
data = extended_msg + 1;
phase = PHASE_MSGIN;
NCR5380_transfer_pio(instance, &phase, &len, &data);
dprintk(NDEBUG_EXTENDED, ("scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]));
if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) {
/* Accept third byte by clearing ACK */
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
len = extended_msg[1] - 1;
data = extended_msg + 3;
phase = PHASE_MSGIN;
NCR5380_transfer_pio(instance, &phase, &len, &data);
dprintk(NDEBUG_EXTENDED, ("scsi%d : message received, residual %d\n", instance->host_no, len));
switch (extended_msg[2]) {
case EXTENDED_SDTR:
case EXTENDED_WDTR:
case EXTENDED_MODIFY_DATA_POINTER:
case EXTENDED_EXTENDED_IDENTIFY:
tmp = 0;
}
} else if (len) {
printk("scsi%d: error receiving extended message\n", instance->host_no);
tmp = 0;
} else {
printk("scsi%d: extended message code %02x length %d is too long\n", instance->host_no, extended_msg[2], extended_msg[1]);
tmp = 0;
}
/* Fall through to reject message */
/*
* If we get something weird that we aren't expecting,
* reject it.
*/
default:
if (!tmp) {
printk("scsi%d: rejecting message ", instance->host_no);
spi_print_msg(extended_msg);
printk("\n");
} else if (tmp != EXTENDED_MESSAGE)
scmd_printk(KERN_INFO, cmd,
"rejecting unknown message %02x\n",tmp);
else
scmd_printk(KERN_INFO, cmd,
"rejecting unknown extended message code %02x, length %d\n", extended_msg[1], extended_msg[0]);
msgout = MESSAGE_REJECT;
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
break;
} /* switch (tmp) */
break;
case PHASE_MSGOUT:
len = 1;
data = &msgout;
hostdata->last_message = msgout;
NCR5380_transfer_pio(instance, &phase, &len, &data);
if (msgout == ABORT) {
hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
hostdata->connected = NULL;
cmd->result = DID_ERROR << 16;
collect_stats(hostdata, cmd);
cmd->scsi_done(cmd);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
return;
}
msgout = NOP;
break;
case PHASE_CMDOUT:
len = cmd->cmd_len;
data = cmd->cmnd;
/*
* XXX for performance reasons, on machines with a
* PSEUDO-DMA architecture we should probably
* use the dma transfer function.
*/
NCR5380_transfer_pio(instance, &phase, &len, &data);
if (!cmd->device->disconnect && should_disconnect(cmd->cmnd[0])) {
NCR5380_set_timer(hostdata, USLEEP_SLEEP);
dprintk(NDEBUG_USLEEP, ("scsi%d : issued command, sleeping until %ul\n", instance->host_no, hostdata->time_expires));
return;
}
break;
case PHASE_STATIN:
len = 1;
data = &tmp;
NCR5380_transfer_pio(instance, &phase, &len, &data);
cmd->SCp.Status = tmp;
break;
default:
printk("scsi%d : unknown phase\n", instance->host_no);
NCR5380_dprint(NDEBUG_ALL, instance);
} /* switch(phase) */
} /* if (tmp * SR_REQ) */
else {
/* RvC: go to sleep if polling time expired
*/
if (!cmd->device->disconnect && time_after_eq(jiffies, poll_time)) {
NCR5380_set_timer(hostdata, USLEEP_SLEEP);
dprintk(NDEBUG_USLEEP, ("scsi%d : poll timed out, sleeping until %ul\n", instance->host_no, hostdata->time_expires));
return;
}
}
} /* while (1) */
}
/*
* Function : void NCR5380_reselect (struct Scsi_Host *instance)
*
* Purpose : does reselection, initializing the instance->connected
* field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q
* nexus has been reestablished,
*
* Inputs : instance - this instance of the NCR5380.
*
* Locks: io_request_lock held by caller if IRQ driven
*/
/*
 * Respond to a reselection by a previously disconnected target:
 * read the mandatory IDENTIFY message to learn the LUN, find the
 * matching command on the disconnected_queue, unlink it, and make it
 * the connected command again.  Any protocol error aborts the nexus.
 */
static void NCR5380_reselect(struct Scsi_Host *instance) {
	NCR5380_local_declare();
	struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *)
	instance->hostdata;
	unsigned char target_mask;
	unsigned char lun, phase;
	int len;
	unsigned char msg[3];
	unsigned char *data;
	Scsi_Cmnd *tmp = NULL, *prev;
	int abort = 0;	/* set on any error; single abort at the end */
	NCR5380_setup(instance);
	/*
	 * Disable arbitration, etc. since the host adapter obviously
	 * lost, and tell an interrupted NCR5380_select() to restart.
	 */
	NCR5380_write(MODE_REG, MR_BASE);
	hostdata->restart_select = 1;
	/* Bus data carries our ID bit plus the reselecting target's ID bit;
	 * strip ours to leave the target's bitmask. */
	target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
	dprintk(NDEBUG_SELECTION, ("scsi%d : reselect\n", instance->host_no));
	/*
	 * At this point, we have detected that our SCSI ID is on the bus,
	 * SEL is true and BSY was false for at least one bus settle delay
	 * (400 ns).
	 *
	 * We must assert BSY ourselves, until the target drops the SEL
	 * signal.
	 */
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
	/* FIXME: timeout too long, must fail to workqueue */
	if(NCR5380_poll_politely(instance, STATUS_REG, SR_SEL, 0, 2*HZ)<0)
		abort = 1;
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	/*
	 * Wait for target to go into MSGIN.
	 * FIXME: timeout needed and fail to work queeu
	 */
	if(NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, SR_REQ, 2*HZ))
		abort = 1;
	/* Read the single-byte IDENTIFY message via PIO. */
	len = 1;
	data = msg;
	phase = PHASE_MSGIN;
	NCR5380_transfer_pio(instance, &phase, &len, &data);
	if (!(msg[0] & 0x80)) {
		/* Bit 7 set marks an IDENTIFY message; anything else is bogus. */
		printk(KERN_ERR "scsi%d : expecting IDENTIFY message, got ", instance->host_no);
		spi_print_msg(msg);
		abort = 1;
	} else {
		/* Accept message by clearing ACK */
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
		lun = (msg[0] & 0x07);
		/*
		 * We need to add code for SCSI-II to track which devices have
		 * I_T_L_Q nexuses established, and which have simple I_T_L
		 * nexuses so we can chose to do additional data transfer.
		 */
		/*
		 * Find the command corresponding to the I_T_L or I_T_L_Q nexus we
		 * just reestablished, and remove it from the disconnected queue.
		 */
		for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)
			if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun)
			    ) {
				if (prev) {
					REMOVE(prev, prev->host_scribble, tmp, tmp->host_scribble);
					prev->host_scribble = tmp->host_scribble;
				} else {
					REMOVE(-1, hostdata->disconnected_queue, tmp, tmp->host_scribble);
					hostdata->disconnected_queue = (Scsi_Cmnd *) tmp->host_scribble;
				}
				tmp->host_scribble = NULL;
				break;
			}
		if (!tmp) {
			printk(KERN_ERR "scsi%d : warning : target bitmask %02x lun %d not in disconnect_queue.\n", instance->host_no, target_mask, lun);
			/*
			 * Since we have an established nexus that we can't do anything with,
			 * we must abort it.
			 */
			abort = 1;
		}
	}
	if (abort) {
		do_abort(instance);
	} else {
		hostdata->connected = tmp;
		/* NOTE(review): this dprintk references tmp->target/tmp->lun,
		 * fields accessed directly on Scsi_Cmnd; harmless unless
		 * NDEBUG_RESELECTION is compiled in - confirm they exist in
		 * this tree before enabling that debug flag. */
		dprintk(NDEBUG_RESELECTION, ("scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->target, tmp->lun, tmp->tag));
	}
}
/*
* Function : void NCR5380_dma_complete (struct Scsi_Host *instance)
*
* Purpose : called by interrupt handler when DMA finishes or a phase
* mismatch occurs (which would finish the DMA transfer).
*
* Inputs : instance - this instance of the NCR5380.
*
* Returns : pointer to the Scsi_Cmnd structure for which the I_T_L
* nexus has been reestablished, on failure NULL is returned.
*/
#ifdef REAL_DMA
/*
 * NOTE(review): parameter is declared "NCR5380_instance *" rather than
 * "struct Scsi_Host *"; this only compiles where a REAL_DMA build defines
 * such a type - confirm before enabling REAL_DMA.
 */
static void NCR5380_dma_complete(NCR5380_instance * instance) {
	NCR5380_local_declare();
	struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
	int transferred;
	NCR5380_setup(instance);
	/*
	 * XXX this might not be right.
	 *
	 * Wait for final byte to transfer, ie wait for ACK to go false.
	 *
	 * We should use the Last Byte Sent bit, unfortunately this is
	 * not available on the 5380/5381 (only the various CMOS chips)
	 *
	 * FIXME: timeout, and need to handle long timeout/irq case
	 */
	NCR5380_poll_politely(instance, BUS_AND_STATUS_REG, BASR_ACK, 0, 5*HZ);
	/* Take the chip out of DMA mode and deassert chip controls. */
	NCR5380_write(MODE_REG, MR_BASE);
	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
	/*
	 * The only places we should see a phase mismatch and have to send
	 * data from the same set of pointers will be the data transfer
	 * phases. So, residual, requested length are only important here.
	 */
	if (!(hostdata->connected->SCp.phase & SR_CD)) {
		/* Bytes actually moved = requested length minus DMA residual. */
		transferred = instance->dmalen - NCR5380_dma_residual();
		hostdata->connected->SCp.this_residual -= transferred;
		hostdata->connected->SCp.ptr += transferred;
	}
}
#endif /* def REAL_DMA */
/*
* Function : int NCR5380_abort (Scsi_Cmnd *cmd)
*
* Purpose : abort a command
*
* Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the
* host byte of the result field to, if zero DID_ABORTED is
* used.
*
* Returns : 0 - success, -1 on failure.
*
* XXX - there is no way to abort the command that is currently
* connected, you have to wait for it to complete. If this is
* a problem, we could implement longjmp() / setjmp(), setjmp()
* called where the loop started in NCR5380_main().
*
* Locks: host lock taken by caller
*/
static int NCR5380_abort(Scsi_Cmnd * cmd) {
	NCR5380_local_declare();
	struct Scsi_Host *instance = cmd->device->host;
	struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
	Scsi_Cmnd *tmp, **prev;
	printk(KERN_WARNING "scsi%d : aborting command\n", instance->host_no);
	scsi_print_command(cmd);
	NCR5380_print_status(instance);
	NCR5380_setup(instance);
	dprintk(NDEBUG_ABORT, ("scsi%d : abort called\n", instance->host_no));
	dprintk(NDEBUG_ABORT, (" basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)));
#if 0
	/*
	 * Case 1 : If the command is the currently executing command,
	 * we'll set the aborted flag and return control so that
	 * information transfer routine can exit cleanly.
	 */
	if (hostdata->connected == cmd) {
		dprintk(NDEBUG_ABORT, ("scsi%d : aborting connected command\n", instance->host_no));
		hostdata->aborted = 1;
		/*
		 * We should perform BSY checking, and make sure we haven't slipped
		 * into BUS FREE.
		 */
		NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN);
		/*
		 * Since we can't change phases until we've completed the current
		 * handshake, we have to source or sink a byte of data if the current
		 * phase is not MSGOUT.
		 */
		/*
		 * Return control to the executing NCR drive so we can clear the
		 * aborted flag and get back into our main loop.
		 */
		return 0;
	}
#endif
	/*
	 * Case 2 : If the command hasn't been issued yet, we simply remove it
	 * from the issue queue.
	 */
	dprintk(NDEBUG_ABORT, ("scsi%d : abort going into loop.\n", instance->host_no));
	/* Walk the singly linked issue queue (linked via host_scribble),
	 * keeping a pointer-to-pointer so the match can be unlinked. */
	for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble)
		if (cmd == tmp) {
			REMOVE(5, *prev, tmp, tmp->host_scribble);
			(*prev) = (Scsi_Cmnd *) tmp->host_scribble;
			tmp->host_scribble = NULL;
			tmp->result = DID_ABORT << 16;
			dprintk(NDEBUG_ABORT, ("scsi%d : abort removed command from issue queue.\n", instance->host_no));
			tmp->scsi_done(tmp);
			return SUCCESS;
		}
/* NOTE(review): this #if'd "else if" attaches to the "if (cmd == tmp)"
 * above; it only adds a loop-detection diagnostic when NDEBUG_ABORT is
 * compiled in, and changes nothing otherwise. */
#if (NDEBUG & NDEBUG_ABORT)
	/* KLL */
		else if (prev == tmp)
			printk(KERN_ERR "scsi%d : LOOP\n", instance->host_no);
#endif
	/*
	 * Case 3 : If any commands are connected, we're going to fail the abort
	 * and let the high level SCSI driver retry at a later time or
	 * issue a reset.
	 *
	 * Timeouts, and therefore aborted commands, will be highly unlikely
	 * and handling them cleanly in this situation would make the common
	 * case of noresets less efficient, and would pollute our code. So,
	 * we fail.
	 */
	if (hostdata->connected) {
		dprintk(NDEBUG_ABORT, ("scsi%d : abort failed, command connected.\n", instance->host_no));
		return FAILED;
	}
	/*
	 * Case 4: If the command is currently disconnected from the bus, and
	 * there are no connected commands, we reconnect the I_T_L or
	 * I_T_L_Q nexus associated with it, go into message out, and send
	 * an abort message.
	 *
	 * This case is especially ugly. In order to reestablish the nexus, we
	 * need to call NCR5380_select(). The easiest way to implement this
	 * function was to abort if the bus was busy, and let the interrupt
	 * handler triggered on the SEL for reselect take care of lost arbitrations
	 * where necessary, meaning interrupts need to be enabled.
	 *
	 * When interrupts are enabled, the queues may change - so we
	 * can't remove it from the disconnected queue before selecting it
	 * because that could cause a failure in hashing the nexus if that
	 * device reselected.
	 *
	 * Since the queues may change, we can't use the pointers from when we
	 * first locate it.
	 *
	 * So, we must first locate the command, and if NCR5380_select()
	 * succeeds, then issue the abort, relocate the command and remove
	 * it from the disconnected queue.
	 */
	for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
		if (cmd == tmp) {
			dprintk(NDEBUG_ABORT, ("scsi%d : aborting disconnected command.\n", instance->host_no));
			if (NCR5380_select(instance, cmd, (int) cmd->tag))
				return FAILED;
			dprintk(NDEBUG_ABORT, ("scsi%d : nexus reestablished.\n", instance->host_no));
			do_abort(instance);
			/* Re-scan from the head: the queue may have changed while
			 * interrupts were enabled during selection (see above). */
			for (prev = (Scsi_Cmnd **) & (hostdata->disconnected_queue), tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble)
				if (cmd == tmp) {
					REMOVE(5, *prev, tmp, tmp->host_scribble);
					*prev = (Scsi_Cmnd *) tmp->host_scribble;
					tmp->host_scribble = NULL;
					tmp->result = DID_ABORT << 16;
					tmp->scsi_done(tmp);
					return SUCCESS;
				}
		}
	/*
	 * Case 5 : If we reached this point, the command was not found in any of
	 * the queues.
	 *
	 * We probably reached this point because of an unlikely race condition
	 * between the command completing successfully and the abortion code,
	 * so we won't panic, but we will notify the user in case something really
	 * broke.
	 */
	printk(KERN_WARNING "scsi%d : warning : SCSI command probably completed successfully\n"
	       " before abortion\n", instance->host_no);
	return FAILED;
}
/*
 * Function : int NCR5380_bus_reset (Scsi_Cmnd *cmd)
 *
 * Purpose : reset the SCSI bus.
 *
 * Returns : SUCCESS
 *
 * Locks: host lock taken by caller
 *
 * NOTE(review): the function below acquires instance->host_lock itself
 * around do_reset(); if the caller really held the host lock as the note
 * above says, that would deadlock.  The "taken by caller" note looks
 * stale - confirm against this kernel's error-handler calling convention.
 */
static int NCR5380_bus_reset(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *instance = cmd->device->host;
	/* Map the chip-access macros onto this host instance. */
	NCR5380_local_declare();
	NCR5380_setup(instance);
	NCR5380_print_status(instance);
	/* Pulse RST on the bus with the host lock held. */
	spin_lock_irq(instance->host_lock);
	do_reset(instance);
	spin_unlock_irq(instance->host_lock);
	return SUCCESS;
}
| gpl-2.0 |
utilite-computer/linux-kernel-3.0 | kernel/trace/trace_workqueue.c | 8160 | 7555 | /*
* Workqueue statistical tracer.
*
* Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
*
*/
#include <trace/events/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include "trace_stat.h"
#include "trace.h"
/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head list;	/* link in the per-cpu stat list */
	struct kref kref;	/* shared by the list and stat iterators */
	int cpu;		/* cpu this workqueue thread runs on */
	pid_t pid;		/* pid of the workqueue thread (lookup key) */
	/* Can be inserted from interrupt or user context, need to be atomic */
	atomic_t inserted;
	/*
	 * Don't need to be atomic, works are serialized in a single workqueue thread
	 * on a single CPU.
	 */
	unsigned int executed;
};
/* List of workqueue threads on one cpu.
 * NOTE: despite the "global" name, one of these exists per cpu (see the
 * DEFINE_PER_CPU below); the lock guards that cpu's list only. */
struct workqueue_global_stats {
	struct list_head list;
	spinlock_t lock;
};
/* Don't need a global lock because allocated before the workqueues, and
 * never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
/* Shorthand accessor for one cpu's stat list head + lock. */
#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
/* kref release callback: reclaim a stats node once its last ref drops. */
static void cpu_workqueue_stat_free(struct kref *kref)
{
	struct cpu_workqueue_stats *stats;

	stats = container_of(kref, struct cpu_workqueue_stats, kref);
	kfree(stats);
}
/* Insertion of a work */
static void
probe_workqueue_insertion(void *ignore,
			  struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *stats;
	unsigned long irq_flags;
	int found = 0;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, irq_flags);
	list_for_each_entry(stats, &workqueue_cpu_stat(cpu)->list, list) {
		if (stats->pid != wq_thread->pid)
			continue;
		/* Insertions may come from irq context, hence the atomic. */
		atomic_inc(&stats->inserted);
		found = 1;
		break;
	}
	if (!found)
		pr_debug("trace_workqueue: entry not found\n");
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, irq_flags);
}
/* Execution of a work */
static void
probe_workqueue_execution(void *ignore,
			  struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *stats;
	unsigned long irq_flags;
	int found = 0;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, irq_flags);
	list_for_each_entry(stats, &workqueue_cpu_stat(cpu)->list, list) {
		if (stats->pid != wq_thread->pid)
			continue;
		/* Works are serialized per thread, plain increment suffices. */
		stats->executed++;
		found = 1;
		break;
	}
	if (!found)
		pr_debug("trace_workqueue: entry not found\n");
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, irq_flags);
}
/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(void *ignore,
				     struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *stats;
	unsigned long irq_flags;

	WARN_ON(cpu < 0);

	/* Workqueues are sometimes created in atomic context */
	stats = kzalloc(sizeof(*stats), GFP_ATOMIC);
	if (!stats) {
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}

	INIT_LIST_HEAD(&stats->list);
	kref_init(&stats->kref);
	stats->cpu = cpu;
	stats->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, irq_flags);
	list_add_tail(&stats->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, irq_flags);
}
/* Destruction of a cpu workqueue thread */
static void
probe_workqueue_destruction(void *ignore, struct task_struct *wq_thread)
{
	/* Workqueue only execute on one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;
	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	/* _safe variant: the matching node is unlinked inside the loop. */
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
							list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			/* Drop the list's reference.  A concurrent stat reader may
			 * still hold its own kref, so this is not necessarily the
			 * final free. */
			kref_put(&node->kref, cpu_workqueue_stat_free);
			goto found;
		}
	}
	pr_debug("trace_workqueue: don't find workqueue to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
unsigned long flags;
struct cpu_workqueue_stats *ret = NULL;
spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
if (!list_empty(&workqueue_cpu_stat(cpu)->list)) {
ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
struct cpu_workqueue_stats, list);
kref_get(&ret->kref);
}
spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
return ret;
}
/* Stat iterator entry point: first non-empty cpu's first node (+ref). */
static void *workqueue_stat_start(struct tracer_stat *trace)
{
	void *first = NULL;
	int cpu;

	for_each_possible_cpu(cpu) {
		first = workqueue_stat_start_cpu(cpu);
		if (first)
			break;
	}

	return first;
}
/*
 * Stat iterator step: given the previous node, return the next
 * cpu_workqueue_stats with a reference held, or NULL at the end.
 */
static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	struct cpu_workqueue_stats *ret;
	int cpu = prev_cws->cpu;
	unsigned long flags;
	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		/*
		 * Done with this cpu: drop its lock before probing the following
		 * cpus (workqueue_stat_start_cpu takes each cpu's lock itself),
		 * and return from inside the branch - the unlock at the bottom
		 * belongs to the else path only.
		 */
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		do {
			cpu = cpumask_next(cpu, cpu_possible_mask);
			if (cpu >= nr_cpu_ids)
				return NULL;	/* no cpus left: iteration is over */
		} while (!(ret = workqueue_stat_start_cpu(cpu)));
		return ret;
	} else {
		/* Still on the same cpu's list: pin the next node. */
		ret = list_entry(prev_cws->list.next,
				 struct cpu_workqueue_stats, list);
		kref_get(&ret->kref);
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
	return ret;
}
/*
 * Render one stat row (cpu, insertions, executions, thread name).
 * Rows whose thread has already exited are silently skipped.
 */
static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	struct task_struct *tsk;
	struct pid *pid;

	pid = find_get_pid(cws->pid);
	if (!pid)
		return 0;

	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (tsk) {
		seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
			   atomic_read(&cws->inserted), cws->executed,
			   tsk->comm);
		put_task_struct(tsk);
	}

	put_pid(pid);
	return 0;
}
/* Drop the reference the stat iterator took on this node. */
static void workqueue_stat_release(void *stat)
{
	struct cpu_workqueue_stats *stats = stat;

	kref_put(&stats->kref, cpu_workqueue_stat_free);
}
/* Emit the two header lines shown above the per-workqueue stat rows. */
static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU INSERTED EXECUTED NAME\n");
	seq_printf(s, "# | | | |\n");
	return 0;
}
/* Hook table wiring this tracer into the generic trace_stat framework. */
struct tracer_stat workqueue_stats __read_mostly = {
	.name = "workqueues",
	.stat_start = workqueue_stat_start,	/* first node (+ref) */
	.stat_next = workqueue_stat_next,	/* iterator step (+ref) */
	.stat_show = workqueue_stat_show,	/* render one row */
	.stat_release = workqueue_stat_release,	/* drop iterator ref */
	.stat_headers = workqueue_stat_headers	/* column headers */
};
/*
 * Register the workqueue statistical tracer with the stat framework.
 * Returns 0 on success, 1 when registration failed.
 */
int __init stat_workqueue_init(void)
{
	int err = register_stat_tracer(&workqueue_stats);

	if (!err)
		return 0;

	pr_warning("Unable to register workqueue stat tracer\n");
	return 1;
}
fs_initcall(stat_workqueue_init);
/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
 */
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;
	/* Per-cpu list heads and locks must exist before any probe fires. */
	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}
	ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
	if (ret)
		goto out;
	ret = register_trace_workqueue_execution(probe_workqueue_execution, NULL);
	if (ret)
		goto no_insertion;
	ret = register_trace_workqueue_creation(probe_workqueue_creation, NULL);
	if (ret)
		goto no_execution;
	ret = register_trace_workqueue_destruction(probe_workqueue_destruction, NULL);
	if (ret)
		goto no_creation;
	return 0;
	/* Partial failure: unregister in reverse registration order. */
no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation, NULL);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution, NULL);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");
	return 1;
}
early_initcall(trace_workqueue_early_init);
| gpl-2.0 |
12019/old_samsung-lt02wifi-kernel | drivers/input/keyboard/hil_kbd.c | 8160 | 15102 | /*
* Generic linux-input device driver for keyboard devices
*
* Copyright (c) 2001 Brian S. Julin
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL").
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
*
* References:
* HP-HIL Technical Reference Manual. Hewlett Packard Product No. 45918A
*
*/
#include <linux/hil.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#define PREFIX "HIL: "
MODULE_AUTHOR("Brian S. Julin <bri@calyx.com>");
MODULE_DESCRIPTION("HIL keyboard/mouse driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("serio:ty03pr25id00ex*"); /* HIL keyboard */
MODULE_ALIAS("serio:ty03pr25id0Fex*"); /* HIL mouse */
#define HIL_PACKET_MAX_LENGTH 16
#define HIL_KBD_SET1_UPBIT 0x01
#define HIL_KBD_SET1_SHIFT 1
static unsigned int hil_kbd_set1[HIL_KEYCODES_SET1_TBLSIZE] __read_mostly =
{ HIL_KEYCODES_SET1 };
#define HIL_KBD_SET2_UPBIT 0x01
#define HIL_KBD_SET2_SHIFT 1
/* Set2 is user defined */
#define HIL_KBD_SET3_UPBIT 0x80
#define HIL_KBD_SET3_SHIFT 0
static unsigned int hil_kbd_set3[HIL_KEYCODES_SET3_TBLSIZE] __read_mostly =
{ HIL_KEYCODES_SET3 };
static const char hil_language[][16] = { HIL_LOCALE_MAP };
/* Per-device state for one HIL keyboard or pointing device. */
struct hil_dev {
	struct input_dev *dev;	/* input core device we report events to */
	struct serio *serio;	/* serio port carrying the HIL traffic */
	/* Input buffer and index for packets from HIL bus. */
	hil_packet data[HIL_PACKET_MAX_LENGTH];
	int idx4; /* four counts per packet */
	/* Raw device info records from HIL bus, see hil.h for fields. */
	char idd[HIL_PACKET_MAX_LENGTH]; /* DID byte and IDD record */
	char rsc[HIL_PACKET_MAX_LENGTH]; /* RSC record */
	char exd[HIL_PACKET_MAX_LENGTH]; /* EXD record */
	char rnm[HIL_PACKET_MAX_LENGTH + 1]; /* RNM record + NULL term. */
	struct completion cmd_done;	/* completed when a command response (or error) arrives */
	bool is_pointer;	/* pointing device (mouse/tablet) vs keyboard */
	/* Extra device details needed for pointing devices. */
	unsigned int nbtn, naxes;
	unsigned int btnmap[7];	/* HIL button number -> input BTN_* code */
};
/*
 * True unless the packet is one of the two poll/repoll markers
 * (HIL_ERR_INT | HIL_PKT_CMD with HIL_CMD_POL / HIL_CMD_RPL), which are
 * ordinary bus traffic rather than a response to a command we issued.
 */
static bool hil_dev_is_command_response(hil_packet p)
{
	hil_packet poll_marker = HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_POL;
	hil_packet repoll_marker = HIL_ERR_INT | HIL_PKT_CMD | HIL_CMD_RPL;

	return (p & ~HIL_CMDCT_POL) != poll_marker &&
	       (p & ~HIL_CMDCT_RPL) != repoll_marker;
}
/*
 * Copy a just-received command response into the matching record buffer
 * (idd/rsc/exd/rnm, selected by the command code in the final packet)
 * and complete cmd_done to wake the command round-trip waiter.
 */
static void hil_dev_handle_command_response(struct hil_dev *dev)
{
	hil_packet p;
	char *buf;
	int i, idx;
	/* idx4 counts bytes; four bytes make up one hil_packet. */
	idx = dev->idx4 / 4;
	p = dev->data[idx - 1];
	switch (p & HIL_PKT_DATA_MASK) {
	case HIL_CMD_IDD:
		buf = dev->idd;
		break;
	case HIL_CMD_RSC:
		buf = dev->rsc;
		break;
	case HIL_CMD_EXD:
		buf = dev->exd;
		break;
	case HIL_CMD_RNM:
		/* Name record is a string: keep its permanent terminator. */
		dev->rnm[HIL_PACKET_MAX_LENGTH] = 0;
		buf = dev->rnm;
		break;
	default:
		/* These occur when device isn't present */
		if (p != (HIL_ERR_INT | HIL_PKT_CMD)) {
			/* Anything else we'd like to know about. */
			printk(KERN_WARNING PREFIX "Device sent unknown record %x\n", p);
		}
		goto out;
	}
	/* Keep only the data bits of each packet; zero-pad the rest. */
	for (i = 0; i < idx; i++)
		buf[i] = dev->data[i] & HIL_PKT_DATA_MASK;
	for (; i < HIL_PACKET_MAX_LENGTH; i++)
		buf[i] = 0;
 out:
	complete(&dev->cmd_done);
}
/*
 * Translate a completed keyboard poll record into input key events.
 *
 * The poll header (data[0]) selects the character set; packets 1..idx-2
 * carry one key event each.  For the coded sets, one bit of each byte is
 * the up/down flag and the remaining bits (shifted) index the keymap.
 *
 * Fix: the SET2/SET3 cases previously stripped the up/down flag with
 * ~HIL_KBD_SET1_UPBIT (0x01).  For set 2 this was accidentally harmless
 * (its up bit is also 0x01), but for set 3 the up bit is 0x80, so key
 * releases left bit 7 set and indexed past the end of hil_kbd_set3[]
 * (and wrongly cleared bit 0).  Each set now masks with its own UPBIT.
 */
static void hil_dev_handle_kbd_events(struct hil_dev *kbd)
{
	struct input_dev *dev = kbd->dev;
	int idx = kbd->idx4 / 4;
	int i;

	switch (kbd->data[0] & HIL_POL_CHARTYPE_MASK) {
	case HIL_POL_CHARTYPE_NONE:
		return;

	case HIL_POL_CHARTYPE_ASCII:
		/* Plain 7-bit codes, key-down only. */
		for (i = 1; i < idx - 1; i++)
			input_report_key(dev, kbd->data[i] & 0x7f, 1);
		break;

	case HIL_POL_CHARTYPE_RSVD1:
	case HIL_POL_CHARTYPE_RSVD2:
	case HIL_POL_CHARTYPE_BINARY:
		/* Raw codes, key-down only. */
		for (i = 1; i < idx - 1; i++)
			input_report_key(dev, kbd->data[i], 1);
		break;

	case HIL_POL_CHARTYPE_SET1:
		for (i = 1; i < idx - 1; i++) {
			unsigned int key = kbd->data[i];
			int up = key & HIL_KBD_SET1_UPBIT;

			key &= (~HIL_KBD_SET1_UPBIT & 0xff);
			key = hil_kbd_set1[key >> HIL_KBD_SET1_SHIFT];
			input_report_key(dev, key, !up);
		}
		break;

	case HIL_POL_CHARTYPE_SET2:
		/* Set 2 is user defined: code maps through identically. */
		for (i = 1; i < idx - 1; i++) {
			unsigned int key = kbd->data[i];
			int up = key & HIL_KBD_SET2_UPBIT;

			/* was ~HIL_KBD_SET1_UPBIT; same value, but use this
			 * set's own up bit for correctness */
			key &= (~HIL_KBD_SET2_UPBIT & 0xff);
			key = key >> HIL_KBD_SET2_SHIFT;
			input_report_key(dev, key, !up);
		}
		break;

	case HIL_POL_CHARTYPE_SET3:
		for (i = 1; i < idx - 1; i++) {
			unsigned int key = kbd->data[i];
			int up = key & HIL_KBD_SET3_UPBIT;

			/* BUG FIX: ~HIL_KBD_SET1_UPBIT (0x01) left the set-3
			 * up bit (0x80) in place, indexing out of bounds on
			 * every key release. */
			key &= (~HIL_KBD_SET3_UPBIT & 0xff);
			key = hil_kbd_set3[key >> HIL_KBD_SET3_SHIFT];
			input_report_key(dev, key, !up);
		}
		break;
	}

	input_sync(dev);
}
/*
 * Translate a completed pointer-device poll record into input events:
 * axis packets first (8 or 16 bit per axis), then button transitions.
 */
static void hil_dev_handle_ptr_events(struct hil_dev *ptr)
{
	struct input_dev *dev = ptr->dev;
	int idx = ptr->idx4 / 4;
	hil_packet p = ptr->data[idx - 1];
	int i, cnt, laxis;
	bool absdev, ax16;
	/* The terminating packet encodes how many packets preceded it. */
	if ((p & HIL_CMDCT_POL) != idx - 1) {
		printk(KERN_WARNING PREFIX
		       "Malformed poll packet %x (idx = %i)\n", p, idx);
		return;
	}
	/* First reported axis index: 0, or 3 when the alternate set is used. */
	i = (p & HIL_POL_AXIS_ALT) ? 3 : 0;
	laxis = (p & HIL_POL_NUM_AXES_MASK) + i;
	ax16 = ptr->idd[1] & HIL_IDD_HEADER_16BIT; /* 8 or 16bit resolution */
	absdev = ptr->idd[1] & HIL_IDD_HEADER_ABS;
	/* One (8-bit) or two (16-bit, low byte first) packets per axis. */
	for (cnt = 1; i < laxis; i++) {
		unsigned int lo, hi, val;
		lo = ptr->data[cnt++] & HIL_PKT_DATA_MASK;
		hi = ax16 ? (ptr->data[cnt++] & HIL_PKT_DATA_MASK) : 0;
		if (absdev) {
			val = lo + (hi << 8);
#ifdef TABLET_AUTOADJUST
			/* Widen the advertised range when a sample exceeds it. */
			if (val < input_abs_get_min(dev, ABS_X + i))
				input_abs_set_min(dev, ABS_X + i, val);
			if (val > input_abs_get_max(dev, ABS_X + i))
				input_abs_set_max(dev, ABS_X + i, val);
#endif
			/* NOTE(review): non-X axes within each triple are
			 * inverted (max - val) - presumably to match input-core
			 * coordinate orientation; confirm against HIL docs. */
			if (i % 3)
				val = input_abs_get_max(dev, ABS_X + i) - val;
			input_report_abs(dev, ABS_X + i, val);
		} else {
			/* Sign-extend the 8/16-bit relative delta. */
			val = (int) (((int8_t) lo) | ((int8_t) hi << 8));
			if (i % 3)
				val *= -1;
			input_report_rel(dev, REL_X + i, val);
		}
	}
	/* Remaining packets are button transitions: bit 0 is the up flag,
	 * codes 0x80..0x8c (even) are buttons mapped through btnmap[]. */
	while (cnt < idx - 1) {
		unsigned int btn = ptr->data[cnt++];
		int up = btn & 1;
		btn &= 0xfe;
		if (btn == 0x8e)
			continue; /* TODO: proximity == touch? */
		if (btn > 0x8c || btn < 0x80)
			continue;
		btn = (btn - 0x80) >> 1;
		btn = ptr->btnmap[btn];
		input_report_key(dev, btn, !up);
	}
	input_sync(dev);
}
/*
 * Discard a corrupted packet stream: reset the byte-assembly index and
 * wake any sleeper in hil_dev_connect() so it is not stranded waiting
 * for a response that will never arrive.
 */
static void hil_dev_process_err(struct hil_dev *dev)
{
printk(KERN_WARNING PREFIX "errored HIL packet\n");
dev->idx4 = 0;
complete(&dev->cmd_done); /* just in case somebody is waiting */
}
/*
 * Serio per-byte interrupt handler.
 *
 * Assembles incoming bytes big-endian (MSB first) into 32-bit
 * hil_packets in dev->data[], dev->idx4 counting bytes.  Once a whole
 * packet is complete it is validated and, if it is a command packet,
 * the accumulated record is dispatched to the command-response,
 * pointer, or keyboard handler.
 */
static irqreturn_t hil_dev_interrupt(struct serio *serio,
unsigned char data, unsigned int flags)
{
struct hil_dev *dev;
hil_packet packet;
int idx;
dev = serio_get_drvdata(serio);
BUG_ON(dev == NULL);
/* record buffer full without a terminating command: protocol error */
if (dev->idx4 >= HIL_PACKET_MAX_LENGTH * sizeof(hil_packet)) {
hil_dev_process_err(dev);
goto out;
}
idx = dev->idx4 / 4;
if (!(dev->idx4 % 4))
dev->data[idx] = 0;
packet = dev->data[idx];
/* place this byte in its big-endian slot within the current packet */
packet |= ((hil_packet)data) << ((3 - (dev->idx4 % 4)) * 8);
dev->data[idx] = packet;
/* Records of N 4-byte hil_packets must terminate with a command. */
if ((++dev->idx4 % 4) == 0) {
if ((packet & 0xffff0000) != HIL_ERR_INT) {
hil_dev_process_err(dev);
} else if (packet & HIL_PKT_CMD) {
if (hil_dev_is_command_response(packet))
hil_dev_handle_command_response(dev);
else if (dev->is_pointer)
hil_dev_handle_ptr_events(dev);
else
hil_dev_handle_kbd_events(dev);
/* record consumed; start assembling the next one */
dev->idx4 = 0;
}
}
out:
return IRQ_HANDLED;
}
/*
 * Serio disconnect callback: stop the port first (so no more interrupts
 * reference dev), then unregister the input device and free everything.
 */
static void hil_dev_disconnect(struct serio *serio)
{
struct hil_dev *dev = serio_get_drvdata(serio);
BUG_ON(dev == NULL);
serio_close(serio);
input_unregister_device(dev->dev);
serio_set_drvdata(serio, NULL);
kfree(dev);
}
/*
 * Fill in the input_dev capabilities for a HIL keyboard: key + autorepeat
 * events, the three standard LEDs, and every keycode reachable through
 * the set-1 and set-3 translation tables.
 */
static void hil_dev_keyboard_setup(struct hil_dev *kbd)
{
struct input_dev *input_dev = kbd->dev;
uint8_t did = kbd->idd[0];
int i;
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
input_dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) |
BIT_MASK(LED_SCROLLL);
/* advertise every keycode either translation table can produce */
for (i = 0; i < 128; i++) {
__set_bit(hil_kbd_set1[i], input_dev->keybit);
__set_bit(hil_kbd_set3[i], input_dev->keybit);
}
__clear_bit(KEY_RESERVED, input_dev->keybit);
/* expose set 1 as the user-visible keycode table */
input_dev->keycodemax = HIL_KEYCODES_SET1_TBLSIZE;
input_dev->keycodesize = sizeof(hil_kbd_set1[0]);
input_dev->keycode = hil_kbd_set1;
input_dev->name = strlen(kbd->rnm) ? kbd->rnm : "HIL keyboard";
input_dev->phys = "hpkbd/input0";
/* low bits of the device id encode the keyboard language/layout */
printk(KERN_INFO PREFIX "HIL keyboard found (did = 0x%02x, lang = %s)\n",
did, hil_language[did & HIL_IDD_DID_TYPE_KB_LANG_MASK]);
}
/*
 * Fill in the input_dev capabilities for a HIL pointer (mouse, tablet or
 * touchscreen) from its identify record: axis type (relative/absolute),
 * axis count and ranges, and button mapping.
 */
static void hil_dev_pointer_setup(struct hil_dev *ptr)
{
struct input_dev *input_dev = ptr->dev;
uint8_t did = ptr->idd[0];
uint8_t *idd = ptr->idd + 1;
unsigned int naxsets = HIL_IDD_NUM_AXSETS(*idd);
unsigned int i, btntype;
const char *txt;
ptr->naxes = HIL_IDD_NUM_AXES_PER_SET(*idd);
switch (did & HIL_IDD_DID_TYPE_MASK) {
case HIL_IDD_DID_TYPE_REL:
input_dev->evbit[0] = BIT_MASK(EV_REL);
/* primary axis set maps to REL_X.. */
for (i = 0; i < ptr->naxes; i++)
__set_bit(REL_X + i, input_dev->relbit);
/* alternate axis set (if any) maps to REL_X+3.. */
for (i = 3; naxsets > 1 && i < ptr->naxes + 3; i++)
__set_bit(REL_X + i, input_dev->relbit);
txt = "relative";
break;
case HIL_IDD_DID_TYPE_ABS:
input_dev->evbit[0] = BIT_MASK(EV_ABS);
for (i = 0; i < ptr->naxes; i++)
input_set_abs_params(input_dev, ABS_X + i,
0, HIL_IDD_AXIS_MAX(idd, i), 0, 0);
for (i = 3; naxsets > 1 && i < ptr->naxes + 3; i++)
input_set_abs_params(input_dev, ABS_X + i,
0, HIL_IDD_AXIS_MAX(idd, i - 3), 0, 0);
#ifdef TABLET_AUTOADJUST
/* shrink advertised ranges by 10% each side; runtime events will
 * grow them back out (see hil_dev_handle_ptr_events) */
for (i = 0; i < ABS_MAX; i++) {
int diff = input_abs_get_max(input_dev, ABS_X + i) / 10;
input_abs_set_min(input_dev, ABS_X + i,
input_abs_get_min(input_dev, ABS_X + i) + diff);
input_abs_set_max(input_dev, ABS_X + i,
input_abs_get_max(input_dev, ABS_X + i) - diff);
}
#endif
txt = "absolute";
break;
default:
BUG();
}
ptr->nbtn = HIL_IDD_NUM_BUTTONS(idd);
if (ptr->nbtn)
input_dev->evbit[0] |= BIT_MASK(EV_KEY);
/* choose the button code family from the device subtype; later
 * matches override earlier ones */
btntype = BTN_MISC;
if ((did & HIL_IDD_DID_ABS_TABLET_MASK) == HIL_IDD_DID_ABS_TABLET)
#ifdef TABLET_SIMULATES_MOUSE
btntype = BTN_TOUCH;
#else
btntype = BTN_DIGI;
#endif
if ((did & HIL_IDD_DID_ABS_TSCREEN_MASK) == HIL_IDD_DID_ABS_TSCREEN)
btntype = BTN_TOUCH;
if ((did & HIL_IDD_DID_REL_MOUSE_MASK) == HIL_IDD_DID_REL_MOUSE)
btntype = BTN_MOUSE;
for (i = 0; i < ptr->nbtn; i++) {
__set_bit(btntype | i, input_dev->keybit);
ptr->btnmap[i] = btntype | i;
}
if (btntype == BTN_MOUSE) {
/* Swap buttons 2 and 3 */
ptr->btnmap[1] = BTN_MIDDLE;
ptr->btnmap[2] = BTN_RIGHT;
}
input_dev->name = strlen(ptr->rnm) ? ptr->rnm : "HIL pointer device";
printk(KERN_INFO PREFIX
"HIL pointer device found (did: 0x%02x, axis: %s)\n",
did, txt);
printk(KERN_INFO PREFIX
"HIL pointer has %i buttons and %i sets of %i axes\n",
ptr->nbtn, naxsets, ptr->naxes);
}
static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
{
struct hil_dev *dev;
struct input_dev *input_dev;
uint8_t did, *idd;
int error;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
input_dev = input_allocate_device();
if (!dev || !input_dev) {
error = -ENOMEM;
goto bail0;
}
dev->serio = serio;
dev->dev = input_dev;
error = serio_open(serio, drv);
if (error)
goto bail0;
serio_set_drvdata(serio, dev);
/* Get device info. MLC driver supplies devid/status/etc. */
init_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
serio_write(serio, HIL_CMD_IDD);
error = wait_for_completion_killable(&dev->cmd_done);
if (error)
goto bail1;
init_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
serio_write(serio, HIL_CMD_RSC);
error = wait_for_completion_killable(&dev->cmd_done);
if (error)
goto bail1;
init_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
serio_write(serio, HIL_CMD_RNM);
error = wait_for_completion_killable(&dev->cmd_done);
if (error)
goto bail1;
init_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
serio_write(serio, HIL_CMD_EXD);
error = wait_for_completion_killable(&dev->cmd_done);
if (error)
goto bail1;
did = dev->idd[0];
idd = dev->idd + 1;
switch (did & HIL_IDD_DID_TYPE_MASK) {
case HIL_IDD_DID_TYPE_KB_INTEGRAL:
case HIL_IDD_DID_TYPE_KB_ITF:
case HIL_IDD_DID_TYPE_KB_RSVD:
case HIL_IDD_DID_TYPE_CHAR:
if (HIL_IDD_NUM_BUTTONS(idd) ||
HIL_IDD_NUM_AXES_PER_SET(*idd)) {
printk(KERN_INFO PREFIX
"combo devices are not supported.\n");
goto bail1;
}
dev->is_pointer = false;
hil_dev_keyboard_setup(dev);
break;
case HIL_IDD_DID_TYPE_REL:
case HIL_IDD_DID_TYPE_ABS:
dev->is_pointer = true;
hil_dev_pointer_setup(dev);
break;
default:
goto bail1;
}
input_dev->id.bustype = BUS_HIL;
input_dev->id.vendor = PCI_VENDOR_ID_HP;
input_dev->id.product = 0x0001; /* TODO: get from kbd->rsc */
input_dev->id.version = 0x0100; /* TODO: get from kbd->rsc */
input_dev->dev.parent = &serio->dev;
if (!dev->is_pointer) {
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
/* Enable Keyswitch Autorepeat 1 */
serio_write(serio, HIL_CMD_EK1);
/* No need to wait for completion */
}
error = input_register_device(input_dev);
if (error)
goto bail1;
return 0;
bail1:
serio_close(serio);
serio_set_drvdata(serio, NULL);
bail0:
input_free_device(input_dev);
kfree(dev);
return error;
}
/* Devices handled by this driver: anything speaking the HIL protocol
 * behind an HP HIL MLC serio port. */
static struct serio_device_id hil_dev_ids[] = {
{
.type = SERIO_HIL_MLC,
.proto = SERIO_HIL,
.id = SERIO_ANY,
.extra = SERIO_ANY,
},
{ 0 }
};
/* export the id table so the module autoloads on matching serio ports */
MODULE_DEVICE_TABLE(serio, hil_dev_ids);
/* Serio driver glue: connect/disconnect lifecycle plus the per-byte
 * interrupt handler that assembles HIL packets. */
static struct serio_driver hil_serio_drv = {
.driver = {
.name = "hil_dev",
},
.description = "HP HIL keyboard/mouse/tablet driver",
.id_table = hil_dev_ids,
.connect = hil_dev_connect,
.disconnect = hil_dev_disconnect,
.interrupt = hil_dev_interrupt
};
/* Module entry point: register the serio driver with the serio bus. */
static int __init hil_dev_init(void)
{
return serio_register_driver(&hil_serio_drv);
}
/* Module exit point: unregister from the serio bus, which triggers
 * hil_dev_disconnect() for any bound devices. */
static void __exit hil_dev_exit(void)
{
serio_unregister_driver(&hil_serio_drv);
}
module_init(hil_dev_init);
module_exit(hil_dev_exit);
| gpl-2.0 |
DooMLoRD/semc-kernel-msm7x30-dev | drivers/gpu/drm/r128/r128_irq.c | 8416 | 3517 | /* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */
/*
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
* initial release of the Radeon 8500 driver under the XFree86 license.
* This notice must be preserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Keith Whitwell <keith@tungstengraphics.com>
* Eric Anholt <anholt@FreeBSD.org>
*/
#include "drmP.h"
#include "drm.h"
#include "r128_drm.h"
#include "r128_drv.h"
/*
 * DRM hook: report the number of vblank interrupts observed so far.
 * Only CRTC 0 is wired up on this hardware; any other CRTC index reads
 * as zero.
 */
u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
{
	const drm_r128_private_t *dev_priv = dev->dev_private;

	if (crtc == 0)
		return atomic_read(&dev_priv->vbl_received);

	return 0;
}
/*
 * Top-level interrupt handler.  Only the CRTC vblank interrupt is
 * serviced: it is acknowledged in GEN_INT_STATUS, the software counter
 * is bumped, and the DRM core is notified.  Anything else is reported
 * as not ours (IRQ_NONE).
 */
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
int status;
status = R128_READ(R128_GEN_INT_STATUS);
/* VBLANK interrupt */
if (status & R128_CRTC_VBLANK_INT) {
/* ack first so a new vblank edge is not lost */
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
atomic_inc(&dev_priv->vbl_received);
drm_handle_vblank(dev, 0);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
/*
 * DRM hook: enable vblank interrupt delivery for CRTC 0.
 * Returns -EINVAL for any other CRTC index.
 */
int r128_enable_vblank(struct drm_device *dev, int crtc)
{
drm_r128_private_t *dev_priv = dev->dev_private;
if (crtc != 0) {
DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
return -EINVAL;
}
/* note: overwrites GEN_INT_CNTL rather than OR-ing the enable bit in */
R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
return 0;
}
/*
 * DRM hook: disable vblank interrupts.  Intentionally a no-op apart
 * from CRTC validation — see the FIXME below for the intended register
 * sequence.
 */
void r128_disable_vblank(struct drm_device *dev, int crtc)
{
if (crtc != 0)
DRM_ERROR("%s: bad crtc %d\n", __func__, crtc);
/*
* FIXME: implement proper interrupt disable by using the vblank
* counter register (if available)
*
* R128_WRITE(R128_GEN_INT_CNTL,
* R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
*/
}
/*
 * Called before the IRQ handler is installed: quiesce the chip so no
 * stale interrupt fires the moment the handler is hooked up.
 */
void r128_driver_irq_preinstall(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
/* Disable *all* interrupts */
R128_WRITE(R128_GEN_INT_CNTL, 0);
/* Clear vblank bit if it's already high */
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
}
/* Called after the IRQ handler is installed; nothing to do — interrupt
 * sources are enabled on demand via r128_enable_vblank(). */
int r128_driver_irq_postinstall(struct drm_device *dev)
{
return 0;
}
/*
 * Called when the IRQ handler is being removed: mask everything so the
 * chip cannot raise an interrupt with no handler attached.  dev_private
 * may already be gone during teardown, hence the NULL check.
 */
void r128_driver_irq_uninstall(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
if (!dev_priv)
return;
/* Disable *all* interrupts */
R128_WRITE(R128_GEN_INT_CNTL, 0);
}
| gpl-2.0 |
pacificIT/Kernel_Unico | drivers/net/wimax/i2400m/sdio-fw.c | 9184 | 7208 | /*
* Intel Wireless WiMAX Connection 2400m
* Firmware uploader's SDIO specifics
*
*
* Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <linux-wimax@intel.com>
* Yanir Lubetkin <yanirx.lubetkin@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* - Initial implementation
*
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* - Bus generic/specific split for USB
*
* Dirk Brandewie <dirk.j.brandewie@intel.com>
* - Initial implementation for SDIO
*
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* - SDIO rehash for changes in the bus-driver model
*
* Dirk Brandewie <dirk.j.brandewie@intel.com>
* - Make it IRQ based, not polling
*
* THE PROCEDURE
*
* See fw.c for the generic description of this procedure.
*
* This file implements only the SDIO specifics. It boils down to how
* to send a command and waiting for an acknowledgement from the
* device.
*
* All this code is sequential -- all i2400ms_bus_bm_*() functions are
* executed in the same thread, except i2400ms_bm_irq() [on its own by
* the SDIO driver]. This makes it possible to avoid locking.
*
* COMMAND EXECUTION
*
* The generic firmware upload code will call i2400m_bus_bm_cmd_send()
* to send commands.
*
* The SDIO devices expects things in 256 byte blocks, so it will pad
* it, compute the checksum (if needed) and pass it to SDIO.
*
* ACK RECEPTION
*
* This works in IRQ mode -- the fw loader says when to wait for data
* and for that it calls i2400ms_bus_bm_wait_for_ack().
*
* This checks if there is any data available (RX size > 0); if not,
* waits for the IRQ handler to notify about it. Once there is data,
* it is read and passed to the caller. Doing it this way we don't
* need much coordination/locking, and it makes it much more difficult
* for an interrupt to be lost and the wait_for_ack() function getting
* stuck even when data is pending.
*/
#include <linux/mmc/sdio_func.h>
#include "i2400m-sdio.h"
#define D_SUBMODULE fw
#include "sdio-debug-levels.h"
/*
* Send a boot-mode command to the SDIO function
*
* We use a bounce buffer (i2400m->bm_cmd_buf) because we need to
* touch the header if the RAW flag is not set.
*
* @flags: pass thru from i2400m_bm_cmd()
* @return: cmd_size if ok, < 0 errno code on error.
*
* Note the command is padded to the SDIO block size for the device.
*/
/*
 * Send a boot-mode command to the device over SDIO.
 *
 * The command is staged in the bounce buffer i2400m->bm_cmd_buf (so the
 * header can be touched when the RAW flag is clear), zero-padded up to
 * the SDIO block size, and pushed with a single sdio_memcpy_toio()
 * under the claimed host.  Returns the (unpadded) cmd_size on success
 * or a negative errno.
 */
ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *i2400m,
const struct i2400m_bootrom_header *_cmd,
size_t cmd_size, int flags)
{
ssize_t result;
struct device *dev = i2400m_dev(i2400m);
struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd);
struct i2400m_bootrom_header *cmd;
/* SDIO restriction */
size_t cmd_size_a = ALIGN(cmd_size, I2400MS_BLK_SIZE);
d_fnstart(5, dev, "(i2400m %p cmd %p size %zu)\n",
i2400m, _cmd, cmd_size);
result = -E2BIG;
if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
goto error_too_big;
/* caller may already be building directly in the bounce buffer */
if (_cmd != i2400m->bm_cmd_buf)
memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
cmd = i2400m->bm_cmd_buf;
if (cmd_size_a > cmd_size) /* Zero pad space */
memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
if ((flags & I2400M_BM_CMD_RAW) == 0) {
/* non-raw commands must request a response; fix up checksum etc */
if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0))
dev_warn(dev, "SW BUG: response_required == 0\n");
i2400m_bm_cmd_prepare(cmd);
}
d_printf(4, dev, "BM cmd %d: %zu bytes (%zu padded)\n",
opcode, cmd_size, cmd_size_a);
d_dump(5, dev, cmd, cmd_size);
sdio_claim_host(i2400ms->func); /* Send & check */
result = sdio_memcpy_toio(i2400ms->func, I2400MS_DATA_ADDR,
i2400m->bm_cmd_buf, cmd_size_a);
sdio_release_host(i2400ms->func);
if (result < 0) {
dev_err(dev, "BM cmd %d: cannot send: %ld\n",
opcode, (long) result);
goto error_cmd_send;
}
result = cmd_size;
error_cmd_send:
error_too_big:
d_fnend(5, dev, "(i2400m %p cmd %p size %zu) = %d\n",
i2400m, _cmd, cmd_size, (int) result);
return result;
}
/*
* Read an ack from the device's boot-mode
*
* @i2400m:
* @_ack: pointer to where to store the read data
* @ack_size: how many bytes we should read
*
* Returns: < 0 errno code on error; otherwise, amount of received bytes.
*
* The ACK for a BM command is always at least sizeof(*ack) bytes, so
* check for that. We don't need to check for device reboots
*
*/
/*
 * Wait (up to 2s) for the IRQ handler to deposit a boot-mode ack, then
 * copy it out under rx_lock.
 *
 * bm_ack_size acts as the handshake: -EINPROGRESS means "nothing yet";
 * the IRQ path sets it to the received size (or an error), and we reset
 * it to -EINPROGRESS once the data has been consumed.  Returns bytes
 * copied or a negative errno.
 */
ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
struct i2400m_bootrom_header *ack,
size_t ack_size)
{
ssize_t result;
struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
struct sdio_func *func = i2400ms->func;
struct device *dev = &func->dev;
int size;
BUG_ON(sizeof(*ack) > ack_size);
d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n",
i2400m, ack, ack_size);
result = wait_event_timeout(i2400ms->bm_wfa_wq,
i2400ms->bm_ack_size != -EINPROGRESS,
2 * HZ);
if (result == 0) {
result = -ETIMEDOUT;
dev_err(dev, "BM: error waiting for an ack\n");
goto error_timeout;
}
spin_lock(&i2400m->rx_lock);
result = i2400ms->bm_ack_size;
BUG_ON(result == -EINPROGRESS);
if (result < 0) /* so we exit when rx_release() is called */
dev_err(dev, "BM: %s failed: %zd\n", __func__, result);
else {
/* NOTE(review): min() mixes size_t ack_size with ssize_t
 * bm_ack_size — presumably safe since bm_ack_size >= 0 here,
 * but confirm the kernel min() type check passes */
size = min(ack_size, i2400ms->bm_ack_size);
memcpy(ack, i2400m->bm_ack_buf, size);
}
/*
* Remember always to clear the bm_ack_size to -EINPROGRESS
* after the RX data is processed
*/
i2400ms->bm_ack_size = -EINPROGRESS;
spin_unlock(&i2400m->rx_lock);
error_timeout:
d_fnend(5, dev, "(i2400m %p ack %p size %zu) = %zd\n",
i2400m, ack, ack_size, result);
return result;
}
| gpl-2.0 |
SlimRoms/kernel_samsung_t1 | drivers/media/video/pvrusb2/pvrusb2-encoder.c | 9440 | 14688 | /*
*
*
* Copyright (C) 2005 Mike Isely <isely@pobox.com>
* Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/device.h> // for linux/firmware.h
#include <linux/firmware.h>
#include "pvrusb2-util.h"
#include "pvrusb2-encoder.h"
#include "pvrusb2-hdw-internal.h"
#include "pvrusb2-debug.h"
#include "pvrusb2-fx2-cmd.h"
/* Firmware mailbox flags - definitions found from ivtv */
#define IVTV_MBOX_FIRMWARE_DONE 0x00000004
#define IVTV_MBOX_DRIVER_DONE 0x00000002
#define IVTV_MBOX_DRIVER_BUSY 0x00000001
#define MBOX_BASE 0x44
/*
 * Write dlen 32-bit words to the encoder's memory starting at word
 * offset offs, via the FX2 MEM_WRITE_DWORD command, in chunks of up to
 * 8 words (each word serialized into 7 bytes as described below).
 * Returns 0 or the error from pvr2_send_request().
 */
static int pvr2_encoder_write_words(struct pvr2_hdw *hdw,
unsigned int offs,
const u32 *data, unsigned int dlen)
{
unsigned int idx,addr;
unsigned int bAddr;
int ret;
unsigned int chunkCnt;
/*
Format: First byte must be 0x01. Remaining 32 bit words are
spread out into chunks of 7 bytes each, with the first 4 bytes
being the data word (little endian), and the next 3 bytes
being the address where that data word is to be written (big
endian). Repeat request for additional words, with offset
adjusted accordingly.
*/
while (dlen) {
chunkCnt = 8;
if (chunkCnt > dlen) chunkCnt = dlen;
memset(hdw->cmd_buffer,0,sizeof(hdw->cmd_buffer));
bAddr = 0;
hdw->cmd_buffer[bAddr++] = FX2CMD_MEM_WRITE_DWORD;
for (idx = 0; idx < chunkCnt; idx++) {
addr = idx + offs;
/* 3-byte big-endian target address ... */
hdw->cmd_buffer[bAddr+6] = (addr & 0xffu);
hdw->cmd_buffer[bAddr+5] = ((addr>>8) & 0xffu);
hdw->cmd_buffer[bAddr+4] = ((addr>>16) & 0xffu);
/* ... preceded by the 4-byte little-endian data word */
PVR2_DECOMPOSE_LE(hdw->cmd_buffer, bAddr,data[idx]);
bAddr += 7;
}
ret = pvr2_send_request(hdw,
hdw->cmd_buffer,1+(chunkCnt*7),
NULL,0);
if (ret) return ret;
data += chunkCnt;
dlen -= chunkCnt;
offs += chunkCnt;
}
return 0;
}
/*
 * Read dlen 32-bit words from the encoder's memory starting at word
 * offset offs.  Reads go in 16-word chunks where possible; any
 * remainder smaller than 16 words is fetched one word at a time (the
 * FX2 firmware only offers a 1-word and a 64-byte read command).
 * Returns 0 or the error from pvr2_send_request().
 */
static int pvr2_encoder_read_words(struct pvr2_hdw *hdw,
unsigned int offs,
u32 *data, unsigned int dlen)
{
unsigned int idx;
int ret;
unsigned int chunkCnt;
/*
Format: First byte must be 0x02 (status check) or 0x28 (read
back block of 32 bit words). Next 6 bytes must be zero,
followed by a single byte of MBOX_BASE+offset for portion to
be read. Returned data is packed set of 32 bits words that
were read.
*/
while (dlen) {
chunkCnt = 16;
if (chunkCnt > dlen) chunkCnt = dlen;
if (chunkCnt < 16) chunkCnt = 1;
hdw->cmd_buffer[0] =
((chunkCnt == 1) ?
FX2CMD_MEM_READ_DWORD : FX2CMD_MEM_READ_64BYTES);
hdw->cmd_buffer[1] = 0;
hdw->cmd_buffer[2] = 0;
hdw->cmd_buffer[3] = 0;
hdw->cmd_buffer[4] = 0;
/* 3-byte big-endian source address */
hdw->cmd_buffer[5] = ((offs>>16) & 0xffu);
hdw->cmd_buffer[6] = ((offs>>8) & 0xffu);
hdw->cmd_buffer[7] = (offs & 0xffu);
ret = pvr2_send_request(hdw,
hdw->cmd_buffer,8,
hdw->cmd_buffer,
(chunkCnt == 1 ? 4 : 16 * 4));
if (ret) return ret;
/* unpack the little-endian reply words */
for (idx = 0; idx < chunkCnt; idx++) {
data[idx] = PVR2_COMPOSE_LE(hdw->cmd_buffer,idx*4);
}
data += chunkCnt;
dlen -= chunkCnt;
offs += chunkCnt;
}
return 0;
}
/* This prototype is set up to be compatible with the
cx2341x_mbox_func prototype in cx2341x.h, which should be in
kernels 2.6.18 or later. We do this so that we can enable
cx2341x.ko to write to our encoder (by handing it a pointer to this
function). For earlier kernels this doesn't really matter. */
/*
 * Execute one cx23416 firmware command through the ivtv-style mailbox.
 *
 * Serialized under hdw->ctl_lock.  The long comment inside describes
 * the mailbox protocol; the do{}while(0) body is re-entered (via
 * `continue`) for retries when the firmware times out waiting on us.
 * On any hard failure the encoder is flagged not-OK so the state
 * machine schedules a firmware reload.  Returns 0 or a negative errno.
 */
static int pvr2_encoder_cmd(void *ctxt,
u32 cmd,
int arg_cnt_send,
int arg_cnt_recv,
u32 *argp)
{
unsigned int poll_count;
unsigned int try_count = 0;
int retry_flag;
int ret = 0;
unsigned int idx;
/* These sizes look to be limited by the FX2 firmware implementation */
u32 wrData[16];
u32 rdData[16];
struct pvr2_hdw *hdw = (struct pvr2_hdw *)ctxt;
/*
The encoder seems to speak entirely using blocks 32 bit words.
In ivtv driver terms, this is a mailbox at MBOX_BASE which we
populate with data and watch what the hardware does with it.
The first word is a set of flags used to control the
transaction, the second word is the command to execute, the
third byte is zero (ivtv driver suggests that this is some
kind of return value), and the fourth byte is a specified
timeout (windows driver always uses 0x00060000 except for one
case when it is zero). All successive words are the argument
words for the command.
First, write out the entire set of words, with the first word
being zero.
Next, write out just the first word again, but set it to
IVTV_MBOX_DRIVER_DONE | IVTV_DRIVER_BUSY this time (which
probably means "go").
Next, read back the return count words. Check the first word,
which should have IVTV_MBOX_FIRMWARE_DONE set. If however
that bit is not set, then the command isn't done so repeat the
read until it is set.
Finally, write out just the first word again, but set it to
0x0 this time (which probably means "idle").
*/
if (arg_cnt_send > (ARRAY_SIZE(wrData) - 4)) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
"Failed to write cx23416 command"
" - too many input arguments"
" (was given %u limit %lu)",
arg_cnt_send, (long unsigned) ARRAY_SIZE(wrData) - 4);
return -EINVAL;
}
if (arg_cnt_recv > (ARRAY_SIZE(rdData) - 4)) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
"Failed to write cx23416 command"
" - too many return arguments"
" (was given %u limit %lu)",
arg_cnt_recv, (long unsigned) ARRAY_SIZE(rdData) - 4);
return -EINVAL;
}
LOCK_TAKE(hdw->ctl_lock); do {
if (!hdw->state_encoder_ok) {
ret = -EIO;
break;
}
/* retry entry point: rebuild and resubmit the whole mailbox */
retry_flag = 0;
try_count++;
ret = 0;
wrData[0] = 0;
wrData[1] = cmd;
wrData[2] = 0;
wrData[3] = 0x00060000;
for (idx = 0; idx < arg_cnt_send; idx++) {
wrData[idx+4] = argp[idx];
}
for (; idx < ARRAY_SIZE(wrData) - 4; idx++) {
wrData[idx+4] = 0;
}
ret = pvr2_encoder_write_words(hdw,MBOX_BASE,wrData,idx);
if (ret) break;
/* flip the flags word to "go" */
wrData[0] = IVTV_MBOX_DRIVER_DONE|IVTV_MBOX_DRIVER_BUSY;
ret = pvr2_encoder_write_words(hdw,MBOX_BASE,wrData,1);
if (ret) break;
/* poll (up to 1000 reads) for the firmware-done flag */
poll_count = 0;
while (1) {
poll_count++;
ret = pvr2_encoder_read_words(hdw,MBOX_BASE,rdData,
arg_cnt_recv+4);
if (ret) {
break;
}
if (rdData[0] & IVTV_MBOX_FIRMWARE_DONE) {
break;
}
if (rdData[0] && (poll_count < 1000)) continue;
if (!rdData[0]) {
/* flags read back zero: firmware gave up on us; retry */
retry_flag = !0;
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
"Encoder timed out waiting for us"
"; arranging to retry");
} else {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
"***WARNING*** device's encoder"
" appears to be stuck"
" (status=0x%08x)",rdData[0]);
}
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
"Encoder command: 0x%02x",cmd);
for (idx = 4; idx < arg_cnt_send; idx++) {
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
"Encoder arg%d: 0x%08x",
idx-3,wrData[idx]);
}
ret = -EBUSY;
break;
}
if (retry_flag) {
if (try_count < 20) continue;
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
"Too many retries...");
ret = -EBUSY;
}
if (ret) {
/* hard failure: mark the encoder dead so the state machine
 * arranges a firmware reload */
del_timer_sync(&hdw->encoder_run_timer);
hdw->state_encoder_ok = 0;
pvr2_trace(PVR2_TRACE_STBITS,
"State bit %s <-- %s",
"state_encoder_ok",
(hdw->state_encoder_ok ? "true" : "false"));
if (hdw->state_encoder_runok) {
hdw->state_encoder_runok = 0;
pvr2_trace(PVR2_TRACE_STBITS,
"State bit %s <-- %s",
"state_encoder_runok",
(hdw->state_encoder_runok ?
"true" : "false"));
}
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
"Giving up on command."
" This is normally recovered via a firmware"
" reload and re-initialization; concern"
" is only warranted if this happens repeatedly"
" and rapidly.");
break;
}
/* NOTE(review): dead store — immediately overwritten with 0x0 below
 * before the only write of wrData[0]; apparently vestigial */
wrData[0] = 0x7;
for (idx = 0; idx < arg_cnt_recv; idx++) {
argp[idx] = rdData[idx+4];
}
/* return the mailbox to idle */
wrData[0] = 0x0;
ret = pvr2_encoder_write_words(hdw,MBOX_BASE,wrData,1);
if (ret) break;
} while(0); LOCK_GIVE(hdw->ctl_lock);
return ret;
}
/*
 * Varargs convenience wrapper around pvr2_encoder_cmd() for commands
 * that take up to 12 u32 argument words and return no result words.
 */
static int pvr2_encoder_vcmd(struct pvr2_hdw *hdw, int cmd,
			     int args, ...)
{
	u32 argbuf[12];
	unsigned int i;
	va_list ap;

	if (args > ARRAY_SIZE(argbuf)) {
		pvr2_trace(
			PVR2_TRACE_ERROR_LEGS,
			"Failed to write cx23416 command"
			" - too many arguments"
			" (was given %u limit %lu)",
			args, (long unsigned) ARRAY_SIZE(argbuf));
		return -EINVAL;
	}

	/* marshal the variadic words into a flat array */
	va_start(ap, args);
	i = 0;
	while (i < args) {
		argbuf[i] = va_arg(ap, u32);
		i++;
	}
	va_end(ap);

	return pvr2_encoder_cmd(hdw, cmd, args, 0, argbuf);
}
/* This implements some extra setup for the encoder that seems to be
specific to the PVR USB2 hardware. */
/*
 * Issue the PVR-USB2-specific ENC_MISC incantations observed in the
 * Hauppauge Windows driver.  Errors from the individual commands are
 * OR-ed together; a non-zero return means at least one command failed.
 * The #if 0 sections preserve known-harmful/no-op variants for
 * documentation.
 */
static int pvr2_encoder_prep_config(struct pvr2_hdw *hdw)
{
int ret = 0;
int encMisc3Arg = 0;
#if 0
/* This inexplicable bit happens in the Hauppauge windows
driver (for both 24xxx and 29xxx devices). However I
currently see no difference in behavior with or without
this stuff. Leave this here as a note of its existence,
but don't use it. */
LOCK_TAKE(hdw->ctl_lock); do {
u32 dat[1];
dat[0] = 0x80000640;
pvr2_encoder_write_words(hdw,0x01fe,dat,1);
pvr2_encoder_write_words(hdw,0x023e,dat,1);
} while(0); LOCK_GIVE(hdw->ctl_lock);
#endif
/* Mike Isely <isely@pobox.com> 26-Jan-2006 The windows driver
sends the following list of ENC_MISC commands (for both
24xxx and 29xxx devices). Meanings are not entirely clear,
however without the ENC_MISC(3,1) command then we risk
random perpetual video corruption whenever the video input
breaks up for a moment (like when switching channels). */
#if 0
/* This ENC_MISC(5,0) command seems to hurt 29xxx sync
performance on channel changes, but is not a problem on
24xxx devices. */
ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 5,0,0,0);
#endif
/* This ENC_MISC(3,encMisc3Arg) command is critical - without
it there will eventually be video corruption. Also, the
saa7115 case is strange - the Windows driver is passing 1
regardless of device type but if we have 1 for saa7115
devices the video turns sluggish. */
if (hdw->hdw_desc->flag_has_cx25840) {
encMisc3Arg = 1;
} else {
encMisc3Arg = 0;
}
ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 3,
encMisc3Arg,0,0);
ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 8,0,0,0);
#if 0
/* This ENC_MISC(4,1) command is poisonous, so it is commented
out. But I'm leaving it here anyway to document its
existence in the Windows driver. The effect of this
command is that apps displaying the stream become sluggish
with stuttering video. */
ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 4,1,0,0);
#endif
ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4, 0,3,0,0);
ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC,4,15,0,0,0);
/* prevent the PTSs from slowly drifting away in the generated
MPEG stream */
ret |= pvr2_encoder_vcmd(hdw, CX2341X_ENC_MISC, 2, 4, 1);
return ret;
}
/*
 * Push the pending MPEG parameter set (enc_ctl_state) to the encoder
 * via the cx2341x module, diffing against the last-known-applied state
 * when one exists.  On success the applied state is cached so future
 * updates can be incremental.
 */
int pvr2_encoder_adjust(struct pvr2_hdw *hdw)
{
int ret;
ret = cx2341x_update(hdw,pvr2_encoder_cmd,
(hdw->enc_cur_valid ? &hdw->enc_cur_state : NULL),
&hdw->enc_ctl_state);
if (ret) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"Error from cx2341x module code=%d",ret);
} else {
/* remember what the hardware now holds */
memcpy(&hdw->enc_cur_state,&hdw->enc_ctl_state,
sizeof(struct cx2341x_mpeg_params));
hdw->enc_cur_valid = !0;
}
return ret;
}
/*
 * Full encoder configuration sequence: PVR-USB2 quirk setup, vsync line
 * count (chip-dependent), event notification, VBI line setup, parameter
 * push, and finally input initialization.  Returns 0 or a negative
 * errno from the first failing step.
 */
int pvr2_encoder_configure(struct pvr2_hdw *hdw)
{
int ret;
int val;
pvr2_trace(PVR2_TRACE_ENCODER,"pvr2_encoder_configure"
" (cx2341x module)");
hdw->enc_ctl_state.port = CX2341X_PORT_STREAMING;
hdw->enc_ctl_state.width = hdw->res_hor_val;
hdw->enc_ctl_state.height = hdw->res_ver_val;
hdw->enc_ctl_state.is_50hz = ((hdw->std_mask_cur & V4L2_STD_525_60) ?
0 : 1);
ret = 0;
ret |= pvr2_encoder_prep_config(hdw);
/* saa7115: 0xf0 */
val = 0xf0;
if (hdw->hdw_desc->flag_has_cx25840) {
/* ivtv cx25840: 0x140 */
val = 0x140;
}
if (!ret) ret = pvr2_encoder_vcmd(
hdw,CX2341X_ENC_SET_NUM_VSYNC_LINES, 2,
val, val);
/* setup firmware to notify us about some events (don't know why...) */
if (!ret) ret = pvr2_encoder_vcmd(
hdw,CX2341X_ENC_SET_EVENT_NOTIFICATION, 4,
0, 0, 0x10000000, 0xffffffff);
if (!ret) ret = pvr2_encoder_vcmd(
hdw,CX2341X_ENC_SET_VBI_LINE, 5,
0xffffffff,0,0,0,0);
if (ret) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"Failed to configure cx23416");
return ret;
}
ret = pvr2_encoder_adjust(hdw);
if (ret) return ret;
ret = pvr2_encoder_vcmd(
hdw, CX2341X_ENC_INITIALIZE_INPUT, 0);
if (ret) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
"Failed to initialize cx23416 video input");
return ret;
}
return 0;
}
/*
 * Kick off a capture: unmask interrupts, set video mute according to
 * the input (radio captures mute video), then issue START_CAPTURE with
 * stream-type-specific arguments.  The mpeg and default cases are
 * intentionally identical; default covers stream types not yet handled
 * specially.
 */
int pvr2_encoder_start(struct pvr2_hdw *hdw)
{
int status;
/* unmask some interrupts */
pvr2_write_register(hdw, 0x0048, 0xbfffffff);
pvr2_encoder_vcmd(hdw,CX2341X_ENC_MUTE_VIDEO,1,
hdw->input_val == PVR2_CVAL_INPUT_RADIO ? 1 : 0);
switch (hdw->active_stream_type) {
case pvr2_config_vbi:
status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2,
0x01,0x14);
break;
case pvr2_config_mpeg:
status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2,
0,0x13);
break;
default: /* Unhandled cases for now */
status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2,
0,0x13);
break;
}
return status;
}
/* Stop capture on the cx23416.  Mirrors pvr2_encoder_start(): MPEG and
 * unhandled stream types use identical STOP_CAPTURE arguments. */
int pvr2_encoder_stop(struct pvr2_hdw *hdw)
{
	int status;

	/* mask all interrupts */
	pvr2_write_register(hdw, 0x0048, 0xffffffff);

	if (hdw->active_stream_type == pvr2_config_vbi)
		status = pvr2_encoder_vcmd(hdw, CX2341X_ENC_STOP_CAPTURE, 3,
					   0x01, 0x01, 0x14);
	else
		/* pvr2_config_mpeg and any unhandled stream type */
		status = pvr2_encoder_vcmd(hdw, CX2341X_ENC_STOP_CAPTURE, 3,
					   0x01, 0, 0x13);

	return status;
}
/*
Stuff for Emacs to see, in order to encourage consistent editing style:
*** Local Variables: ***
*** mode: c ***
*** fill-column: 70 ***
*** tab-width: 8 ***
*** c-basic-offset: 8 ***
*** End: ***
*/
| gpl-2.0 |
TeamExodus/kernel_moto_shamu | drivers/mtd/nand/nand_bch.c | 9440 | 7099 | /*
* This file provides ECC correction for more than 1 bit per block of data,
* using binary BCH codes. It relies on the generic BCH library lib/bch.c.
*
* Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
*
* This file is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 or (at your option) any
* later version.
*
* This file is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this file; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_bch.h>
#include <linux/bch.h>
/**
 * struct nand_bch_control - private NAND BCH control structure
 * @bch: BCH control structure
 * @ecclayout: private ecc layout for this BCH configuration
 * @errloc: error location array
 * @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
 */
struct nand_bch_control {
	struct bch_control *bch;	/* handle returned by init_bch() */
	struct nand_ecclayout ecclayout; /* built in nand_bch_init() when the
					  * caller supplies no layout */
	unsigned int *errloc;		/* scratch for up to t bit positions
					  * reported by decode_bch() */
	unsigned char *eccmask;		/* inverted ECC of an all-0xff block */
};
/**
* nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
* @mtd: MTD block structure
* @buf: input buffer with raw data
* @code: output buffer with ECC
*/
/**
 * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
 * @mtd: MTD block structure
 * @buf: input buffer with raw data
 * @code: output buffer with ECC
 */
int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
			   unsigned char *code)
{
	const struct nand_chip *chip = mtd->priv;
	struct nand_bch_control *nbc = chip->ecc.priv;
	unsigned int byte;

	memset(code, 0, chip->ecc.bytes);
	encode_bch(nbc->bch, buf, chip->ecc.size, code);

	/* apply mask so that an erased page is a valid codeword */
	for (byte = 0; byte < chip->ecc.bytes; byte++)
		code[byte] ^= nbc->eccmask[byte];

	return 0;
}
EXPORT_SYMBOL(nand_bch_calculate_ecc);
/**
* nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
* @mtd: MTD block structure
* @buf: raw data read from the chip
* @read_ecc: ECC from the chip
* @calc_ecc: the ECC calculated from raw data
*
* Detect and correct bit errors for a data byte block
*/
/**
 * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
 * @mtd: MTD block structure
 * @buf: raw data read from the chip
 * @read_ecc: ECC from the chip
 * @calc_ecc: the ECC calculated from raw data
 *
 * Detect and correct bit errors for a data byte block.  Returns the
 * number of corrected bitflips, or -1 when the block is uncorrectable.
 */
int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
			  unsigned char *read_ecc, unsigned char *calc_ecc)
{
	const struct nand_chip *chip = mtd->priv;
	struct nand_bch_control *nbc = chip->ecc.priv;
	unsigned int *errloc = nbc->errloc;
	int i, count;

	count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
			   NULL, errloc);
	if (count < 0) {
		printk(KERN_ERR "ecc unrecoverable error\n");
		return -1;
	}

	/* count >= 0: fix each reported bit position (loop is a no-op
	 * when there were no errors) */
	for (i = 0; i < count; i++) {
		if (errloc[i] < (chip->ecc.size*8))
			/* error is located in data, correct it */
			buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
		/* else error in ecc, no action needed */
		pr_debug("%s: corrected bitflip %u\n", __func__,
			 errloc[i]);
	}

	return count;
}
EXPORT_SYMBOL(nand_bch_correct_data);
/**
* nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
* @mtd: MTD block structure
* @eccsize: ecc block size in bytes
* @eccbytes: ecc length in bytes
* @ecclayout: output default layout
*
* Returns:
* a pointer to a new NAND BCH control structure, or NULL upon failure
*
* Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes
* are used to compute BCH parameters m (Galois field order) and t (error
* correction capability). @eccbytes should be equal to the number of bytes
* required to store m*t bits, where m is such that 2^m-1 > @eccsize*8.
*
* Example: to configure 4 bit correction per 512 bytes, you should pass
* @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
* @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
*/
struct nand_bch_control *
nand_bch_init(struct mtd_info *mtd, unsigned int eccsize, unsigned int eccbytes,
	      struct nand_ecclayout **ecclayout)
{
	unsigned int m, t, eccsteps, i;
	struct nand_ecclayout *layout;
	struct nand_bch_control *nbc = NULL;
	unsigned char *erased_page;

	if (!eccsize || !eccbytes) {
		printk(KERN_WARNING "ecc parameters not supplied\n");
		goto fail;
	}

	/* m = Galois field order (smallest m with 2^m-1 > eccsize*8),
	 * t = correction capability implied by the available ecc bytes */
	m = fls(1+8*eccsize);
	t = (eccbytes*8)/m;

	nbc = kzalloc(sizeof(*nbc), GFP_KERNEL);
	if (!nbc)
		goto fail;

	nbc->bch = init_bch(m, t, 0);
	if (!nbc->bch)
		goto fail;

	/* verify that eccbytes has the expected value */
	if (nbc->bch->ecc_bytes != eccbytes) {
		printk(KERN_WARNING "invalid eccbytes %u, should be %u\n",
		       eccbytes, nbc->bch->ecc_bytes);
		goto fail;
	}

	eccsteps = mtd->writesize/eccsize;

	/* if no ecc placement scheme was provided, build one */
	if (!*ecclayout) {
		/* handle large page devices only */
		if (mtd->oobsize < 64) {
			printk(KERN_WARNING "must provide an oob scheme for "
			       "oobsize %d\n", mtd->oobsize);
			goto fail;
		}
		layout = &nbc->ecclayout;
		layout->eccbytes = eccsteps*eccbytes;
		/* reserve 2 bytes for bad block marker */
		if (layout->eccbytes+2 > mtd->oobsize) {
			printk(KERN_WARNING "no suitable oob scheme available "
			       "for oobsize %d eccbytes %u\n", mtd->oobsize,
			       eccbytes);
			goto fail;
		}
		/* put ecc bytes at oob tail */
		for (i = 0; i < layout->eccbytes; i++)
			layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
		layout->oobfree[0].offset = 2;
		layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
		*ecclayout = layout;
	}

	/* sanity checks */
	if (8*(eccsize+eccbytes) >= (1 << m)) {
		printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
		goto fail;
	}
	if ((*ecclayout)->eccbytes != (eccsteps*eccbytes)) {
		printk(KERN_WARNING "invalid ecc layout\n");
		goto fail;
	}

	nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
	nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL);
	if (!nbc->eccmask || !nbc->errloc)
		goto fail;
	/*
	 * compute and store the inverted ecc of an erased ecc block
	 */
	erased_page = kmalloc(eccsize, GFP_KERNEL);
	if (!erased_page)
		goto fail;

	memset(erased_page, 0xff, eccsize);
	memset(nbc->eccmask, 0, eccbytes);
	encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
	kfree(erased_page);

	/* inverting lets nand_bch_calculate_ecc() XOR this mask in so an
	 * all-0xff (erased) page decodes as a valid codeword */
	for (i = 0; i < eccbytes; i++)
		nbc->eccmask[i] ^= 0xff;

	return nbc;
fail:
	/* nand_bch_free() tolerates NULL and partially-built state */
	nand_bch_free(nbc);
	return NULL;
}
EXPORT_SYMBOL(nand_bch_init);
/**
* nand_bch_free - [NAND Interface] Release NAND BCH ECC resources
* @nbc: NAND BCH control structure
*/
/**
 * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources
 * @nbc: NAND BCH control structure (may be NULL)
 */
void nand_bch_free(struct nand_bch_control *nbc)
{
	if (!nbc)
		return;

	free_bch(nbc->bch);
	kfree(nbc->errloc);
	kfree(nbc->eccmask);
	kfree(nbc);
}
EXPORT_SYMBOL(nand_bch_free);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
MODULE_DESCRIPTION("NAND software BCH ECC support");
| gpl-2.0 |
adrian17/gcc | gcc/testsuite/gcc.c-torture/compile/980408-1.c | 225 | 4083 | typedef struct _RunlengthPacket
{
unsigned short
red,
green,
blue,
length;
unsigned short
index;
} RunlengthPacket;
typedef struct _Image
{
int
status,
temporary;
char
filename[1664 ];
long int
filesize;
int
pipe;
char
magick[1664 ],
*comments,
*label,
*text;
unsigned int
matte;
unsigned int
columns,
rows,
depth;
unsigned int
scene,
number_scenes;
char
*montage,
*directory;
unsigned int
colors;
double
gamma;
float
x_resolution,
y_resolution;
unsigned int
mean_error_per_pixel;
double
normalized_mean_error,
normalized_maximum_error;
unsigned long
total_colors;
char
*signature;
unsigned int
packets,
runlength,
packet_size;
unsigned char
*packed_pixels;
long int
magick_time;
char
magick_filename[1664 ];
unsigned int
magick_columns,
magick_rows;
char
*geometry,
*page;
unsigned int
dispose,
delay,
iterations;
unsigned int
orphan;
struct _Image
*previous,
*list,
*next;
} Image;
Image *MinifyImage(Image *image)
{
Image
*minified_image;
register RunlengthPacket
*q,
*s,
*s0,
*s1,
*s2,
*s3;
register unsigned int
x;
unsigned int
blue,
green,
red;
unsigned long
total_matte,
total_blue,
total_green,
total_red;
unsigned short
index;
for (x=0; x < (image->columns-1); x+=2)
{
total_red=0;
total_green=0;
total_blue=0;
total_matte=0;
s=s0;
total_red+=( 3 )*(s->red); total_green+=( 3 )*(s->green); total_blue+=( 3 )*(s->blue); total_matte+=( 3 )*(s->index); s++; ; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; total_red+=( 3 )*(s->red); total_green+=( 3 )*(s->green); total_blue+=( 3 )*(s->blue); total_matte+=( 3 )*(s->index); s++; ;
s=s1;
total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; total_red+=( 15 )*(s->red); total_green+=( 15 )*(s->green); total_blue+=( 15 )*(s->blue); total_matte+=( 15 )*(s->index); s++; ; total_red+=( 15 )*(s->red); total_green+=( 15 )*(s->green); total_blue+=( 15 )*(s->blue); total_matte+=( 15 )*(s->index); s++; ; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ;
s=s2;
total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; total_red+=( 15 )*(s->red); total_green+=( 15 )*(s->green); total_blue+=( 15 )*(s->blue); total_matte+=( 15 )*(s->index); s++; ; total_red+=( 15 )*(s->red); total_green+=( 15 )*(s->green); total_blue+=( 15 )*(s->blue); total_matte+=( 15 )*(s->index); s++; ; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ;
s=s3;
total_red+=( 3 )*(s->red); total_green+=( 3 )*(s->green); total_blue+=( 3 )*(s->blue); total_matte+=( 3 )*(s->index); s++; ; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; total_red+=( 7 )*(s->red); total_green+=( 7 )*(s->green); total_blue+=( 7 )*(s->blue); total_matte+=( 7 )*(s->index); s++; ; total_red+=( 3 )*(s->red); total_green+=( 3 )*(s->green); total_blue+=( 3 )*(s->blue); total_matte+=( 3 )*(s->index); s++; ;
red=(unsigned short) ((total_red+63) >> 7);
green=(unsigned short) ((total_green+63) >> 7);
blue=(unsigned short) ((total_blue+63) >> 7);
index=(unsigned short) ((total_matte+63) >> 7);
if ((red == q->red) && (green == q->green) && (blue == q->blue) &&
(index == q->index) && ((int) q->length < 65535L ))
q->length++;
}
return(minified_image);
}
| gpl-2.0 |
buenajuan300/android_kernel_samsung_grandprimevelte | drivers/char/hw_random/atmel-rng.c | 481 | 3311 | /*
* Copyright (c) 2011 Peter Korsgaard <jacmet@sunsite.dk>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>
/* TRNG register offsets */
#define TRNG_CR		0x00	/* control register */
#define TRNG_ISR	0x1c	/* status: bit 0 = data ready (see read()) */
#define TRNG_ODATA	0x50	/* 32-bit output data */
/* Key that must accompany writes to TRNG_CR ("RNG" in ASCII) */
#define TRNG_KEY	0x524e4700 /* RNG */
struct atmel_trng {
	struct clk *clk;	/* peripheral clock, enabled in probe() */
	void __iomem *base;	/* mapped register window */
	struct hwrng rng;	/* registration with the hwrng core */
};
/* hwrng read callback: return one 32-bit word when available, else 0.
 * Never blocks, regardless of @wait. */
static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
			   bool wait)
{
	struct atmel_trng *trng = container_of(rng, struct atmel_trng, rng);
	u32 *data = buf;

	/* nothing ready yet? */
	if (!(readl(trng->base + TRNG_ISR) & 1))
		return 0;

	*data = readl(trng->base + TRNG_ODATA);
	/*
	 * Dummy ISR read: make sure data-ready is only set again AFTER
	 * the next word is available, in case it got set between our
	 * check above and the ODATA read, so we don't risk re-reading
	 * the same word.
	 */
	readl(trng->base + TRNG_ISR);
	return 4;
}
/* Probe: map the TRNG registers, enable its clock, start generation and
 * register with the hwrng core.  Memory resources are devm-managed; the
 * clock is not, so it must be released on every error path and in
 * remove(). */
static int atmel_trng_probe(struct platform_device *pdev)
{
	struct atmel_trng *trng;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
	if (!trng)
		return -ENOMEM;

	if (!devm_request_mem_region(&pdev->dev, res->start,
				     resource_size(res), pdev->name))
		return -EBUSY;

	trng->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!trng->base)
		return -EBUSY;

	trng->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(trng->clk))
		return PTR_ERR(trng->clk);
	ret = clk_enable(trng->clk);
	if (ret)
		goto err_enable;

	/* key plus bit 0: start random number generation */
	writel(TRNG_KEY | 1, trng->base + TRNG_CR);
	trng->rng.name = pdev->name;
	trng->rng.read = atmel_trng_read;

	ret = hwrng_register(&trng->rng);
	if (ret)
		goto err_register;

	platform_set_drvdata(pdev, trng);

	return 0;

err_register:
	clk_disable(trng->clk);
err_enable:
	clk_put(trng->clk);
	return ret;
}
static int atmel_trng_remove(struct platform_device *pdev)
{
	struct atmel_trng *trng = platform_get_drvdata(pdev);

	/* Deregister from the hwrng core before touching the hardware */
	hwrng_unregister(&trng->rng);

	/* Key without the enable bit - presumably stops generation;
	 * mirrors the TRNG_KEY | 1 write in probe() */
	writel(TRNG_KEY, trng->base + TRNG_CR);
	clk_disable(trng->clk);
	clk_put(trng->clk);

	return 0;
}
#ifdef CONFIG_PM
/* Suspend/resume just gate the peripheral clock; the register state
 * (enable bit written in probe) is assumed to survive - TODO confirm
 * on parts where the TRNG loses state across suspend. */
static int atmel_trng_suspend(struct device *dev)
{
	struct atmel_trng *trng = dev_get_drvdata(dev);

	clk_disable(trng->clk);

	return 0;
}

static int atmel_trng_resume(struct device *dev)
{
	struct atmel_trng *trng = dev_get_drvdata(dev);

	return clk_enable(trng->clk);
}

static const struct dev_pm_ops atmel_trng_pm_ops = {
	.suspend	= atmel_trng_suspend,
	.resume		= atmel_trng_resume,
};
#endif /* CONFIG_PM */
/* Platform driver glue; PM callbacks only when CONFIG_PM is set */
static struct platform_driver atmel_trng_driver = {
	.probe		= atmel_trng_probe,
	.remove		= atmel_trng_remove,
	.driver		= {
		.name	= "atmel-trng",
		.owner	= THIS_MODULE,
#ifdef CONFIG_PM
		.pm	= &atmel_trng_pm_ops,
#endif /* CONFIG_PM */
	},
};
module_platform_driver(atmel_trng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
MODULE_DESCRIPTION("Atmel true random number generator driver");
| gpl-2.0 |
JB1tz/Moto_omap3_ics_kernel | drivers/net/sfc/falcon.c | 481 | 90249 | /****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2008 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/mii.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "mac.h"
#include "spi.h"
#include "falcon.h"
#include "falcon_hwdefs.h"
#include "falcon_io.h"
#include "mdio_10g.h"
#include "phy.h"
#include "boards.h"
#include "workarounds.h"
/* Falcon hardware control.
* Falcon is the internal codename for the SFC4000 controller that is
* present in SFE400X evaluation boards
*/
/**
 * struct falcon_nic_data - Falcon NIC state
 * @next_buffer_table: First available buffer table id
 * @pci_dev2: The secondary PCI device if present
 * @i2c_data: Operations and state for I2C bit-bashing algorithm
 * @int_error_count: Number of internal errors seen recently
 * @int_error_expire: Time at which error count will be expired
 */
struct falcon_nic_data {
	unsigned next_buffer_table;	/* grows monotonically; buffer IDs are
					 * never reused (see
					 * falcon_alloc_special_buffer()) */
	struct pci_dev *pci_dev2;
	struct i2c_algo_bit_data i2c_data;
	unsigned int_error_count;
	unsigned long int_error_expire;
};
/**************************************************************************
*
* Configurable values
*
**************************************************************************
*/
static int disable_dma_stats;
/* This is set to 16 for a good reason. In summary, if larger than
* 16, the descriptor cache holds more than a default socket
* buffer's worth of packets (for UDP we can only have at most one
* socket buffer's worth outstanding). This combined with the fact
* that we only get 1 TX event per descriptor cache means the NIC
* goes idle.
*/
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 0
#define TX_DC_BASE 0x130000
#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 2
#define RX_DC_BASE 0x100000
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
* 8 KB, 16-bit address, 32 B write block */
large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
| (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
| (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
/* Default flash device: Atmel AT25F1024
* 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
| (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
| (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
| (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
| (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
/* RX FIFO XOFF watermark
*
* When the amount of the RX FIFO increases used increases past this
* watermark send XOFF. Only used if RX flow control is enabled (ethtool -A)
* This also has an effect on RX/TX arbitration
*/
static int rx_xoff_thresh_bytes = -1;
module_param(rx_xoff_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
/* RX FIFO XON watermark
*
* When the amount of the RX FIFO used decreases below this
* watermark send XON. Only used if TX flow control is enabled (ethtool -A)
* This also has an effect on RX/TX arbitration
*/
static int rx_xon_thresh_bytes = -1;
module_param(rx_xon_thresh_bytes, int, 0644);
MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
/* TX descriptor ring size - min 512 max 4k */
#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
#define FALCON_TXD_RING_SIZE 1024
#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
/* RX descriptor ring size - min 512 max 4k */
#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
#define FALCON_RXD_RING_SIZE 1024
#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
/* Event queue size - max 32k */
#define FALCON_EVQ_ORDER EVQ_SIZE_4K
#define FALCON_EVQ_SIZE 4096
#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
/* If FALCON_MAX_INT_ERRORS internal errors occur within
* FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
* disable it.
*/
#define FALCON_INT_ERROR_EXPIRE 3600
#define FALCON_MAX_INT_ERRORS 5
/* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times
*/
#define FALCON_FLUSH_INTERVAL 10
#define FALCON_FLUSH_POLL_COUNT 100
/**************************************************************************
*
* Falcon constants
*
**************************************************************************
*/
/* DMA address mask */
#define FALCON_DMA_MASK DMA_BIT_MASK(46)
/* TX DMA length mask (13-bit) */
#define FALCON_TX_DMA_MASK (4096 - 1)
/* Size and alignment of special buffers (4KB) */
#define FALCON_BUF_SIZE 4096
/* Dummy SRAM size code */
#define SRM_NB_BSZ_ONCHIP_ONLY (-1)
#define FALCON_IS_DUAL_FUNC(efx) \
(falcon_rev(efx) < FALCON_REV_B0)
/**************************************************************************
*
* Falcon hardware access
*
**************************************************************************/
/* Read the current event from the event queue */
static inline efx_qword_t *falcon_event(struct efx_channel *channel,
					unsigned int index)
{
	efx_qword_t *ring = (efx_qword_t *)channel->eventq.addr;

	return ring + index;
}

/* See if an event is present
 *
 * Both dwords of the event are checked against the all-ones pattern
 * written when the entry was cleared; no valid event has all ones in
 * either half, so this is robust against write reordering.
 *
 * A single 64-bit comparison would be incorrect: even though the CPU
 * read is atomic, the DMA write may not be.
 */
static inline int falcon_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
/**************************************************************************
*
* I2C bus - this is a bit-bashing interface using GPIO pins
* Note that it uses the output enables to tristate the outputs
* SDA is the data pin and SCL is the clock
*
**************************************************************************
*/
/* Bit-banged I2C over GPIO.  NOTE: the dumped source contained the
 * mojibake character U+00AE where "&reg" belongs (HTML-entity decoding
 * of "&reg," during extraction); restored here so the code compiles.
 *
 * Lines are driven low by enabling the output (which outputs 0) and
 * released high by tristating it, hence the !state on the OEN fields. */
static void falcon_setsda(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
	EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state);
	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
}

static void falcon_setscl(void *data, int state)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
	EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state);
	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
}

static int falcon_getsda(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
	return EFX_OWORD_FIELD(reg, GPIO3_IN);
}

static int falcon_getscl(void *data)
{
	struct efx_nic *efx = (struct efx_nic *)data;
	efx_oword_t reg;

	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
	return EFX_OWORD_FIELD(reg, GPIO0_IN);
}
/* i2c-algo-bit glue: SDA is GPIO3, SCL is GPIO0; both are open-drain
 * style (driven low via output-enable, released to float high) */
static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
	.setsda		= falcon_setsda,
	.setscl		= falcon_setscl,
	.getsda		= falcon_getsda,
	.getscl		= falcon_getscl,
	.udelay		= 5,	/* half-bit delay in microseconds */
	/* Wait up to 50 ms for slave to let us pull SCL high */
	.timeout	= DIV_ROUND_UP(HZ, 20),
};
/**************************************************************************
*
* Falcon special buffer handling
* Special buffers are used for event queues and the TX and RX
* descriptor rings.
*
*************************************************************************/
/*
 * Initialise a Falcon special buffer
 *
 * Programs the NIC's buffer table with one entry per 4KB page of a
 * buffer previously allocated via falcon_alloc_special_buffer(), so it
 * can be used for event queues, descriptor rings etc.
 */
static void
falcon_init_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	dma_addr_t dma_addr;
	int index;
	int page;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write one buffer table entry per 4KB page */
	for (page = 0; page < buffer->entries; page++) {
		index = buffer->index + page;
		dma_addr = buffer->dma_addr + (page * 4096);
		EFX_LOG(efx, "mapping special buffer %d at %llx\n",
			index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_4(buf_desc,
				     IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
				     BUF_ADR_REGION, 0,
				     BUF_ADR_FBUF, (dma_addr >> 12),
				     BUF_OWNER_ID_FBUF, 0);
		falcon_write_sram(efx, &buf_desc, index);
	}
}
/* Unmaps a buffer from Falcon and clears the buffer table entries */
static void
falcon_fini_special_buffer(struct efx_nic *efx,
			   struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int first, last;

	if (!buffer->entries)
		return;

	first = buffer->index;
	last = buffer->index + buffer->entries - 1;

	EFX_LOG(efx, "unmapping special buffers %d-%d\n",
		buffer->index, buffer->index + buffer->entries - 1);

	/* One command clears the whole ID range */
	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     BUF_UPD_CMD, 0,
			     BUF_CLR_CMD, 1,
			     BUF_CLR_END_ID, last,
			     BUF_CLR_START_ID, first);
	falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER);
}
/*
 * Allocate a new Falcon special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into Falcon's buffer table.
 *
 * This call will allocate 4KB buffers, since Falcon can't use 8KB
 * buffers for event queues and descriptor rings.
 */
static int falcon_alloc_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer,
				       unsigned int len)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	/* Round up to a whole number of 4KB pages */
	len = ALIGN(len, FALCON_BUF_SIZE);

	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / FALCON_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID - IDs grow monotonically and are never
	 * reused for the lifetime of the NIC */
	buffer->index = nic_data->next_buffer_table;
	nic_data->next_buffer_table += buffer->entries;

	EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
/* Release the DMA memory behind a special buffer (counterpart of
 * falcon_alloc_special_buffer()); safe to call on an empty buffer */
static void falcon_free_special_buffer(struct efx_nic *efx,
				       struct efx_special_buffer *buffer)
{
	if (buffer->addr == NULL)
		return;

	EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
		"(virt %p phys %llx)\n", buffer->index,
		buffer->index + buffer->entries - 1,
		(u64)buffer->dma_addr, buffer->len,
		buffer->addr, (u64)virt_to_phys(buffer->addr));

	pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
			    buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}
/**************************************************************************
*
* Falcon generic buffer handling
* These buffers are used for interrupt status and MAC stats
*
**************************************************************************/
/* Allocate and zero a generic DMA buffer (used for interrupt status
 * and MAC stats).  Returns 0 or -ENOMEM. */
static int falcon_alloc_buffer(struct efx_nic *efx,
			       struct efx_buffer *buffer, unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (buffer->addr == NULL)
		return -ENOMEM;

	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

/* Free a generic DMA buffer; no-op if it was never allocated */
static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr == NULL)
		return;

	pci_free_consistent(efx->pci_dev, buffer->len,
			    buffer->addr, buffer->dma_addr);
	buffer->addr = NULL;
}
/**************************************************************************
*
* Falcon TX path
*
**************************************************************************/
/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
					  unsigned int index)
{
	return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring.
 * (The dumped source had the mojibake character U+00AE where "&reg"
 * belongs - HTML-entity corruption - restored here.) */
static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
	EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr);
	falcon_writel_page(tx_queue->efx, &reg,
			   TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue);
}
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void falcon_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;

	/* Caller guarantees at least one entry has been inserted */
	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
		buffer = &tx_queue->buffer[write_ptr];
		txd = falcon_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_5(*txd,
				     TX_KER_PORT, 0,
				     TX_KER_CONT, buffer->continuation,
				     TX_KER_BYTE_CNT, buffer->len,
				     TX_KER_BUF_REGION, 0,
				     TX_KER_BUF_ADR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb();	/* Ensure descriptors are written before they are fetched */
	/* Single doorbell write covers all descriptors pushed above */
	falcon_notify_tx_desc(tx_queue);
}
/* Allocate hardware resources for a TX queue */
int falcon_probe_tx(struct efx_tx_queue *tx_queue)
{
	unsigned int ring_bytes = FALCON_TXD_RING_SIZE * sizeof(efx_qword_t);

	return falcon_alloc_special_buffer(tx_queue->efx, &tx_queue->txd,
					   ring_bytes);
}
/* Pin a TX descriptor ring into the buffer table and push its pointer
 * table entry to the NIC.  (Restores "&reg" where the dumped source had
 * the mojibake character U+00AE from HTML-entity corruption.) */
void falcon_init_tx(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_desc_ptr;
	struct efx_nic *efx = tx_queue->efx;

	tx_queue->flushed = false;

	/* Pin TX descriptor ring */
	falcon_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(tx_desc_ptr,
			      TX_DESCQ_EN, 1,
			      TX_ISCSI_DDIG_EN, 0,
			      TX_ISCSI_HDIG_EN, 0,
			      TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      TX_DESCQ_EVQ_ID, tx_queue->channel->channel,
			      TX_DESCQ_OWNER_ID, 0,
			      TX_DESCQ_LABEL, tx_queue->queue,
			      TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
			      TX_DESCQ_TYPE, 0,
			      TX_NON_IP_DROP_DIS_B0, 1);

	if (falcon_rev(efx) >= FALCON_REV_B0) {
		/* B0+: per-queue checksum offload control lives in the
		 * descriptor pointer table entry itself */
		int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum);
		EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum);
	}

	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			   tx_queue->queue);

	if (falcon_rev(efx) < FALCON_REV_B0) {
		/* A1: checksum offload is a per-queue bitmap register */
		efx_oword_t reg;

		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);

		falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
		if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
	}
}
/* Ask the NIC to flush a TX descriptor queue */
static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	efx_oword_t tx_flush_descq;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     TX_FLUSH_DESCQ_CMD, 1,
			     TX_FLUSH_DESCQ, tx_queue->queue);
	falcon_write(tx_queue->efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
}

/* Detach a TX queue from the NIC and unpin its ring */
void falcon_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(!tx_queue->flushed);

	/* Zero the pointer table entry to remove the ring from the card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			   tx_queue->queue);

	/* Unpin TX descriptor ring */
	falcon_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void falcon_remove_tx(struct efx_tx_queue *tx_queue)
{
	falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
*
* Falcon RX path
*
**************************************************************************/
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
					  unsigned int index)
{
	efx_qword_t *ring = (efx_qword_t *)rx_queue->rxd.addr;

	return ring + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
					unsigned index)
{
	efx_qword_t *rxd = falcon_rx_desc(rx_queue, index);
	struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue, index);

	EFX_POPULATE_QWORD_3(*rxd,
			     RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     RX_KER_BUF_REGION, 0,
			     RX_KER_BUF_ADR, rx_buf->dma_addr);
}
/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.  (Restores "&reg" where the dumped source had the
 * mojibake character U+00AE from HTML-entity corruption.)
 */
void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	efx_dword_t reg;
	unsigned write_ptr;

	/* Build hardware descriptors for everything added since the
	 * last notification */
	while (rx_queue->notified_count != rx_queue->added_count) {
		falcon_build_rx_desc(rx_queue,
				     rx_queue->notified_count &
				     FALCON_RXD_RING_MASK);
		++rx_queue->notified_count;
	}

	wmb();	/* descriptors must be visible before the doorbell */
	write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
	EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr);
	falcon_writel_page(rx_queue->efx, &reg,
			   RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue);
}
/* Allocate hardware resources for an RX queue */
int falcon_probe_rx(struct efx_rx_queue *rx_queue)
{
	unsigned int ring_bytes = FALCON_RXD_RING_SIZE * sizeof(efx_qword_t);

	return falcon_alloc_special_buffer(rx_queue->efx, &rx_queue->rxd,
					   ring_bytes);
}
/* Pin an RX descriptor ring into the buffer table and push its pointer
 * table entry to the NIC */
void falcon_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
	/* iSCSI digest offload only exists from B0 onwards */
	bool iscsi_digest_en = is_b0;

	EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
		rx_queue->queue, rx_queue->rxd.index,
		rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = false;

	/* Pin RX descriptor ring */
	falcon_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      RX_DESCQ_EVQ_ID, rx_queue->channel->channel,
			      RX_DESCQ_OWNER_ID, 0,
			      RX_DESCQ_LABEL, rx_queue->queue,
			      RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
			      RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      RX_DESCQ_JUMBO, !is_b0,
			      RX_DESCQ_EN, 1);
	falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			   rx_queue->queue);
}
/* Ask the hardware to flush the given RX descriptor queue.  Completion
 * (or failure) is reported later via a driver event; see
 * falcon_poll_flush_events(). */
static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     RX_FLUSH_DESCQ_CMD, 1,
			     RX_FLUSH_DESCQ, rx_queue->queue);
	falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
}
/* Tear down an RX queue: remove its descriptor-queue pointer from the
 * card and unpin its buffer-table entries.  The queue must already have
 * been flushed (falcon_flush_queues()). */
void falcon_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(!rx_queue->flushed);
	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			   rx_queue->queue);
	/* Unpin RX descriptor ring */
	falcon_fini_special_buffer(efx, &rx_queue->rxd);
}
/* Release the buffer-table entries backing the RX descriptor ring */
void falcon_remove_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;

	falcon_free_special_buffer(efx, &rx_queue->rxd);
}
/**************************************************************************
*
* Falcon event queue processing
* Event queues are processed by per-channel tasklets.
*
**************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 *
 * Note that EVQ_RPTR_REG contains the index of the "last read" event,
 * whereas channel->eventq_read_ptr contains the index of the "next to
 * read" event.
 */
void falcon_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
	/* Fixed mis-encoded "&reg" argument (was corrupted to a stray
	 * registered-trademark character, which does not compile) */
	falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			    channel->channel);
}
/* Use HW to insert a SW defined event */
void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	/* Target the channel's event queue and carry the whole 64-bit
	 * event payload in the driver-event register */
	EFX_POPULATE_OWORD_2(drv_ev_reg,
			     DRV_EV_QID, channel->channel,
			     DRV_EV_DATA,
			     EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
	falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
}
/* Handle a transmit completion event
 *
 * Falcon batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static void falcon_handle_tx_event(struct efx_channel *channel,
				   efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;

	if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		/* Feed the number of completed descriptors into the
		 * interrupt-moderation scoring */
		channel->irq_mod_score +=
			(tx_ev_desc_ptr - tx_queue->read_count) &
			efx->type->txd_ring_mask;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
		tx_queue = &efx->tx_queue[tx_ev_q_label];
		/* Take the TX lock so the rewrite cannot race the
		 * transmit path (only if the netdev is registered) */
		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		falcon_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		/* Descriptor fetch error: recover via reset */
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		EFX_ERR(efx, "channel %d unexpected TX event "
			EFX_QWORD_FMT"\n", channel->channel,
			EFX_QWORD_VAL(*event));
	}
}
/* Detect errors included in the rx_evt_pkt_ok bit.
 *
 * Decodes the individual error flags from an RX event whose PKT_OK bit
 * was clear, updates the per-channel error counters, and tells the
 * caller (via *discard) whether the frame must be dropped.
 */
static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				    const efx_qword_t *event,
				    bool *rx_ev_pkt_ok,
				    bool *discard)
{
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	/* Extract every error-related field from the event */
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
	/* The DRIB_NIB field only exists before rev B0 */
	rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
			  0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++rx_queue->channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++rx_queue->channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++rx_queue->channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++rx_queue->channel->n_rx_tcp_udp_chksum_err;
	}
	if (rx_ev_ip_frag_err)
		++rx_queue->channel->n_rx_ip_frag_err;

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err) {
		EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
			    EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			    rx_queue->queue, EFX_QWORD_VAL(*event),
			    rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			    rx_ev_ip_hdr_chksum_err ?
			    " [IP_HDR_CHKSUM_ERR]" : "",
			    rx_ev_tcp_udp_chksum_err ?
			    " [TCP_UDP_CHKSUM_ERR]" : "",
			    rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			    rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			    rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			    rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			    rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}
/* Handle receive events that are not in-order: report how many events
 * appear to have been lost and schedule a recovery reset. */
static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
				       unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
	unsigned dropped = (index + FALCON_RXD_RING_SIZE - expected) &
			   FALCON_RXD_RING_MASK;

	EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
		 dropped, index, expected);
	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}
/* Handle a packet received event
 *
 * Falcon silicon gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void falcon_handle_rx_event(struct efx_channel *channel,
				   const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
	/* Scatter/jumbo continuation and cross-queue events are not
	 * expected with this driver's configuration */
	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel);

	rx_queue = &efx->rx_queue[channel->channel];
	/* Detect lost events by comparing the descriptor index in the
	 * event with the next index we expect to complete */
	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IPv4 or
		 * UDP/IPv4, then we can rely on the hardware checksum.
		 */
		checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
	} else {
		falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
					&discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
		if (unlikely(!rx_ev_mcast_hash_match))
			discard = true;
	}

	channel->irq_mod_score += 2;
	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}
/* Global events are basically PHY events */
static void falcon_handle_global_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	bool handled = false;

	/* Any flavour of PHY interrupt: clear it and defer the real
	 * work to the PHY workqueue item */
	if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
	    EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
	    EFX_QWORD_FIELD(*event, XG_PHY_INTR) ||
	    EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) {
		efx->phy_op->clear_interrupt(efx);
		queue_work(efx->workqueue, &efx->phy_work);
		handled = true;
	}
	/* 10G MAC management interrupt (rev B0 and later only) */
	if ((falcon_rev(efx) >= FALCON_REV_B0) &&
	    EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) {
		queue_work(efx->workqueue, &efx->mac_work);
		handled = true;
	}
	/* Global RX reset: schedule a recovery reset */
	if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
		EFX_ERR(efx, "channel %d seen global RX_RESET "
			"event. Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
		handled = true;
	}
	if (!handled)
		EFX_ERR(efx, "channel %d unknown global event "
			EFX_QWORD_FMT "\n", channel->channel,
			EFX_QWORD_VAL(*event));
}
/* Handle a driver-generated event: decode the sub-code and either log
 * the event or schedule a recovery action for error sub-codes. */
static void falcon_handle_driver_event(struct efx_channel *channel,
				       efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA);

	switch (ev_sub_code) {
	case TX_DESCQ_FLS_DONE_EV_DECODE:
		EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case RX_DESCQ_FLS_DONE_EV_DECODE:
		EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
			  channel->channel, ev_sub_data);
		break;
	case EVQ_INIT_DONE_EV_DECODE:
		EFX_LOG(efx, "channel %d EVQ %d initialised\n",
			channel->channel, ev_sub_data);
		break;
	case SRM_UPD_DONE_EV_DECODE:
		EFX_TRACE(efx, "channel %d SRAM update done\n",
			  channel->channel);
		break;
	case WAKE_UP_EV_DECODE:
		EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
			  channel->channel, ev_sub_data);
		break;
	case TIMER_EV_DECODE:
		EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
			  channel->channel, ev_sub_data);
		break;
	case RX_RECOVERY_EV_DECODE:
		/* RX reset reported via driver event: recover */
		EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
			"Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case RX_DSC_ERROR_EV_DECODE:
		EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
			" RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case TX_DSC_ERROR_EV_DECODE:
		EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
			" TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		EFX_TRACE(efx, "channel %d unknown driver event code %d "
			  "data %04x\n", channel->channel, ev_sub_code,
			  ev_sub_data);
		break;
	}
}
/* Process events on a channel's event queue, dispatching each event to
 * the appropriate handler, until either the queue is empty or rx_quota
 * RX events have been handled.  Returns the number of RX events
 * processed.  Called from the per-channel tasklet. */
int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
{
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int rx_packets = 0;

	read_ptr = channel->eventq_read_ptr;
	do {
		p_event = falcon_event(channel, read_ptr);
		/* Take a local copy before clearing the in-queue entry */
		event = *p_event;
		if (!falcon_event_present(&event))
			/* End of events */
			break;
		EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(event));
		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);
		/* Dispatch on the event code */
		ev_code = EFX_QWORD_FIELD(event, EV_CODE);
		switch (ev_code) {
		case RX_IP_EV_DECODE:
			falcon_handle_rx_event(channel, &event);
			++rx_packets;
			break;
		case TX_IP_EV_DECODE:
			falcon_handle_tx_event(channel, &event);
			break;
		case DRV_GEN_EV_DECODE:
			/* Self-test event: record its magic value */
			channel->eventq_magic
				= EFX_QWORD_FIELD(event, EVQ_MAGIC);
			EFX_LOG(channel->efx, "channel %d received generated "
				"event "EFX_QWORD_FMT"\n", channel->channel,
				EFX_QWORD_VAL(event));
			break;
		case GLOBAL_EV_DECODE:
			falcon_handle_global_event(channel, &event);
			break;
		case DRIVER_EV_DECODE:
			falcon_handle_driver_event(channel, &event);
			break;
		default:
			EFX_ERR(channel->efx, "channel %d unknown event type %d"
				" (data " EFX_QWORD_FMT ")\n", channel->channel,
				ev_code, EFX_QWORD_VAL(event));
		}
		/* Increment read pointer */
		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
	} while (rx_packets < rx_quota);
	channel->eventq_read_ptr = read_ptr;
	return rx_packets;
}
/* Program the channel's interrupt-moderation timer register from
 * channel->irq_moderation (0 disables moderation).  The requested
 * value is rounded down to the hardware resolution, with a minimum of
 * one resolution step. */
void falcon_set_int_moderation(struct efx_channel *channel)
{
	efx_dword_t timer_cmd;
	struct efx_nic *efx = channel->efx;

	/* Set timer register */
	if (channel->irq_moderation) {
		/* Round to resolution supported by hardware.  The value we
		 * program is based at 0.  So actual interrupt moderation
		 * achieved is ((x + 1) * res).
		 */
		channel->irq_moderation -= (channel->irq_moderation %
					    FALCON_IRQ_MOD_RESOLUTION);
		if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION)
			channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION;
		EFX_POPULATE_DWORD_2(timer_cmd,
				     TIMER_MODE, TIMER_MODE_INT_HLDOFF,
				     TIMER_VAL,
				     channel->irq_moderation /
				     FALCON_IRQ_MOD_RESOLUTION - 1);
	} else {
		/* Moderation disabled */
		EFX_POPULATE_DWORD_2(timer_cmd,
				     TIMER_MODE, TIMER_MODE_DIS,
				     TIMER_VAL, 0);
	}
	falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
				  channel->channel);
}
/* Allocate buffer table entries for event queue */
int falcon_probe_eventq(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
unsigned int evq_size;
evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
}
/* Pin the event queue buffer, initialise its contents to "empty", push
 * the queue pointer to the card and program interrupt moderation. */
void falcon_init_eventq(struct efx_channel *channel)
{
	efx_oword_t evq_ptr;
	struct efx_nic *efx = channel->efx;

	EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
		channel->channel, channel->eventq.index,
		channel->eventq.index + channel->eventq.entries - 1);
	/* Pin event queue buffer */
	falcon_init_special_buffer(efx, &channel->eventq);
	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);
	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(evq_ptr,
			     EVQ_EN, 1,
			     EVQ_SIZE, FALCON_EVQ_ORDER,
			     EVQ_BUF_BASE_ID, channel->eventq.index);
	falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
			   channel->channel);
	falcon_set_int_moderation(channel);
}
/* Remove the event queue pointer from the card and unpin the queue's
 * buffer-table entries. */
void falcon_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t eventq_ptr;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(eventq_ptr);
	falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
			   channel->channel);
	/* Unpin event queue */
	falcon_fini_special_buffer(efx, &channel->eventq);
}
/* Release the buffer-table entries backing the event queue */
void falcon_remove_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	falcon_free_special_buffer(efx, &channel->eventq);
}
/* Generates a test event on the event queue.  A subsequent call to
 * process_eventq() should pick up the event and place the value of
 * "magic" into channel->eventq_magic;
 */
void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
{
	efx_qword_t test_event;

	/* Encode a driver-generated event carrying the magic value */
	EFX_POPULATE_QWORD_2(test_event,
			     EV_CODE, DRV_GEN_EV_DECODE,
			     EVQ_MAGIC, magic);
	falcon_generate_event(channel, &test_event);
}
/* Inject a simulated PHY interrupt as a global event on channel 0,
 * choosing the 10G or 1G PHY interrupt flag to match the board. */
void falcon_sim_phy_event(struct efx_nic *efx)
{
	efx_qword_t phy_event;

	EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE);
	if (EFX_IS10G(efx))
		EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1);
	else
		EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1);
	falcon_generate_event(&efx->channel[0], &phy_event);
}
/**************************************************************************
*
* Flush handling
*
**************************************************************************/
/* Scan channel 0's event queue (without consuming events) for TX/RX
 * flush-done driver events, marking the corresponding queues flushed.
 * A failed RX flush is retried immediately. */
static void falcon_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = &efx->channel[0];
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	/* Scan at most one full ring's worth of events */
	unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK;

	do {
		efx_qword_t *event = falcon_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!falcon_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
		if (ev_code == DRIVER_EV_DECODE &&
		    ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   DRIVER_EV_TX_DESCQ_ID);
			/* Bounds-check the queue id before using it */
			if (ev_queue < EFX_TX_QUEUE_COUNT) {
				tx_queue = efx->tx_queue + ev_queue;
				tx_queue->flushed = true;
			}
		} else if (ev_code == DRIVER_EV_DECODE &&
			   ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(*event,
						    DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_queues) {
				rx_queue = efx->rx_queue + ev_queue;
				/* retry the rx flush */
				if (ev_failed)
					falcon_flush_rx_queue(rx_queue);
				else
					rx_queue->flushed = true;
			}
		}
		read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
	} while (read_ptr != end_ptr);
}
/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them.
 *
 * Returns 0 when every queue is flushed (or when timeouts are tolerated
 * via workaround 7803), otherwise -ETIMEDOUT. */
int falcon_flush_queues(struct efx_nic *efx)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i;
	bool outstanding;

	/* Issue flush requests */
	efx_for_each_tx_queue(tx_queue, efx) {
		tx_queue->flushed = false;
		falcon_flush_tx_queue(tx_queue);
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		rx_queue->flushed = false;
		falcon_flush_rx_queue(rx_queue);
	}

	/* Poll the evq looking for flush completions.  Since we're not
	 * pushing any more rx or tx descriptors at this point, we're in
	 * no danger of overflowing the evq whilst we wait */
	for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
		msleep(FALCON_FLUSH_INTERVAL);
		falcon_poll_flush_events(efx);

		/* Check if every queue has been successfully flushed */
		outstanding = false;
		efx_for_each_tx_queue(tx_queue, efx)
			outstanding |= !tx_queue->flushed;
		efx_for_each_rx_queue(rx_queue, efx)
			outstanding |= !rx_queue->flushed;
		if (!outstanding)
			return 0;
	}

	/* Mark the queues as all flushed.  We're going to return failure
	 * leading to a reset, or fake up success anyway.  "flushed" now
	 * indicates that we tried to flush. */
	efx_for_each_tx_queue(tx_queue, efx) {
		if (!tx_queue->flushed)
			EFX_ERR(efx, "tx queue %d flush command timed out\n",
				tx_queue->queue);
		tx_queue->flushed = true;
	}
	efx_for_each_rx_queue(rx_queue, efx) {
		if (!rx_queue->flushed)
			EFX_ERR(efx, "rx queue %d flush command timed out\n",
				rx_queue->queue);
		rx_queue->flushed = true;
	}

	if (EFX_WORKAROUND_7803(efx))
		return 0;
	return -ETIMEDOUT;
}
/**************************************************************************
*
* Falcon hardware interrupts
* The hardware interrupt handler does very little work; all the event
* queue processing is carried out by per-channel tasklets.
*
**************************************************************************/
/* Enable/disable/generate Falcon interrupts
 *
 * @enabled: value for the driver interrupt-enable bit
 * @force:   when set, also forces a test interrupt (KER_INT_KER)
 */
static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
				     int force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_2(int_en_reg_ker,
			     KER_INT_KER, force,
			     DRV_INT_EN_KER, enabled);
	falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER);
}
/* Program the interrupt status DMA address, enable interrupts, then
 * kick every channel so the event-queue read pointers get refreshed. */
void falcon_enable_interrupts(struct efx_nic *efx)
{
	efx_oword_t int_adr_reg_ker;
	struct efx_channel *channel;

	/* Clear the stale interrupt status word before enabling */
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Program address */
	EFX_POPULATE_OWORD_2(int_adr_reg_ker,
			     NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx),
			     INT_ADR_KER, efx->irq_status.dma_addr);
	falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER);

	/* Enable interrupts */
	falcon_interrupts(efx, 1, 0);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}
/* Clear the driver interrupt-enable bit (no forced interrupt) */
void falcon_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	falcon_interrupts(efx, 0, 0);
}
/* Generate a Falcon test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void falcon_generate_interrupt(struct efx_nic *efx)
{
	/* Keep interrupts enabled and set the force bit */
	falcon_interrupts(efx, 1, 1);
}
/* Acknowledge a legacy interrupt from Falcon
 *
 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
 *
 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
 * BIU. Interrupt acknowledge is read sensitive so must write instead
 * (then read to ensure the BIU collector is flushed)
 *
 * NB most hardware supports MSI interrupts
 */
static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
	efx_dword_t reg;

	EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e);
	/* Fixed mis-encoded "&reg" arguments (both were corrupted to a
	 * stray registered-trademark character, which does not compile) */
	falcon_writel(efx, &reg, INT_ACK_REG_KER_A1);
	falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1);
}
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 *
 * After a bounded number of fatal errors within the expiry window the
 * NIC is disabled rather than reset.  Always returns IRQ_HANDLED.
 */
static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
	error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);
	EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
		EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		EFX_OWORD_VAL(fatal_intr),
		error ? "disabling bus mastering" : "no recognised error");
	if (error == 0)
		goto out;

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER);
	if (mem_perr) {
		efx_oword_t reg;
		/* Fixed mis-encoded "&reg" argument (was corrupted to a
		 * stray registered-trademark character) */
		falcon_read(efx, &reg, MEM_STAT_REG_KER);
		EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
			EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (FALCON_IS_DUAL_FUNC(efx))
		pci_clear_master(nic_data->pci_dev2);
	falcon_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (nic_data->int_error_count == 0 ||
	    time_after(jiffies, nic_data->int_error_expire)) {
		nic_data->int_error_count = 0;
		nic_data->int_error_expire =
			jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
	}
	if (++nic_data->int_error_count < FALCON_MAX_INT_ERRORS) {
		EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen."
			"NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}
out:
	return IRQ_HANDLED;
}
/* Handle a legacy interrupt from Falcon (rev B0+)
 * Acknowledges the interrupt and schedule event queue processing.
 */
static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	/* Fixed mis-encoded "&reg" argument (was corrupted to a stray
	 * registered-trademark character, which does not compile) */
	falcon_readl(efx, &reg, INT_ISR0_B0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of any interrupting queues */
	efx_for_each_channel(channel, efx) {
		if ((queues & 1) ||
		    falcon_event_present(
			    falcon_event(channel, channel->eventq_read_ptr))) {
			efx_schedule_channel(channel);
			result = IRQ_HANDLED;
		}
		queues >>= 1;
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			  irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}
	return result;
}
/* Handle a legacy interrupt on pre-B0 silicon: check ownership via the
 * interrupt status word, detect fatal errors, then clear the vector and
 * acknowledge before scheduling the interrupting channels. */
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	struct efx_channel *channel;
	int syserr;
	int queues;

	/* Check to see if this is our interrupt.  If it isn't, we
	 * exit without having touched the hardware.
	 */
	if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
		EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
			  raw_smp_processor_id());
		return IRQ_NONE;
	}
	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Determine interrupting queues, clear interrupt status
	 * register and acknowledge the device interrupt.
	 */
	BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
	queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
	EFX_ZERO_OWORD(*int_ker);
	wmb(); /* Ensure the vector is cleared before interrupt ack */
	falcon_irq_ack_a1(efx);

	/* Schedule processing of any interrupting queues */
	channel = &efx->channel[0];
	while (queues) {
		if (queues & 0x01)
			efx_schedule_channel(channel);
		channel++;
		queues >>= 1;
	}
	return IRQ_HANDLED;
}
/* Handle an MSI interrupt from Falcon
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		  irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
	if (unlikely(syserr))
		return falcon_fatal_interrupt(efx);

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);
	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
static void falcon_setup_rss_indir_table(struct efx_nic *efx)
{
	efx_dword_t dword;
	unsigned long offset;
	int entry;

	/* The indirection table only exists on rev B0 and later */
	if (falcon_rev(efx) < FALCON_REV_B0)
		return;

	/* Fill the 0x800-byte table (entries 0x10 apart), spreading the
	 * entries round-robin over the active RX queues */
	for (entry = 0, offset = RX_RSS_INDIR_TBL_B0;
	     offset < RX_RSS_INDIR_TBL_B0 + 0x800;
	     entry++, offset += 0x10) {
		EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
				     entry % efx->n_rx_queues);
		falcon_writel(efx, &dword, offset);
	}
}
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 *
 * Returns 0 on success or the request_irq() error code; on failure any
 * already-hooked MSI IRQs are released.
 */
int falcon_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		/* Legacy interrupt path differs by silicon revision */
		if (falcon_rev(efx) >= FALCON_REV_B0)
			handler = falcon_legacy_interrupt_b0;
		else
			handler = falcon_legacy_interrupt_a1;
		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
				efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, falcon_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 channel->name, channel);
		if (rc) {
			EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}
	return 0;

fail2:
	/* Undo any IRQs already hooked before the failure */
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, channel);
fail1:
	return rc;
}
/* Release all hooked interrupts: free per-channel MSI/MSI-X IRQs,
 * acknowledge any pending legacy interrupt, then free the legacy IRQ. */
void falcon_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, channel);
	}

	/* ACK legacy interrupt */
	/* Fixed mis-encoded "&reg" argument (was corrupted to a stray
	 * registered-trademark character, which does not compile) */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		falcon_read(efx, &reg, INT_ISR0_B0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}
/**************************************************************************
*
* EEPROM/flash
*
**************************************************************************
*/
#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
/* Return 0 if the SPI host command engine is idle, -EBUSY if a command
 * is still in flight. */
static int falcon_spi_poll(struct efx_nic *efx)
{
	efx_oword_t reg;

	/* Fixed mis-encoded "&reg" argument (was corrupted to a stray
	 * registered-trademark character, which does not compile) */
	falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
	return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
}
/* Wait for SPI command completion.
 *
 * Most commands will finish quickly, so we start polling at
 * very short intervals.  Sometimes the command may have to
 * wait for VPD or expansion ROM access outside of our
 * control, so we allow up to 100 ms. */
static int falcon_spi_wait(struct efx_nic *efx)
{
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
	int spin;

	/* Fast path: a short busy-wait, polling every 10us */
	for (spin = 0; spin < 10; ++spin) {
		if (falcon_spi_poll(efx) == 0)
			return 0;
		udelay(10);
	}

	/* Slow path: sleep between polls until the deadline expires */
	for (;;) {
		if (falcon_spi_poll(efx) == 0)
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "timed out waiting for SPI\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
/* Execute a single SPI command against the given device.
 *
 * @command: SPI opcode (possibly munged for high address bits)
 * @address: device address, or negative for commands with no address phase
 * @in:      data to write, or NULL
 * @out:     buffer for data read back, or NULL
 * @len:     data length; at most FALCON_SPI_MAX_LEN bytes
 *
 * Caller must hold efx->spi_lock.  Returns 0 or a negative error code.
 *
 * Note: several "&reg" arguments below were mis-encoded in the prior
 * revision (corrupted to a stray registered-trademark character, which
 * does not compile); they are restored here.
 */
int falcon_spi_cmd(const struct efx_spi_device *spi,
		   unsigned int command, int address,
		   const void *in, void *out, size_t len)
{
	struct efx_nic *efx = spi->efx;
	bool addressed = (address >= 0);
	bool reading = (out != NULL);
	efx_oword_t reg;
	int rc;

	/* Input validation */
	if (len > FALCON_SPI_MAX_LEN)
		return -EINVAL;
	BUG_ON(!mutex_is_locked(&efx->spi_lock));

	/* Check that previous command is not still running */
	rc = falcon_spi_poll(efx);
	if (rc)
		return rc;

	/* Program address register, if we have an address */
	if (addressed) {
		EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
		falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
	}

	/* Program data register, if we have data */
	if (in != NULL) {
		memcpy(&reg, in, len);
		falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER);
	}

	/* Issue read/write command */
	EFX_POPULATE_OWORD_7(reg,
			     EE_SPI_HCMD_CMD_EN, 1,
			     EE_SPI_HCMD_SF_SEL, spi->device_id,
			     EE_SPI_HCMD_DABCNT, len,
			     EE_SPI_HCMD_READ, reading,
			     EE_SPI_HCMD_DUBCNT, 0,
			     EE_SPI_HCMD_ADBCNT,
			     (addressed ? spi->addr_len : 0),
			     EE_SPI_HCMD_ENC, command);
	falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);

	/* Wait for read/write to complete */
	rc = falcon_spi_wait(efx);
	if (rc)
		return rc;

	/* Read data */
	if (out != NULL) {
		falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
		memcpy(out, &reg, len);
	}
	return 0;
}
/* Largest write starting at @start that stays within both the hardware
 * data-register limit and the device's write-block boundary */
static size_t
falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
{
	size_t to_block_end = spi->block_size -
			      (start & (spi->block_size - 1));

	return min(FALCON_SPI_MAX_LEN, to_block_end);
}
/* Fold the high address bits selected by spi->munge_address into the
 * command byte (shifted into bit 3), for devices that encode part of
 * the address in the opcode */
static inline u8
efx_spi_munge_command(const struct efx_spi_device *spi,
		      const u8 command, const unsigned int address)
{
	u8 addr_bits = ((address >> 8) & spi->munge_address) << 3;

	return command | addr_bits;
}
/* Wait up to 10 ms for buffered write completion
 *
 * Polls the device's status register (SPI_RDSR) until the not-ready
 * bit clears.  Returns 0 on completion, a falcon_spi_cmd() error, or
 * -ETIMEDOUT. */
int falcon_spi_wait_write(const struct efx_spi_device *spi)
{
	struct efx_nic *efx = spi->efx;
	unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
	u8 status;
	int rc;

	for (;;) {
		rc = falcon_spi_cmd(spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (time_after_eq(jiffies, timeout)) {
			EFX_ERR(efx, "SPI write timeout on device %d"
				" last status=0x%02x\n",
				spi->device_id, status);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	}
}
/* Read @len bytes from the SPI device starting at @start, in chunks no
 * larger than the hardware data register.  On return, *retlen (if
 * non-NULL) holds the number of bytes actually read.  Returns 0, a
 * falcon_spi_cmd() error, or -EINTR if a signal is pending. */
int falcon_spi_read(const struct efx_spi_device *spi, loff_t start,
		    size_t len, size_t *retlen, u8 *buffer)
{
	size_t done = 0;
	int rc = 0;

	while (done < len) {
		size_t chunk = min(len - done, FALCON_SPI_MAX_LEN);
		unsigned int command =
			efx_spi_munge_command(spi, SPI_READ, start + done);

		rc = falcon_spi_cmd(spi, command, start + done, NULL,
				    buffer + done, chunk);
		if (rc)
			break;
		done += chunk;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = done;
	return rc;
}
/* Write @len bytes to a SPI device starting at @start, one
 * write-block at a time, verifying each block by reading it back.
 * On return, *@retlen (if non-NULL) holds the number of bytes
 * successfully written and verified.  Returns 0, a command error,
 * -EIO on verify mismatch, or -EINTR if a signal arrived.
 *
 * Fix: the return code of the verify read-back was previously
 * ignored, so a failed read compared stale stack data and could
 * report a bogus -EIO (or, worse, pass on leftover matching data).
 */
int falcon_spi_write(const struct efx_spi_device *spi, loff_t start,
		     size_t len, size_t *retlen, const u8 *buffer)
{
	u8 verify_buffer[FALCON_SPI_MAX_LEN];
	size_t block_len, pos = 0;
	unsigned int command;
	int rc = 0;

	while (pos < len) {
		/* Each block must be preceded by a write-enable */
		rc = falcon_spi_cmd(spi, SPI_WREN, -1, NULL, NULL, 0);
		if (rc)
			break;

		block_len = min(len - pos,
				falcon_spi_write_limit(spi, start + pos));
		command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    buffer + pos, NULL, block_len);
		if (rc)
			break;

		rc = falcon_spi_wait_write(spi);
		if (rc)
			break;

		/* Read back and verify the block just written */
		command = efx_spi_munge_command(spi, SPI_READ, start + pos);
		rc = falcon_spi_cmd(spi, command, start + pos,
				    NULL, verify_buffer, block_len);
		if (rc)
			break;
		if (memcmp(verify_buffer, buffer + pos, block_len)) {
			rc = -EIO;
			break;
		}

		pos += block_len;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
	}

	if (retlen)
		*retlen = pos;
	return rc;
}
/**************************************************************************
*
* MAC wrapper
*
**************************************************************************
*/
/* Reset the MAC(s).  On pre-B0 silicon this uses the per-MAC reset
 * bits (GLB_CTL is not safe there); on B0 it drains the TX FIFO and
 * resets the MAC blocks via GLB_CTL_REG_KER.  Returns 0 on success
 * or -ETIMEDOUT if the XMAC core reset never completes.
 *
 * Fix: restores "&reg" arguments that had been mangled into the
 * "(R)" glyph by an HTML-entity encoding error (code did not
 * compile as extracted).
 */
static int falcon_reset_macs(struct efx_nic *efx)
{
	efx_oword_t reg;
	int count;

	if (falcon_rev(efx) < FALCON_REV_B0) {
		/* It's not safe to use GLB_CTL_REG to reset the
		 * macs, so instead use the internal MAC resets
		 */
		if (!EFX_IS10G(efx)) {
			/* GMAC: pulse the software reset bit */
			EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1);
			falcon_write(efx, &reg, GM_CFG1_REG);
			udelay(1000);

			EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0);
			falcon_write(efx, &reg, GM_CFG1_REG);
			udelay(1000);
			return 0;
		} else {
			/* XMAC: assert core reset and poll for it to clear */
			EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1);
			falcon_write(efx, &reg, XM_GLB_CFG_REG);

			for (count = 0; count < 10000; count++) {
				falcon_read(efx, &reg, XM_GLB_CFG_REG);
				if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0)
					return 0;
				udelay(10);
			}

			EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
			return -ETIMEDOUT;
		}
	}

	/* MAC stats will fail whilst the TX fifo is draining. Serialise
	 * the drain sequence with the statistics fetch */
	efx_stats_disable(efx);

	falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
	EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1);
	falcon_write(efx, &reg, MAC0_CTRL_REG_KER);

	/* Reset both MAC blocks and the "EM" block together */
	falcon_read(efx, &reg, GLB_CTL_REG_KER);
	EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1);
	EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1);
	EFX_SET_OWORD_FIELD(reg, RST_EM, 1);
	falcon_write(efx, &reg, GLB_CTL_REG_KER);

	count = 0;
	while (1) {
		falcon_read(efx, &reg, GLB_CTL_REG_KER);
		if (!EFX_OWORD_FIELD(reg, RST_XGTX) &&
		    !EFX_OWORD_FIELD(reg, RST_XGRX) &&
		    !EFX_OWORD_FIELD(reg, RST_EM)) {
			EFX_LOG(efx, "Completed MAC reset after %d loops\n",
				count);
			break;
		}
		if (count > 20) {
			EFX_ERR(efx, "MAC reset failed\n");
			break;
		}
		count++;
		udelay(10);
	}

	efx_stats_enable(efx);

	/* If we've reset the EM block and the link is up, then
	 * we'll have to kick the XAUI link so the PHY can recover */
	if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
		falcon_reset_xaui(efx);

	return 0;
}
/* Start a TX FIFO drain (B0 only; a no-op on older revisions or in
 * loopback).  Draining is implemented as a MAC reset with
 * TXFIFO_DRAIN_EN set, so skip it if a drain is already in progress.
 *
 * Fix: restores "&reg" mangled into the "(R)" glyph by an encoding error.
 */
void falcon_drain_tx_fifo(struct efx_nic *efx)
{
	efx_oword_t reg;

	if ((falcon_rev(efx) < FALCON_REV_B0) ||
	    (efx->loopback_mode != LOOPBACK_NONE))
		return;

	falcon_read(efx, &reg, MAC0_CTRL_REG_KER);
	/* There is no point in draining more than once */
	if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0))
		return;

	falcon_reset_macs(efx);
}
/* Isolate the MAC from the RX datapath (B0 only) and, if the link is
 * down, drain the TX FIFO as well.
 *
 * Fix: restores "&reg" mangled into the "(R)" glyph by an encoding error.
 */
void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;

	if (falcon_rev(efx) < FALCON_REV_B0)
		return;

	/* Isolate the MAC -> RX */
	falcon_read(efx, &reg, RX_CFG_REG_KER);
	EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0);
	falcon_write(efx, &reg, RX_CFG_REG_KER);

	if (!efx->link_up)
		falcon_drain_tx_fifo(efx);
}
/* Program the MAC wrapper (speed, promiscuity, pause parameters and
 * flow control) from the current link state, then un-isolate the
 * MAC -> RX path on B0.
 *
 * Fix: restores "&reg" mangled into the "(R)" glyph by an encoding error.
 */
void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
	efx_oword_t reg;
	int link_speed;
	bool tx_fc;

	/* Encode the current link speed for the MAC_SPEED field */
	switch (efx->link_speed) {
	case 10000: link_speed = 3; break;
	case 1000:  link_speed = 2; break;
	case 100:   link_speed = 1; break;
	default:    link_speed = 0; break;
	}

	/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
	 * as advertised.  Disable to ensure packets are not
	 * indefinitely held and TX queue can be flushed at any point
	 * while the link is down. */
	EFX_POPULATE_OWORD_5(reg,
			     MAC_XOFF_VAL, 0xffff /* max pause time */,
			     MAC_BCAD_ACPT, 1,
			     MAC_UC_PROM, efx->promiscuous,
			     MAC_LINK_STATUS, 1, /* always set */
			     MAC_SPEED, link_speed);

	/* On B0, MAC backpressure can be disabled and packets get
	 * discarded. */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
				    !efx->link_up);
	}

	falcon_write(efx, &reg, MAC0_CTRL_REG_KER);

	/* Restore the multicast hash registers. */
	falcon_set_multicast_hash(efx);

	/* Transmission of pause frames when RX crosses the threshold is
	 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
	 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */
	tx_fc = !!(efx->link_fc & EFX_FC_TX);
	falcon_read(efx, &reg, RX_CFG_REG_KER);
	EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);

	/* Unisolate the MAC -> RX */
	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
	falcon_write(efx, &reg, RX_CFG_REG_KER);
}
/* Kick off a DMA fetch of the MAC statistics into the stats buffer
 * and busy-wait (up to 400 * 10us) for the done flag at @done_offset
 * to flip to FALCON_STATS_DONE.  Silently returns 0 without fetching
 * if DMA stats are disabled or the MAC is draining its TX FIFO.
 *
 * Fix: restores "&reg" mangled into the "(R)" glyph by an encoding error.
 */
int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
{
	efx_oword_t reg;
	u32 *dma_done;
	int i;

	if (disable_dma_stats)
		return 0;

	/* Statistics fetch will fail if the MAC is in TX drain */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		efx_oword_t temp;
		falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
		if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
			return 0;
	}

	dma_done = (efx->stats_buffer.addr + done_offset);
	*dma_done = FALCON_STATS_NOT_DONE;
	wmb(); /* ensure done flag is clear */

	/* Initiate DMA transfer of stats */
	EFX_POPULATE_OWORD_2(reg,
			     MAC_STAT_DMA_CMD, 1,
			     MAC_STAT_DMA_ADR,
			     efx->stats_buffer.dma_addr);
	falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER);

	/* Wait for transfer to complete */
	for (i = 0; i < 400; i++) {
		if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) {
			rmb(); /* Ensure the stats are valid. */
			return 0;
		}
		udelay(10);
	}

	EFX_ERR(efx, "timed out waiting for statistics\n");
	return -ETIMEDOUT;
}
/**************************************************************************
*
* PHY access via GMII
*
**************************************************************************
*/
/* Wait for a GMII (MDIO) access to complete; up to 50ms, the maximum
 * taken from the datasheet.  Returns 0 when the interface is idle,
 * -EIO if the completed access reported an error, or -ETIMEDOUT if
 * the busy flag never clears.
 */
static int falcon_gmii_wait(struct efx_nic *efx)
{
	efx_dword_t md_stat;
	int i;

	for (i = 0; i < 5000; i++) {	/* 5000 * 10us = 50ms */
		falcon_readl(efx, &md_stat, MD_STAT_REG_KER);
		if (EFX_DWORD_FIELD(md_stat, MD_BSY) != 0) {
			udelay(10);
			continue;
		}
		/* Interface is idle - report any latched access error */
		if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 ||
		    EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) {
			EFX_ERR(efx, "error from GMII access "
				EFX_DWORD_FMT"\n",
				EFX_DWORD_VAL(md_stat));
			return -EIO;
		}
		return 0;
	}

	EFX_ERR(efx, "timed out waiting for GMII\n");
	return -ETIMEDOUT;
}
/* Write an MDIO register of a PHY connected to Falcon.
 * Serialised against falcon_mdio_read() by efx->phy_lock; on a wait
 * failure the in-flight write is explicitly aborted via MD_GC.
 *
 * Fix: restores "&reg" mangled into the "(R)" glyph by an encoding error.
 */
static int falcon_mdio_write(struct net_device *net_dev,
			     int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx_oword_t reg;
	int rc;

	EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
		    prtad, devad, addr, value);

	spin_lock_bh(&efx->phy_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID register */
	EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
	falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);

	EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad);
	falcon_write(efx, &reg, MD_ID_REG_KER);

	/* Write data */
	EFX_POPULATE_OWORD_1(reg, MD_TXD, value);
	falcon_write(efx, &reg, MD_TXD_REG_KER);

	EFX_POPULATE_OWORD_2(reg,
			     MD_WRC, 1,
			     MD_GC, 0);
	falcon_write(efx, &reg, MD_CS_REG_KER);

	/* Wait for data to be written */
	rc = falcon_gmii_wait(efx);
	if (rc) {
		/* Abort the write operation */
		EFX_POPULATE_OWORD_2(reg,
				     MD_WRC, 0,
				     MD_GC, 1);
		falcon_write(efx, &reg, MD_CS_REG_KER);
		udelay(10);
	}

out:
	spin_unlock_bh(&efx->phy_lock);
	return rc;
}
/* Read an MDIO register of a PHY connected to Falcon.
 * Returns the (non-negative) register value on success or a negative
 * error code; on a wait failure the in-flight read is aborted.
 *
 * Fix: restores "&reg" mangled into the "(R)" glyph by an encoding error.
 */
static int falcon_mdio_read(struct net_device *net_dev,
			    int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	efx_oword_t reg;
	int rc;

	spin_lock_bh(&efx->phy_lock);

	/* Check MDIO not currently being accessed */
	rc = falcon_gmii_wait(efx);
	if (rc)
		goto out;

	/* Write the address/ID registers */
	EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
	falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);

	EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad);
	falcon_write(efx, &reg, MD_ID_REG_KER);

	/* Request data to be read */
	EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0);
	falcon_write(efx, &reg, MD_CS_REG_KER);

	/* Wait for data to become available */
	rc = falcon_gmii_wait(efx);
	if (rc == 0) {
		falcon_read(efx, &reg, MD_RXD_REG_KER);
		rc = EFX_OWORD_FIELD(reg, MD_RXD);
		EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
			    prtad, devad, addr, rc);
	} else {
		/* Abort the read operation */
		EFX_POPULATE_OWORD_2(reg,
				     MD_RIC, 0,
				     MD_GC, 1);
		falcon_write(efx, &reg, MD_CS_REG_KER);

		EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
			prtad, devad, addr, rc);
	}

out:
	spin_unlock_bh(&efx->phy_lock);
	return rc;
}
/* Select the PHY operations table matching efx->phy_type and add the
 * loopback modes that combination supports to efx->loopback_modes.
 * Returns 0 on success or -1 for an unrecognised PHY type.
 */
static int falcon_probe_phy(struct efx_nic *efx)
{
	unsigned int modes;

	switch (efx->phy_type) {
	case PHY_TYPE_SFX7101:
		efx->phy_op = &falcon_sfx7101_phy_ops;
		break;
	case PHY_TYPE_SFT9001A:
	case PHY_TYPE_SFT9001B:
		efx->phy_op = &falcon_sft9001_phy_ops;
		break;
	case PHY_TYPE_QT2022C2:
	case PHY_TYPE_QT2025C:
		efx->phy_op = &falcon_xfp_phy_ops;
		break;
	default:
		EFX_ERR(efx, "Unknown PHY type %d\n",
			efx->phy_type);
		return -1;
	}

	/* Accumulate the loopback modes implied by the MACs this PHY
	 * can drive, plus the PHY's own loopbacks */
	modes = efx->phy_op->loopbacks;
	if (efx->phy_op->macs & EFX_XMAC)
		modes |= (1 << LOOPBACK_XGMII) |
			 (1 << LOOPBACK_XGXS) |
			 (1 << LOOPBACK_XAUI);
	if (efx->phy_op->macs & EFX_GMAC)
		modes |= (1 << LOOPBACK_GMAC);
	efx->loopback_modes |= modes;

	return 0;
}
/* Select and (re)initialise the MAC operations matching the current
 * 1G/10G mode.  Must be called with efx->mac_lock held.  The
 * EE_STRAP setting is pushed on B0 even when the MAC is unchanged,
 * since this runs after an online reset.  Returns 0 or the error
 * from falcon_reset_macs().
 */
int falcon_switch_mac(struct efx_nic *efx)
{
	struct efx_mac_operations *old_mac_op = efx->mac_op;
	efx_oword_t nic_stat;
	unsigned strap_val;
	int rc = 0;

	/* Don't try to fetch MAC stats while we're switching MACs */
	efx_stats_disable(efx);

	/* Internal loopbacks override the phy speed setting */
	if (efx->loopback_mode == LOOPBACK_GMAC) {
		efx->link_speed = 1000;
		efx->link_fd = true;
	} else if (LOOPBACK_INTERNAL(efx)) {
		efx->link_speed = 10000;
		efx->link_fd = true;
	}

	WARN_ON(!mutex_is_locked(&efx->mac_lock));
	/* Pick the operations table for the active MAC */
	efx->mac_op = (EFX_IS10G(efx) ?
		       &falcon_xmac_operations : &falcon_gmac_operations);

	/* Always push the NIC_STAT_REG setting even if the mac hasn't
	 * changed, because this function is run post online reset */
	falcon_read(efx, &nic_stat, NIC_STAT_REG);
	strap_val = EFX_IS10G(efx) ? 5 : 3;
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1);
		EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val);
		falcon_write(efx, &nic_stat, NIC_STAT_REG);
	} else {
		/* Falcon A1 does not support 1G/10G speed switching
		 * and must not be used with a PHY that does. */
		BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
	}

	/* Nothing more to do if the active MAC did not change */
	if (old_mac_op == efx->mac_op)
		goto out;

	EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
	/* Not all macs support a mac-level link state */
	efx->mac_up = true;

	rc = falcon_reset_macs(efx);
out:
	efx_stats_enable(efx);
	return rc;
}
/* This call is responsible for hooking in the MAC and PHY operations:
 * selects the PHY ops table, fills in the MDIO accessors, chooses the
 * default flow-control mode and allocates the MAC statistics buffer.
 */
int falcon_probe_port(struct efx_nic *efx)
{
	int rc;

	/* Hook in PHY operations table */
	rc = falcon_probe_phy(efx);
	if (rc)
		return rc;

	/* Set up MDIO structure for PHY */
	efx->mdio.mmds = efx->phy_op->mmds;
	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	efx->mdio.mdio_read = falcon_mdio_read;
	efx->mdio.mdio_write = falcon_mdio_write;

	/* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
	efx->wanted_fc = (falcon_rev(efx) >= FALCON_REV_B0) ?
		(EFX_FC_RX | EFX_FC_TX) : EFX_FC_RX;

	/* Allocate buffer for stats */
	rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
				 FALCON_MAC_STATS_SIZE);
	if (rc)
		return rc;
	EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
		(u64)efx->stats_buffer.dma_addr,
		efx->stats_buffer.addr,
		(u64)virt_to_phys(efx->stats_buffer.addr));

	return 0;
}
/* Release the port resources allocated by falcon_probe_port() */
void falcon_remove_port(struct efx_nic *efx)
{
	falcon_free_buffer(efx, &efx->stats_buffer);
}
/**************************************************************************
*
* Multicast filtering
*
**************************************************************************
*/
/* Push the software multicast hash filter into the two hardware
 * multicast hash registers, always including the broadcast bit.
 */
void falcon_set_multicast_hash(struct efx_nic *efx)
{
	union efx_multicast_hash *hash = &efx->multicast_hash;

	/* Broadcast packets go through the multicast hash filter.
	 * ether_crc_le() of the broadcast address is 0xbe2612ff
	 * so we always add bit 0xff to the mask.
	 */
	set_bit_le(0xff, hash->byte);

	falcon_write(efx, &hash->oword[0], MAC_MCAST_HASH_REG0_KER);
	falcon_write(efx, &hash->oword[1], MAC_MCAST_HASH_REG1_KER);
}
/**************************************************************************
*
* Falcon test code
*
**************************************************************************/
/* Read the NVRAM configuration region from flash (preferred) or
 * EEPROM, validate its magic number, structure version and 16-bit
 * one's-complement style checksum, and optionally copy the nvconfig
 * structure to @nvconfig_out.  Returns 0, -EINVAL on bad contents,
 * -EIO on a read failure or -ENOMEM.
 */
int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
	struct falcon_nvconfig *nvconfig;
	struct efx_spi_device *spi;
	void *region;
	int rc, magic, version;
	__le16 *word, *limit;
	u32 checksum;

	spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
	if (!spi)
		return -EINVAL;

	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	nvconfig = region + NVCONFIG_OFFSET;

	mutex_lock(&efx->spi_lock);
	rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
	mutex_unlock(&efx->spi_lock);
	if (rc) {
		EFX_ERR(efx, "Failed to read %s\n",
			efx->spi_flash ? "flash" : "EEPROM");
		rc = -EIO;
		goto out;
	}

	magic = le16_to_cpu(nvconfig->board_magic_num);
	version = le16_to_cpu(nvconfig->board_struct_ver);

	rc = -EINVAL;
	if (magic != NVCONFIG_BOARD_MAGIC_NUM) {
		EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic);
		goto out;
	}
	if (version < 2) {
		EFX_ERR(efx, "NVRAM has ancient version 0x%x\n", version);
		goto out;
	}

	/* The checksummed span depends on the structure version */
	if (version < 4) {
		word = &nvconfig->board_magic_num;
		limit = (__le16 *) (nvconfig + 1);
	} else {
		word = region;
		limit = region + FALCON_NVCONFIG_END;
	}
	for (checksum = 0; word < limit; ++word)
		checksum += le16_to_cpu(*word);
	if (~checksum & 0xffff) {
		EFX_ERR(efx, "NVRAM has incorrect checksum\n");
		goto out;
	}

	rc = 0;
	if (nvconfig_out)
		memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));

out:
	kfree(region);
	return rc;
}
/* Registers tested in the falcon register test */
/* Each entry pairs a register address with the mask of bits that
 * falcon_test_registers() may safely toggle on that register. */
static struct {
	unsigned address;	/* register address */
	efx_oword_t mask;	/* testable-bit mask */
} efx_test_registers[] = {
	{ ADR_REGION_REG_KER,
	  EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
	{ RX_CFG_REG_KER,
	  EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
	{ TX_CFG_REG_KER,
	  EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
	{ TX_CFG2_REG_KER,
	  EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
	{ MAC0_CTRL_REG_KER,
	  EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
	{ SRM_TX_DC_CFG_REG_KER,
	  EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ RX_DC_CFG_REG_KER,
	  EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
	{ RX_DC_PF_WM_REG_KER,
	  EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
	{ DP_CTRL_REG,
	  EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
	{ GM_CFG2_REG,
	  EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
	{ GMF_CFG0_REG,
	  EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_GLB_CFG_REG,
	  EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_TX_CFG_REG,
	  EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_RX_CFG_REG,
	  EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_RX_PARAM_REG,
	  EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_FC_REG,
	  EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
	{ XM_ADR_LO_REG,
	  EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
	{ XX_SD_CTL_REG,
	  EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
const efx_oword_t *mask)
{
return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
/* Walk the efx_test_registers[] table and, for every testable bit,
 * verify it can be set and cleared in isolation; each register's
 * original value is restored afterwards.  Returns 0 or -EIO on the
 * first mismatch.
 *
 * Fix: restores "&reg" mangled into the "(R)" glyph by an encoding error.
 */
int falcon_test_registers(struct efx_nic *efx)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < ARRAY_SIZE(efx_test_registers); ++i) {
		address = efx_test_registers[i].address;
		mask = imask = efx_test_registers[i].mask;
		EFX_INVERT_OWORD(imask);

		falcon_read(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			falcon_write(efx, &reg, address);
			falcon_read(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			falcon_write(efx, &reg, address);
			falcon_read(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		falcon_write(efx, &original, address);
	}

	return 0;

fail:
	EFX_ERR(efx, "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
/**************************************************************************
*
* Device reset
*
**************************************************************************
*/
/* Resets NIC to known state. This routine must be called in process
 * context and is allowed to sleep. */
int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t glb_ctl_reg_ker;
	int rc;

	EFX_LOG(efx, "performing hardware reset (%d)\n", method);

	/* Initiate device reset */
	if (method == RESET_TYPE_WORLD) {
		/* PCI config of both functions is saved before the reset
		 * and restored afterwards */
		rc = pci_save_state(efx->pci_dev);
		if (rc) {
			EFX_ERR(efx, "failed to backup PCI state of primary "
				"function prior to hardware reset\n");
			goto fail1;
		}
		if (FALCON_IS_DUAL_FUNC(efx)) {
			rc = pci_save_state(nic_data->pci_dev2);
			if (rc) {
				EFX_ERR(efx, "failed to backup PCI state of "
					"secondary function prior to "
					"hardware reset\n");
				goto fail2;
			}
		}

		EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
				     EXT_PHY_RST_DUR, 0x7,
				     SWRST, 1);
	} else {
		/* Non-world reset: exclude PCIe core/sticky state and
		 * EEPROM; for an invisible reset exclude the PHY too */
		int reset_phy = (method == RESET_TYPE_INVISIBLE ?
				 EXCLUDE_FROM_RESET : 0);

		EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
				     EXT_PHY_RST_CTL, reset_phy,
				     PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET,
				     PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET,
				     PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET,
				     EE_RST_CTL, EXCLUDE_FROM_RESET,
				     EXT_PHY_RST_DUR, 0x7 /* 10ms */,
				     SWRST, 1);
	}
	falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);

	EFX_LOG(efx, "waiting for hardware reset\n");
	schedule_timeout_uninterruptible(HZ / 20);

	/* Restore PCI configuration if needed */
	if (method == RESET_TYPE_WORLD) {
		if (FALCON_IS_DUAL_FUNC(efx)) {
			rc = pci_restore_state(nic_data->pci_dev2);
			if (rc) {
				EFX_ERR(efx, "failed to restore PCI config for "
					"the secondary function\n");
				goto fail3;
			}
		}
		rc = pci_restore_state(efx->pci_dev);
		if (rc) {
			EFX_ERR(efx, "failed to restore PCI config for the "
				"primary function\n");
			goto fail4;
		}
		EFX_LOG(efx, "successfully restored PCI config\n");
	}

	/* Assert that reset complete */
	falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
	if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) {
		rc = -ETIMEDOUT;
		EFX_ERR(efx, "timed out waiting for hardware reset\n");
		goto fail5;
	}
	EFX_LOG(efx, "hardware reset complete\n");

	return 0;

	/* pci_save_state() and pci_restore_state() MUST be called in pairs */
fail2:
fail3:
	/* Balance the primary-function pci_save_state() above */
	pci_restore_state(efx->pci_dev);
fail1:
fail4:
fail5:
	return rc;
}
/* Zeroes out the SRAM contents. This routine must be called in
 * process context and is allowed to sleep.
 * Polls up to 20 times (about 0.4s) for the reset to complete;
 * returns 0 on completion or -ETIMEDOUT.
 */
static int falcon_reset_sram(struct efx_nic *efx)
{
	efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
	int attempt;

	/* Set the SRAM wake/sleep GPIO appropriately. */
	falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
	EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1);
	falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);

	/* Initiate SRAM reset */
	EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
			     SRAM_OOB_BT_INIT_EN, 1,
			     SRM_NUM_BANKS_AND_BANK_SIZE, 0);
	falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);

	/* Wait for SRAM reset to complete */
	for (attempt = 0; attempt < 20; attempt++) {
		EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n",
			attempt);

		/* SRAM reset is slow; expect around 16ms */
		schedule_timeout_uninterruptible(HZ / 50);

		/* Check for reset complete */
		falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
		if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) {
			EFX_LOG(efx, "SRAM reset complete\n");
			return 0;
		}
	}

	EFX_ERR(efx, "timed out waiting for SRAM reset\n");
	return -ETIMEDOUT;
}
/* Build a SPI device descriptor from the packed @device_type word and
 * store it in *@spi_device_ret, replacing (and freeing) any previous
 * descriptor.  A @device_type of 0 means "no device" and stores NULL.
 * Returns 0 or -ENOMEM.
 */
static int falcon_spi_device_init(struct efx_nic *efx,
				  struct efx_spi_device **spi_device_ret,
				  unsigned int device_id, u32 device_type)
{
	struct efx_spi_device *spi_device = NULL;

	if (device_type != 0) {
		spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
		if (!spi_device)
			return -ENOMEM;

		spi_device->device_id = device_id;
		spi_device->size =
			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
		spi_device->addr_len =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
		/* 512-byte parts with a 1-byte address need bit 8 of the
		 * address folded into the command byte */
		spi_device->munge_address = (spi_device->size == 1 << 9 &&
					     spi_device->addr_len == 1);
		spi_device->erase_command =
			SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
		spi_device->erase_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_ERASE_SIZE);
		spi_device->block_size =
			1 << SPI_DEV_TYPE_FIELD(device_type,
						SPI_DEV_TYPE_BLOCK_SIZE);
		spi_device->efx = efx;
	}

	kfree(*spi_device_ret);
	*spi_device_ret = spi_device;
	return 0;
}
/* Free any SPI device descriptors allocated during probe and clear
 * the pointers so a subsequent probe can run safely. */
static void falcon_remove_spi_devices(struct efx_nic *efx)
{
	kfree(efx->spi_eeprom);
	efx->spi_eeprom = NULL;
	kfree(efx->spi_flash);
	efx->spi_flash = NULL;
}
/* Extract non-volatile configuration */
static int falcon_probe_nvconfig(struct efx_nic *efx)
{
	struct falcon_nvconfig *nvconfig;
	int board_rev;
	int rc;

	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
	if (!nvconfig)
		return -ENOMEM;

	rc = falcon_read_nvram(efx, nvconfig);
	if (rc == -EINVAL) {
		/* Invalid NVRAM contents: fall back to defaults.
		 * NOTE(review): nvconfig is NOT filled in on this path
		 * (falcon_read_nvram copies out only on success), so the
		 * mac_address memcpy below reads uninitialised heap
		 * memory - consider kzalloc; confirm intended. */
		EFX_ERR(efx, "NVRAM is invalid therefore using defaults\n");
		efx->phy_type = PHY_TYPE_NONE;
		efx->mdio.prtad = MDIO_PRTAD_NONE;
		board_rev = 0;
		rc = 0;
	} else if (rc) {
		goto fail1;
	} else {
		struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
		struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;

		efx->phy_type = v2->port0_phy_type;
		efx->mdio.prtad = v2->port0_phy_addr;
		board_rev = le16_to_cpu(v2->board_revision);

		/* v3 structures additionally describe the SPI devices */
		if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
			__le32 fl = v3->spi_device_type[EE_SPI_FLASH];
			__le32 ee = v3->spi_device_type[EE_SPI_EEPROM];
			rc = falcon_spi_device_init(efx, &efx->spi_flash,
						    EE_SPI_FLASH,
						    le32_to_cpu(fl));
			if (rc)
				goto fail2;
			rc = falcon_spi_device_init(efx, &efx->spi_eeprom,
						    EE_SPI_EEPROM,
						    le32_to_cpu(ee));
			if (rc)
				goto fail2;
		}
	}

	/* Read the MAC addresses */
	memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);

	EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);

	efx_set_board_info(efx, board_rev);

	kfree(nvconfig);
	return 0;

fail2:
	falcon_remove_spi_devices(efx);
fail1:
	kfree(nvconfig);
	return rc;
}
/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
 * count, port speed). Set workaround and feature flags accordingly.
 * Returns 0 or -ENODEV for unsupported hardware.
 */
static int falcon_probe_nic_variant(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_oword_t nic_stat;

	/* A non-zero Altera build version means an FPGA, which is
	 * not supported by this driver */
	falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
	if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
		EFX_ERR(efx, "Falcon FPGA not supported\n");
		return -ENODEV;
	}

	falcon_read(efx, &nic_stat, NIC_STAT_REG);

	switch (falcon_rev(efx)) {
	case FALCON_REV_A0:
	case 0xff:	/* 0xff indicates an unreadable revision register */
		EFX_ERR(efx, "Falcon rev A0 not supported\n");
		return -ENODEV;

	case FALCON_REV_A1:
		/* A1 is only supported in PCIe mode, not PCI-X */
		if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
			EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
			return -ENODEV;
		}
		break;

	case FALCON_REV_B0:
		break;

	default:
		EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
		return -ENODEV;
	}

	/* Initial assumed speed */
	efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000;

	return 0;
}
/* Probe all SPI devices on the NIC */
static void falcon_probe_spi_devices(struct efx_nic *efx)
{
	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
	int boot_dev;	/* EE_SPI_FLASH, EE_SPI_EEPROM or -1 for none */

	falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER);
	falcon_read(efx, &nic_stat, NIC_STAT_REG);
	falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);

	if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) {
		/* SF_PRST distinguishes a flash boot from an EEPROM boot */
		boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ?
			    EE_SPI_FLASH : EE_SPI_EEPROM);
		EFX_LOG(efx, "Booted from %s\n",
			boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM");
	} else {
		/* Disable VPD and set clock dividers to safe
		 * values for initial programming. */
		boot_dev = -1;
		EFX_LOG(efx, "Booted from internal ASIC settings;"
			" setting SPI config\n");
		EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0,
				     /* 125 MHz / 7 ~= 20 MHz */
				     EE_SF_CLOCK_DIV, 7,
				     /* 125 MHz / 63 ~= 2 MHz */
				     EE_EE_CLOCK_DIV, 63);
		falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER);
	}

	/* Register a descriptor for whichever device we booted from,
	 * using default device-type parameters */
	if (boot_dev == EE_SPI_FLASH)
		falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH,
				       default_flash_type);
	if (boot_dev == EE_SPI_EEPROM)
		falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM,
				       large_eeprom_type);
}
/* Probe the NIC: determine the hardware variant, find the secondary
 * PCI function on dual-function boards, reset the device, allocate
 * the interrupt status buffer, read the NVRAM configuration and
 * register the board's bit-banged I2C adapter.  Unwinds everything
 * acquired so far on any failure. */
int falcon_probe_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data;
	int rc;

	/* Allocate storage for hardware specific data */
	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	/* Determine number of ports etc. */
	rc = falcon_probe_nic_variant(efx);
	if (rc)
		goto fail1;

	/* Probe secondary function if expected */
	if (FALCON_IS_DUAL_FUNC(efx)) {
		struct pci_dev *dev = pci_dev_get(efx->pci_dev);

		/* The secondary function sits at devfn + 1 on the same bus */
		while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
					     dev))) {
			if (dev->bus == efx->pci_dev->bus &&
			    dev->devfn == efx->pci_dev->devfn + 1) {
				nic_data->pci_dev2 = dev;
				break;
			}
		}
		if (!nic_data->pci_dev2) {
			EFX_ERR(efx, "failed to find secondary function\n");
			rc = -ENODEV;
			goto fail2;
		}
	}

	/* Now we can reset the NIC */
	rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
	if (rc) {
		EFX_ERR(efx, "failed to reset NIC\n");
		goto fail3;
	}

	/* Allocate memory for INT_KER */
	rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
	if (rc)
		goto fail4;
	/* The interrupt status buffer must be 16-byte aligned */
	BUG_ON(efx->irq_status.dma_addr & 0x0f);

	EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
		(u64)efx->irq_status.dma_addr,
		efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));

	falcon_probe_spi_devices(efx);

	/* Read in the non-volatile configuration */
	rc = falcon_probe_nvconfig(efx);
	if (rc)
		goto fail5;

	/* Initialise I2C adapter */
	efx->i2c_adap.owner = THIS_MODULE;
	nic_data->i2c_data = falcon_i2c_bit_operations;
	nic_data->i2c_data.data = efx;
	efx->i2c_adap.algo_data = &nic_data->i2c_data;
	efx->i2c_adap.dev.parent = &efx->pci_dev->dev;
	strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name));
	rc = i2c_bit_add_bus(&efx->i2c_adap);
	if (rc)
		goto fail5;

	return 0;

	/* Unwind in reverse order of acquisition */
fail5:
	falcon_remove_spi_devices(efx);
	falcon_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}
fail2:
fail1:
	kfree(efx->nic_data);
	return rc;
}
/* This call performs hardware-specific global initialisation, such as
 * defining the descriptor cache sizes and number of RSS channels.
 * It does not set up any buffers, descriptor rings or event queues.
 */
int falcon_init_nic(struct efx_nic *efx)
{
	efx_oword_t temp;
	unsigned thresh;
	int rc;

	/* Use on-chip SRAM */
	falcon_read(efx, &temp, NIC_STAT_REG);
	EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
	falcon_write(efx, &temp, NIC_STAT_REG);

	/* Set the source of the GMAC clock */
	if (falcon_rev(efx) == FALCON_REV_B0) {
		falcon_read(efx, &temp, GPIO_CTL_REG_KER);
		EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true);
		falcon_write(efx, &temp, GPIO_CTL_REG_KER);
	}

	/* Set buffer table mode */
	EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
	falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);

	/* SRAM reset is performed after selecting on-chip SRAM above */
	rc = falcon_reset_sram(efx);
	if (rc)
		return rc;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
	falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER);
	EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
	falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	falcon_write(efx, &temp, TX_DC_CFG_REG_KER);

	/* Set RX descriptor cache size. Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	falcon_write(efx, &temp, RX_DC_CFG_REG_KER);
	EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER);

	/* Clear the parity enables on the TX data fifos as
	 * they produce false parity errors because of timing issues
	 */
	if (EFX_WORKAROUND_5129(efx)) {
		falcon_read(efx, &temp, SPARE_REG_KER);
		EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0);
		falcon_write(efx, &temp, SPARE_REG_KER);
	}

	/* Enable all the genuinely fatal interrupts. (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     ILL_ADR_INT_KER_EN, 1,
			     RBUF_OWN_INT_KER_EN, 1,
			     TBUF_OWN_INT_KER_EN, 1);
	/* The populated word is inverted before the write - the three
	 * named bits end up 0, everything else 1 */
	EFX_INVERT_OWORD(temp);
	falcon_write(efx, &temp, FATAL_INTR_REG_KER);

	/* Limit filter-table search depths */
	if (EFX_WORKAROUND_7244(efx)) {
		falcon_read(efx, &temp, RX_FILTER_CTL_REG);
		EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
		EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
		falcon_write(efx, &temp, RX_FILTER_CTL_REG);
	}

	falcon_setup_rss_indir_table(efx);

	/* Setup RX.  Wait for descriptor is broken and must
	 * be disabled.  RXDP recovery shouldn't be needed, but is.
	 */
	falcon_read(efx, &temp, RX_SELF_RST_REG_KER);
	EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1);
	EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1);
	if (EFX_WORKAROUND_5583(efx))
		EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1);
	falcon_write(efx, &temp, RX_SELF_RST_REG_KER);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	falcon_read(efx, &temp, TX_CFG2_REG_KER);
	EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
	/* Squash TX of packets of 16 bytes or less */
	if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
		EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
	falcon_write(efx, &temp, TX_CFG2_REG_KER);

	/* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	falcon_read(efx, &temp, TX_CFG_REG_KER);
	EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0);
	falcon_write(efx, &temp, TX_CFG_REG_KER);

	/* RX config */
	falcon_read(efx, &temp, RX_CFG_REG_KER);
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
	if (EFX_WORKAROUND_7575(efx))
		EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
					(3 * 4096) / 32);
	if (falcon_rev(efx) >= FALCON_REV_B0)
		EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);

	/* RX FIFO flow control thresholds; module parameters override
	 * the per-type defaults when non-negative */
	thresh = ((rx_xon_thresh_bytes >= 0) ?
		  rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
	thresh = ((rx_xoff_thresh_bytes >= 0) ?
		  rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
	/* RX control FIFO thresholds [32 entries] */
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
	EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
	falcon_write(efx, &temp, RX_CFG_REG_KER);

	/* Set destination of both TX and RX Flush events */
	if (falcon_rev(efx) >= FALCON_REV_B0) {
		EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
		falcon_write(efx, &temp, DP_CTRL_REG);
	}

	return 0;
}
/* Tear down a Falcon NIC's board-level resources and private state.
 * Order matters here: the I2C adapter and SPI devices go first, the
 * hardware is reset before the secondary PCI function is released,
 * and nic_data is freed last since it is read along the way.
 */
void falcon_remove_nic(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	/* Remove I2C adapter and clear it in preparation for a retry */
	rc = i2c_del_adapter(&efx->i2c_adap);
	BUG_ON(rc);
	memset(&efx->i2c_adap, 0, sizeof(efx->i2c_adap));

	falcon_remove_spi_devices(efx);
	falcon_free_buffer(efx, &efx->irq_status);

	falcon_reset_hw(efx, RESET_TYPE_ALL);

	/* Release the second function after the reset */
	if (nic_data->pci_dev2) {
		pci_dev_put(nic_data->pci_dev2);
		nic_data->pci_dev2 = NULL;
	}

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}
/* Accumulate the hardware RX no-descriptor drop counter into the
 * driver's running statistics.
 */
void falcon_update_nic_stats(struct efx_nic *efx)
{
	efx_oword_t drop_reg;

	falcon_read(efx, &drop_reg, RX_NODESC_DROP_REG_KER);
	efx->n_rx_nodesc_drop_cnt +=
		EFX_OWORD_FIELD(drop_reg, RX_NODESC_DROP_CNT);
}
/**************************************************************************
*
* Revision-dependent attributes used by efx.c
*
**************************************************************************
*/
/* Register layout and limits for Falcon revision A silicon, consumed
 * by the revision-independent code via efx->type.
 */
struct efx_nic_type falcon_a_nic_type = {
	.mem_bar = 2,
	.mem_map_size = 0x20000,	/* 128KiB register window */
	.txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
	.rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1,
	.buf_tbl_base = BUF_TBL_KER_A1,
	.evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1,
	.evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1,
	.txd_ring_mask = FALCON_TXD_RING_MASK,
	.rxd_ring_mask = FALCON_RXD_RING_MASK,
	.evq_size = FALCON_EVQ_SIZE,
	.max_dma_mask = FALCON_DMA_MASK,
	.tx_dma_mask = FALCON_TX_DMA_MASK,
	.bug5391_mask = 0xf,
	/* Default XON/XOFF byte thresholds, used when the corresponding
	 * rx_xo{n,ff}_thresh_bytes module parameters are negative.
	 */
	.rx_xoff_thresh = 2048,
	.rx_xon_thresh = 512,
	.rx_buffer_padding = 0x24,
	.max_interrupt_mode = EFX_INT_MODE_MSI,
	.phys_addr_channels = 4,
};
/* Register layout and limits for Falcon revision B silicon (supports
 * MSI-X and RSS; see mem_map_size note below).
 */
struct efx_nic_type falcon_b_nic_type = {
	.mem_bar = 2,
	/* Map everything up to and including the RSS indirection
	 * table.  Don't map MSI-X table, MSI-X PBA since Linux
	 * requires that they not be mapped.  */
	.mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800,
	.txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0,
	.rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0,
	.buf_tbl_base = BUF_TBL_KER_B0,
	.evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0,
	.evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0,
	.txd_ring_mask = FALCON_TXD_RING_MASK,
	.rxd_ring_mask = FALCON_RXD_RING_MASK,
	.evq_size = FALCON_EVQ_SIZE,
	.max_dma_mask = FALCON_DMA_MASK,
	.tx_dma_mask = FALCON_TX_DMA_MASK,
	.bug5391_mask = 0,
	/* Default flow-control thresholds in bytes, overridable by the
	 * rx_xo{n,ff}_thresh_bytes module parameters.
	 */
	.rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
	.rx_xon_thresh = 27648, /* ~3*max MTU */
	.rx_buffer_padding = 0,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
};
| gpl-2.0 |
Oceandragon/engine | code/opus-1.1/silk/table_LSF_cos.c | 481 | 4191 | /***********************************************************************
Copyright (c) 2006-2011, Skype Limited. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of Internet Society, IETF or IETF Trust, nor the
names of specific contributors, may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
***********************************************************************/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "tables.h"
/* Cosine approximation table for LSF conversion */
/* Q12 values (even) */
/* Entry n approximates cos(pi * n / LSF_COS_TAB_SZ_FIX), scaled so
 * that cos(0) maps to 8192 and cos(pi) to -8192.  All entries are
 * even, and the second half of the table is the negated mirror of
 * the first (cos(pi - x) = -cos(x)); the extra final entry closes
 * the half period, giving LSF_COS_TAB_SZ_FIX + 1 values in total.
 */
const opus_int16 silk_LSFCosTab_FIX_Q12[ LSF_COS_TAB_SZ_FIX + 1 ] = {
	8192,  8190,  8182,  8170,
	8152,  8130,  8104,  8072,
	8034,  7994,  7946,  7896,
	7840,  7778,  7714,  7644,
	7568,  7490,  7406,  7318,
	7226,  7128,  7026,  6922,
	6812,  6698,  6580,  6458,
	6332,  6204,  6070,  5934,
	5792,  5648,  5502,  5352,
	5198,  5040,  4880,  4718,
	4552,  4382,  4212,  4038,
	3862,  3684,  3502,  3320,
	3136,  2948,  2760,  2570,
	2378,  2186,  1990,  1794,
	1598,  1400,  1202,  1002,
	802,   602,   402,   202,
	0,    -202,  -402,  -602,
	-802, -1002, -1202, -1400,
	-1598, -1794, -1990, -2186,
	-2378, -2570, -2760, -2948,
	-3136, -3320, -3502, -3684,
	-3862, -4038, -4212, -4382,
	-4552, -4718, -4880, -5040,
	-5198, -5352, -5502, -5648,
	-5792, -5934, -6070, -6204,
	-6332, -6458, -6580, -6698,
	-6812, -6922, -7026, -7128,
	-7226, -7318, -7406, -7490,
	-7568, -7644, -7714, -7778,
	-7840, -7896, -7946, -7994,
	-8034, -8072, -8104, -8130,
	-8152, -8170, -8182, -8190,
	-8192
};
| gpl-2.0 |
virtuous/kernel-vivow-gingerbread-v2 | drivers/mtd/nand/davinci_nand.c | 737 | 24339 | /*
* davinci_nand.c - NAND Flash Driver for DaVinci family chips
*
* Copyright © 2006 Texas Instruments.
*
* Port to 2.6.23 Copyright © 2008 by:
* Sander Huijsen <Shuijsen@optelecom-nkf.com>
* Troy Kisky <troy.kisky@boundarydevices.com>
* Dirk Behme <Dirk.Behme@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
#include <mach/nand.h>
#include <asm/mach-types.h>
/*
* This is a device driver for the NAND flash controller found on the
* various DaVinci family chips. It handles up to four SoC chipselects,
* and some flavors of secondary chipselect (e.g. based on A12) as used
* with multichip packages.
*
* The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
* available on chips like the DM355 and OMAP-L137 and needed with the
* more error-prone MLC NAND chips.
*
* This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
* outputs in a "wire-AND" configuration, with no per-chip signals.
*/
/*
 * Per-device driver state: MTD/NAND bookkeeping plus the AEMIF
 * addresses and the address-line masks used to drive ALE/CLE and
 * the optional secondary chipselect.
 */
struct davinci_nand_info {
	struct mtd_info		mtd;
	struct nand_chip	chip;
	struct nand_ecclayout	ecclayout;	/* private OOB layout for 4-bit ECC */

	struct device		*dev;
	struct clk		*clk;
	bool			partitioned;	/* set once partitions registered */

	bool			is_readmode;	/* 4-bit ECC: last hwctl() was for a read */

	void __iomem		*base;		/* AEMIF controller registers */
	void __iomem		*vaddr;		/* NAND data window */

	uint32_t		ioaddr;		/* vaddr as integer, for bit masking */
	uint32_t		current_cs;	/* ioaddr plus chipselect bit(s) */

	uint32_t		mask_chipsel;	/* address bit(s) selecting 2nd chip */
	uint32_t		mask_ale;	/* address bit wired to ALE */
	uint32_t		mask_cle;	/* address bit wired to CLE */

	uint32_t		core_chipsel;	/* SoC chipselect index (pdev->id) */
};
/* Serializes NANDFCR updates and ownership of the single 4-bit ECC engine */
static DEFINE_SPINLOCK(davinci_nand_lock);
static bool ecc4_busy;	/* set while some chipselect owns the 4-bit ECC hardware */

#define to_davinci_nand(m) container_of(m, struct davinci_nand_info, mtd)
/* Read one AEMIF controller register at @offset from the register base. */
static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
		int offset)
{
	return __raw_readl(info->base + offset);
}
/* Write @value to the AEMIF controller register at @offset. */
static inline void davinci_nand_writel(struct davinci_nand_info *info,
		int offset, unsigned long value)
{
	__raw_writel(value, info->base + offset);
}
/*----------------------------------------------------------------------*/
/*
* Access to hardware control lines: ALE, CLE, secondary chipselect.
*/
/*
 * Drive the NAND control lines by selecting a write address whose
 * ALE/CLE address bits are wired to the chip's control pins, then
 * (optionally) write the command/address byte there.
 */
static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
				   unsigned int ctrl)
{
	struct davinci_nand_info	*info = to_davinci_nand(mtd);
	uint32_t			addr = info->current_cs;
	struct nand_chip		*nand = mtd->priv;

	/* Did the control lines change? */
	if (ctrl & NAND_CTRL_CHANGE) {
		if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
			addr |= info->mask_cle;
		else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
			addr |= info->mask_ale;

		/* neither ALE nor CLE: back to the plain data address */
		nand->IO_ADDR_W = (void __iomem __force *)addr;
	}

	if (cmd != NAND_CMD_NONE)
		iowrite8(cmd, nand->IO_ADDR_W);
}
/* Select which chip on this chipselect to talk to; any chip other
 * than the first is reached through the secondary-chipselect address
 * bit(s).  Both the read and write data pointers track the result.
 */
static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	uint32_t cs_addr = info->ioaddr;

	/* maybe kick in a second chipselect */
	if (chip > 0)
		cs_addr |= info->mask_chipsel;

	info->current_cs = cs_addr;
	info->chip.IO_ADDR_R = (void __iomem __force *)cs_addr;
	info->chip.IO_ADDR_W = info->chip.IO_ADDR_R;
}
/*----------------------------------------------------------------------*/
/*
* 1-bit hardware ECC ... context maintained for each core chipselect
*/
/* Read this chipselect's 1-bit ECC accumulator register; the caller in
 * nand_davinci_hwctl_1bit() also uses this read to reset the hardware.
 */
static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	return davinci_nand_readl(info, NANDF1ECC_OFFSET
			+ 4 * info->core_chipsel);
}
/* Begin a fresh 1-bit ECC calculation: drain the accumulator with a
 * read, then re-enable ECC generation for this chipselect in NANDFCR.
 */
static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
{
	struct davinci_nand_info *info;
	uint32_t nandcfr;
	unsigned long flags;

	info = to_davinci_nand(mtd);

	/* Reset ECC hardware */
	nand_davinci_readecc_1bit(mtd);

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Restart ECC hardware */
	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
	nandcfr |= BIT(8 + info->core_chipsel);	/* per-chipselect 1-bit ECC enable */
	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}
/*
* Read hardware ECC value and pack into three bytes
*/
/* Pack the hardware's two 12-bit ECC halves into the three-byte code
 * the NAND core stores in OOB.  The value is inverted so an erased
 * (all-0xff) page checks out clean.
 */
static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
				      const u_char *dat, u_char *ecc_code)
{
	unsigned int raw = nand_davinci_readecc_1bit(mtd);
	unsigned int packed = (raw & 0x0fff) | ((raw & 0x0fff0000) >> 4);

	/* invert so that erased block ecc is correct */
	packed = ~packed;

	ecc_code[0] = (u_char)(packed);
	ecc_code[1] = (u_char)(packed >> 8);
	ecc_code[2] = (u_char)(packed >> 16);
	return 0;
}
/* Compare stored vs. computed 1-bit ECC and repair the data if they
 * differ.  Returns 0 for no errors, 1 for a corrected (or harmless)
 * single-bit error, -1 when the damage is uncorrectable.
 */
static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
				     u_char *read_ecc, u_char *calc_ecc)
{
	struct nand_chip *chip = mtd->priv;
	uint32_t ecc_stored = read_ecc[0] | (read_ecc[1] << 8) |
					(read_ecc[2] << 16);
	uint32_t ecc_computed = calc_ecc[0] | (calc_ecc[1] << 8) |
					(calc_ecc[2] << 16);
	uint32_t diff = ecc_stored ^ ecc_computed;

	if (!diff)
		return 0;	/* codes match: clean read */

	if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
		/* Correctable single-bit data error; the upper half of
		 * diff encodes byte offset (>>3) and bit-in-byte (&7).
		 */
		if ((diff >> (12 + 3)) >= chip->ecc.size)
			return -1;
		dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
		return 1;
	}

	if (!(diff & (diff - 1)))
		/* Single bit ECC error in the ECC itself,
		 * nothing to fix */
		return 1;

	/* Uncorrectable error */
	return -1;
}
/*----------------------------------------------------------------------*/
/*
* 4-bit hardware ECC ... context maintained over entire AEMIF
*
* This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
* since that forces use of a problematic "infix OOB" layout.
* Among other things, it trashes manufacturer bad block markers.
* Also, and specific to this hardware, it ECC-protects the "prepad"
* in the OOB ... while having ECC protection for parts of OOB would
* seem useful, the current MTD stack sometimes wants to update the
* OOB without recomputing ECC.
*/
/* Point the shared 4-bit ECC engine at this chipselect and start a
 * calculation, recording the direction for calculate()'s benefit.
 */
static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&davinci_nand_lock, flags);

	/* Start 4-bit ECC calculation for read/write */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val &= ~(0x03 << 4);	/* two-bit chipselect field */
	val |= (info->core_chipsel << 4) | BIT(12);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	info->is_readmode = (mode == NAND_ECC_READ);

	spin_unlock_irqrestore(&davinci_nand_lock, flags);
}
/* Read raw ECC code after writing to NAND. */
static void
nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
{
	/* each register packs two 10-bit values into its halfwords */
	const u32 mask = 0x03ff03ff;

	code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
	code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
	code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
	code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
}
/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
		const u_char *dat, u_char *ecc_code)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	u32 raw_ecc[4], *p;
	unsigned i;

	/* After a read, terminate ECC calculation by a dummy read
	 * of some 4-bit ECC register.  ECC covers everything that
	 * was read; correct() just uses the hardware state, so
	 * ecc_code is not needed.
	 */
	if (info->is_readmode) {
		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
		return 0;
	}

	/* Pack eight raw 10-bit ecc values into ten bytes, making
	 * two passes which each convert four values (in upper and
	 * lower halves of two 32-bit words) into five bytes.  The
	 * ROM boot loader uses this same packing scheme.
	 */
	nand_davinci_readecc_4bit(info, raw_ecc);
	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
		*ecc_code++ =   p[0]        & 0xff;
		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
		*ecc_code++ =  (p[1] >> 18) & 0xff;
	}

	return 0;
}
/* Correct up to 4 bits in data we just read, using state left in the
* hardware plus the ecc_code computed when it was first written.
*/
static int nand_davinci_correct_4bit(struct mtd_info *mtd,
		u_char *data, u_char *ecc_code, u_char *null)
{
	int i;
	struct davinci_nand_info *info = to_davinci_nand(mtd);
	unsigned short ecc10[8];
	unsigned short *ecc16;
	u32 syndrome[4];
	unsigned num_errors, corrected;

	/* All bytes 0xff?  It's an erased page; ignore its ECC. */
	for (i = 0; i < 10; i++) {
		if (ecc_code[i] != 0xff)
			goto compare;
	}
	return 0;

compare:
	/* Unpack ten bytes into eight 10 bit values.  We know we're
	 * little-endian, and use type punning for less shifting/masking.
	 */
	if (WARN_ON(0x01 & (unsigned) ecc_code))
		return -EINVAL;
	ecc16 = (unsigned short *)ecc_code;

	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
	ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
	ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
	ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;

	/* Tell ECC controller about the expected ECC codes. */
	for (i = 7; i >= 0; i--)
		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);

	/* Allow time for syndrome calculation ... then read it.
	 * A syndrome of all zeroes 0 means no detected errors.
	 */
	davinci_nand_readl(info, NANDFSR_OFFSET);
	nand_davinci_readecc_4bit(info, syndrome);
	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
		return 0;

	/*
	 * Clear any previous address calculation by doing a dummy read of an
	 * error address register.
	 */
	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);

	/* Start address calculation, and wait for it to complete.
	 * We _could_ start reading more data while this is working,
	 * to speed up the overall page read.
	 */
	davinci_nand_writel(info, NANDFCR_OFFSET,
			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
	for (;;) {
		u32	fsr = davinci_nand_readl(info, NANDFSR_OFFSET);

		/* NANDFSR bits 11:8 report the address-calculation state */
		switch ((fsr >> 8) & 0x0f) {
		case 0:		/* no error, should not happen */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return 0;
		case 1:		/* five or more errors detected */
			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
			return -EIO;
		case 2:		/* error addresses computed */
		case 3:
			num_errors = 1 + ((fsr >> 16) & 0x03);
			goto correct;
		default:	/* still working on it */
			cpu_relax();
			continue;
		}
	}

correct:
	/* correct each error */
	for (i = 0, corrected = 0; i < num_errors; i++) {
		int error_address, error_value;

		/* each register pair reports two errors, packed in halfwords */
		if (i > 1) {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD2_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL2_OFFSET);
		} else {
			error_address = davinci_nand_readl(info,
						NAND_ERR_ADD1_OFFSET);
			error_value = davinci_nand_readl(info,
						NAND_ERR_ERRVAL1_OFFSET);
		}
		if (i & 1) {
			error_address >>= 16;
			error_value >>= 16;
		}
		/* The (512 + 7) - addr flip converts the 10-bit hardware
		 * offset to a data byte index; indexes >= 512 fall in the
		 * ECC bytes themselves and need no fix in @data.
		 */
		error_address &= 0x3ff;
		error_address = (512 + 7) - error_address;

		if (error_address < 512) {
			data[error_address] ^= error_value;
			corrected++;
		}
	}

	return corrected;
}
/*----------------------------------------------------------------------*/
/*
* NOTE: NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
* how these chips are normally wired. This translates to both 8 and 16
* bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
*
* For now we assume that configuration, or any other one which ignores
* the two LSBs for NAND access ... so we can issue 32-bit reads/writes
* and have that transparently morphed into multiple NAND operations.
*/
/* Read @len data bytes using the widest bus access that both the
 * buffer alignment and the length allow; the controller transparently
 * turns each access into the needed NAND byte cycles.
 */
static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;

	if ((((unsigned)buf) | (unsigned)len) % 4 == 0)
		ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
	else if ((((unsigned)buf) | (unsigned)len) % 2 == 0)
		ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
	else
		ioread8_rep(chip->IO_ADDR_R, buf, len);
}
/* Write @len data bytes, using the widest bus access that the buffer
 * alignment and length allow.
 *
 * Fix: write through IO_ADDR_W rather than IO_ADDR_R.  The two always
 * hold the same data-window address during the data phase (hwcontrol()
 * resets IO_ADDR_W to current_cs whenever ALE/CLE drop, and
 * select_chip() sets both from the same address), but the write
 * pointer is the correct handle for output and keeps this symmetric
 * with nand_davinci_read_buf().
 */
static void nand_davinci_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd->priv;

	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
		iowrite32_rep(chip->IO_ADDR_W, buf, len >> 2);
	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
		iowrite16_rep(chip->IO_ADDR_W, buf, len >> 1);
	else
		iowrite8_rep(chip->IO_ADDR_W, buf, len);
}
/*
* Check hardware register for wait status. Returns 1 if device is ready,
* 0 if it is still busy.
*/
static int nand_davinci_dev_ready(struct mtd_info *mtd)
{
	struct davinci_nand_info *info = to_davinci_nand(mtd);

	/* EM_WAIT (wire-ANDed RDY/nBUSY) shows up as NANDFSR bit 0 */
	return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
}
/* Program the DM6446 EVM's A1CR async-memory timings for NAND.  This
 * is normally the boot loader's job; the register is only rewritten
 * (with a debug warning) when its current value differs.
 */
static void __init nand_dm6446evm_flash_init(struct davinci_nand_info *info)
{
	uint32_t regval, a1cr;

	/*
	 * NAND FLASH timings @ PLL1 == 459 MHz
	 *  - AEMIF.CLK freq   = PLL1/6 = 459/6 = 76.5 MHz
	 *  - AEMIF.CLK period = 1/76.5 MHz = 13.1 ns
	 */
	regval = 0
		| (0 << 31)           /* selectStrobe */
		| (0 << 30)           /* extWait (never with NAND) */
		| (1 << 26)           /* writeSetup      10 ns */
		| (3 << 20)           /* writeStrobe     40 ns */
		| (1 << 17)           /* writeHold       10 ns */
		| (0 << 13)           /* readSetup       10 ns */
		| (3 << 7)            /* readStrobe      60 ns */
		| (0 << 4)            /* readHold        10 ns */
		| (3 << 2)            /* turnAround      ?? ns */
		| (0 << 0)            /* asyncSize       8-bit bus */
		;
	a1cr = davinci_nand_readl(info, A1CR_OFFSET);
	if (a1cr != regval) {
		dev_dbg(info->dev, "Warning: NAND config: Set A1CR " \
			"reg to 0x%08x, was 0x%08x, should be done by " \
			"bootloader.\n", regval, a1cr);
		davinci_nand_writel(info, A1CR_OFFSET, regval);
	}
}
/*----------------------------------------------------------------------*/
/* An ECC layout for using 4-bit ECC with small-page flash, storing
* ten ECC bytes plus the manufacturer's bad block marker byte, and
* and not overlapping the default BBT markers.
*/
static struct nand_ecclayout hwecc4_small __initconst = {
	.eccbytes = 10,
	.eccpos = { 0, 1, 2, 3, 4,
		/* offset 5 holds the badblock marker */
		6, 7,
		13, 14, 15, },
	.oobfree = {
		{.offset = 8, .length = 5, },
		/* .length of the tail region is filled in by probe()
		 * from the chip's actual oobsize */
		{.offset = 16, },
	},
};
/* An ECC layout for using 4-bit ECC with large-page (2048bytes) flash,
* storing ten ECC bytes plus the manufacturer's bad block marker byte,
* and not overlapping the default BBT markers.
*/
static struct nand_ecclayout hwecc4_2048 __initconst = {
	.eccbytes = 40,
	.eccpos = {
		/* at the end of spare sector: bytes 24..63 of the OOB */
		24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
		34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
		44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
		54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
		},
	.oobfree = {
		/* 2 bytes at offset 0 hold manufacturer badblock markers */
		{.offset = 2, .length = 22, },
		/* 5 bytes at offset 8 hold BBT markers */
		/* 8 bytes at offset 16 hold JFFS2 clean markers */
	},
};
/*
 * Probe one NAND chipselect: map the data window and AEMIF registers,
 * wire up the low-level ops and the board-selected ECC scheme, enable
 * the AEMIF clock, scan for chips, fix up the 4-bit ECC OOB layout,
 * and register the MTD device (with partitions when available).
 *
 * Bug fix in the unwind path: the old code released the shared 4-bit
 * ECC engine only when ecc_mode == NAND_ECC_HW_SYNDROME, a mode this
 * driver never uses (the switch below accepts only NONE/SOFT/HW), so
 * ecc4_busy leaked on every failure after being claimed; the clk_get
 * failure path additionally jumped past the release entirely.
 */
static int __init nand_davinci_probe(struct platform_device *pdev)
{
	struct davinci_nand_pdata	*pdata = pdev->dev.platform_data;
	struct davinci_nand_info	*info;
	struct resource			*res1;
	struct resource			*res2;
	void __iomem			*vaddr;
	void __iomem			*base;
	int				ret;
	uint32_t			val;
	nand_ecc_modes_t		ecc_mode;

	/* insist on board-specific configuration */
	if (!pdata)
		return -ENODEV;

	/* which external chipselect will we be managing? */
	if (pdev->id < 0 || pdev->id > 3)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		dev_err(&pdev->dev, "unable to allocate memory\n");
		ret = -ENOMEM;
		goto err_nomem;
	}

	platform_set_drvdata(pdev, info);

	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res1 || !res2) {
		dev_err(&pdev->dev, "resource missing\n");
		ret = -EINVAL;
		goto err_nomem;
	}

	/* res1: NAND data window; res2: AEMIF control registers */
	vaddr = ioremap(res1->start, resource_size(res1));
	base = ioremap(res2->start, resource_size(res2));
	if (!vaddr || !base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -EINVAL;
		goto err_ioremap;
	}

	info->dev		= &pdev->dev;
	info->base		= base;
	info->vaddr		= vaddr;

	info->mtd.priv		= &info->chip;
	info->mtd.name		= dev_name(&pdev->dev);
	info->mtd.owner		= THIS_MODULE;

	info->mtd.dev.parent	= &pdev->dev;

	info->chip.IO_ADDR_R	= vaddr;
	info->chip.IO_ADDR_W	= vaddr;
	info->chip.chip_delay	= 0;
	info->chip.select_chip	= nand_davinci_select_chip;

	/* options such as NAND_USE_FLASH_BBT or 16-bit widths */
	info->chip.options	= pdata->options;
	info->chip.bbt_td	= pdata->bbt_td;
	info->chip.bbt_md	= pdata->bbt_md;

	info->ioaddr		= (uint32_t __force) vaddr;

	info->current_cs	= info->ioaddr;
	info->core_chipsel	= pdev->id;
	info->mask_chipsel	= pdata->mask_chipsel;

	/* use nandboot-capable ALE/CLE masks by default */
	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
	info->mask_cle		= pdata->mask_cle ? : MASK_CLE;

	/* Set address of hardware control function */
	info->chip.cmd_ctrl	= nand_davinci_hwcontrol;
	info->chip.dev_ready	= nand_davinci_dev_ready;

	/* Speed up buffer I/O */
	info->chip.read_buf	= nand_davinci_read_buf;
	info->chip.write_buf	= nand_davinci_write_buf;

	/* Use board-specific ECC config */
	ecc_mode		= pdata->ecc_mode;

	ret = -EINVAL;
	switch (ecc_mode) {
	case NAND_ECC_NONE:
	case NAND_ECC_SOFT:
		pdata->ecc_bits = 0;
		break;
	case NAND_ECC_HW:
		if (pdata->ecc_bits == 4) {
			/* No sanity checks:  CPUs must support this,
			 * and the chips may not use NAND_BUSWIDTH_16.
			 */

			/* No sharing 4-bit hardware between chipselects yet */
			spin_lock_irq(&davinci_nand_lock);
			if (ecc4_busy)
				ret = -EBUSY;
			else
				ecc4_busy = true;
			spin_unlock_irq(&davinci_nand_lock);

			if (ret == -EBUSY)
				goto err_ecc;

			info->chip.ecc.calculate = nand_davinci_calculate_4bit;
			info->chip.ecc.correct = nand_davinci_correct_4bit;
			info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
			info->chip.ecc.bytes = 10;
		} else {
			info->chip.ecc.calculate = nand_davinci_calculate_1bit;
			info->chip.ecc.correct = nand_davinci_correct_1bit;
			info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
			info->chip.ecc.bytes = 3;
		}
		info->chip.ecc.size = 512;
		break;
	default:
		ret = -EINVAL;
		goto err_ecc;
	}
	info->chip.ecc.mode = ecc_mode;

	info->clk = clk_get(&pdev->dev, "aemif");
	if (IS_ERR(info->clk)) {
		ret = PTR_ERR(info->clk);
		dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
		goto err_clk;
	}

	ret = clk_enable(info->clk);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
			ret);
		goto err_clk_enable;
	}

	/* EMIF timings should normally be set by the boot loader,
	 * especially after boot-from-NAND.  The *only* reason to
	 * have this special casing for the DM6446 EVM is to work
	 * with boot-from-NOR ... with CS0 manually re-jumpered
	 * (after startup) so it addresses the NAND flash, not NOR.
	 * Even for dev boards, that's unusually rude...
	 */
	if (machine_is_davinci_evm())
		nand_dm6446evm_flash_init(info);

	spin_lock_irq(&davinci_nand_lock);

	/* put CSxNAND into NAND mode */
	val = davinci_nand_readl(info, NANDFCR_OFFSET);
	val |= BIT(info->core_chipsel);
	davinci_nand_writel(info, NANDFCR_OFFSET, val);

	spin_unlock_irq(&davinci_nand_lock);

	/* Scan to find existence of the device(s) */
	ret = nand_scan_ident(&info->mtd, pdata->mask_chipsel ? 2 : 1, NULL);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
		goto err_scan;
	}

	/* Update ECC layout if needed ... for 1-bit HW ECC, the default
	 * is OK, but it allocates 6 bytes when only 3 are needed (for
	 * each 512 bytes).  For the 4-bit HW ECC, that default is not
	 * usable:  10 bytes are needed, not 6.
	 */
	if (pdata->ecc_bits == 4) {
		int	chunks = info->mtd.writesize / 512;

		if (!chunks || info->mtd.oobsize < 16) {
			dev_dbg(&pdev->dev, "too small\n");
			ret = -EINVAL;
			goto err_scan;
		}

		/* For small page chips, preserve the manufacturer's
		 * badblock marking data ... and make sure a flash BBT
		 * table marker fits in the free bytes.
		 */
		if (chunks == 1) {
			info->ecclayout = hwecc4_small;
			info->ecclayout.oobfree[1].length =
				info->mtd.oobsize - 16;
			goto syndrome_done;
		}
		if (chunks == 4) {
			info->ecclayout = hwecc4_2048;
			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
			goto syndrome_done;
		}

		/* 4KiB page chips are not yet supported. The eccpos from
		 * nand_ecclayout cannot hold 80 bytes and change to eccpos[]
		 * breaks userspace ioctl interface with mtd-utils. Once we
		 * resolve this issue, NAND_ECC_HW_OOB_FIRST mode can be used
		 * for the 4KiB page chips.
		 */
		dev_warn(&pdev->dev, "no 4-bit ECC support yet "
				"for 4KiB-page NAND\n");
		ret = -EIO;
		goto err_scan;

syndrome_done:
		info->chip.ecc.layout = &info->ecclayout;
	}

	ret = nand_scan_tail(&info->mtd);
	if (ret < 0)
		goto err_scan;

	if (mtd_has_partitions()) {
		struct mtd_partition	*mtd_parts = NULL;
		int			mtd_parts_nb = 0;

		if (mtd_has_cmdlinepart()) {
			static const char *probes[] __initconst =
				{ "cmdlinepart", NULL };

			mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
							    &mtd_parts, 0);
		}

		if (mtd_parts_nb <= 0) {
			mtd_parts = pdata->parts;
			mtd_parts_nb = pdata->nr_parts;
		}

		/* Register any partitions */
		if (mtd_parts_nb > 0) {
			ret = add_mtd_partitions(&info->mtd,
					mtd_parts, mtd_parts_nb);
			if (ret == 0)
				info->partitioned = true;
		}

	} else if (pdata->nr_parts) {
		dev_warn(&pdev->dev, "ignoring %d default partitions on %s\n",
				pdata->nr_parts, info->mtd.name);
	}

	/* If there's no partition info, just package the whole chip
	 * as a single MTD device.
	 */
	if (!info->partitioned)
		ret = add_mtd_device(&info->mtd) ? -ENODEV : 0;

	if (ret < 0)
		goto err_scan;

	val = davinci_nand_readl(info, NRCSR_OFFSET);
	dev_info(&pdev->dev, "controller rev. %d.%d\n",
	       (val >> 8) & 0xff, val & 0xff);

	return 0;

err_scan:
	clk_disable(info->clk);

err_clk_enable:
	clk_put(info->clk);

err_clk:
	/* Release the shared 4-bit ECC engine iff this probe claimed it.
	 * The goto err_ecc paths (engine busy, or invalid ecc_mode) land
	 * below this block on purpose: they claimed nothing.
	 */
	spin_lock_irq(&davinci_nand_lock);
	if (ecc_mode == NAND_ECC_HW && pdata->ecc_bits == 4)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);

err_ecc:
err_ioremap:
	if (base)
		iounmap(base);
	if (vaddr)
		iounmap(vaddr);

err_nomem:
	kfree(info);
	return ret;
}
/*
 * Unregister the MTD device/partitions, release the shared 4-bit ECC
 * engine if this device owned it, unmap the windows, and free state.
 *
 * Bug fix: the old code cleared ecc4_busy only when ecc.mode was
 * NAND_ECC_HW_SYNDROME, which probe() never sets (it uses NAND_ECC_HW
 * or NAND_ECC_HW_OOB_FIRST), so the 4-bit engine was never released
 * on remove and could not be claimed again.  Test for the 4-bit
 * correct hook instead, which only the 4-bit probe path installs.
 */
static int __exit nand_davinci_remove(struct platform_device *pdev)
{
	struct davinci_nand_info *info = platform_get_drvdata(pdev);
	int status;

	if (mtd_has_partitions() && info->partitioned)
		status = del_mtd_partitions(&info->mtd);
	else
		status = del_mtd_device(&info->mtd);

	spin_lock_irq(&davinci_nand_lock);
	if (info->chip.ecc.correct == nand_davinci_correct_4bit)
		ecc4_busy = false;
	spin_unlock_irq(&davinci_nand_lock);

	iounmap(info->base);
	iounmap(info->vaddr);

	nand_release(&info->mtd);

	clk_disable(info->clk);
	clk_put(info->clk);

	kfree(info);

	return 0;
}
/* No .probe here: the probe routine is __init and is passed to
 * platform_driver_probe() at module init instead.
 */
static struct platform_driver nand_davinci_driver = {
	.remove		= __exit_p(nand_davinci_remove),
	.driver		= {
		.name	= "davinci_nand",
	},
};
MODULE_ALIAS("platform:davinci_nand");
static int __init nand_davinci_init(void)
{
	/* probe() lives in __init memory, hence platform_driver_probe() */
	return platform_driver_probe(&nand_davinci_driver, nand_davinci_probe);
}
module_init(nand_davinci_init);

static void __exit nand_davinci_exit(void)
{
	platform_driver_unregister(&nand_davinci_driver);
}
module_exit(nand_davinci_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("Davinci NAND flash driver");
| gpl-2.0 |
LeeDroid-/Pyramid-2.6.35-MR | crypto/testmgr.c | 737 | 55042 | /*
* Algorithm testing framework and tests.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) 2007 Nokia Siemens Networks
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <crypto/rng.h>
#include "internal.h"
#ifndef CONFIG_CRYPTO_MANAGER_TESTS
/* a perfect nop */
/* Self-tests compiled out (CONFIG_CRYPTO_MANAGER_TESTS unset):
 * report success for every algorithm.
 */
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{
	return 0;
}
#else
#include "testmgr.h"
/*
* Need slab memory for testing (size in number of pages).
*/
#define XBUFSIZE 8
/*
* Indexes into the xbuf to simulate cross-page access.
*/
#define IDX1 32
#define IDX2 32400
#define IDX3 1
#define IDX4 8193
#define IDX5 22222
#define IDX6 17101
#define IDX7 27333
#define IDX8 3000
/*
* Used by test_cipher()
*/
#define ENCRYPT 1
#define DECRYPT 0
/* Completion/status pair used to wait for asynchronous crypto requests. */
struct tcrypt_result {
	struct completion completion;
	int err;	/* final status reported by the async callback */
};
/* Encrypt/decrypt vector sets for AEAD algorithms. */
struct aead_test_suite {
	struct {
		struct aead_testvec *vecs;
		unsigned int count;
	} enc, dec;
};

/* Encrypt/decrypt vector sets for ciphers. */
struct cipher_test_suite {
	struct {
		struct cipher_testvec *vecs;
		unsigned int count;
	} enc, dec;
};

/* Compress/decompress vector sets for compression algorithms. */
struct comp_test_suite {
	struct {
		struct comp_testvec *vecs;
		unsigned int count;
	} comp, decomp;
};

/* Compress/decompress vector sets for partial (pcomp) compression. */
struct pcomp_test_suite {
	struct {
		struct pcomp_testvec *vecs;
		unsigned int count;
	} comp, decomp;
};

/* Digest vectors for hash algorithms. */
struct hash_test_suite {
	struct hash_testvec *vecs;
	unsigned int count;
};

/* Vectors for cryptographic pseudo-random number generators. */
struct cprng_test_suite {
	struct cprng_testvec *vecs;
	unsigned int count;
};

/* Binds an algorithm name to its test routine and test vectors. */
struct alg_test_desc {
	const char *alg;
	int (*test)(const struct alg_test_desc *desc, const char *driver,
		    u32 type, u32 mask);
	int fips_allowed;	/* set if alg is allowed in fips mode */

	union {
		struct aead_test_suite aead;
		struct cipher_test_suite cipher;
		struct comp_test_suite comp;
		struct pcomp_test_suite pcomp;
		struct hash_test_suite hash;
		struct cprng_test_suite cprng;
	} suite;
};

/* Offsets (see IDX1..IDX8 above) used to scatter data across xbuf pages */
static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };
/* Dump @len bytes of @buf to the log, 16 bytes per line with offsets. */
static void hexdump(unsigned char *buf, unsigned int len)
{
	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
			16, 1,
			buf, len, false);
}
/* Async request callback: record the final status and wake the waiter.
 * Intermediate -EINPROGRESS notifications are ignored.
 */
static void tcrypt_complete(struct crypto_async_request *req, int err)
{
	struct tcrypt_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}
/* Allocate one page per xbuf slot; on failure, release whatever was
 * already allocated and return -ENOMEM.
 */
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
	int n;

	for (n = 0; n < XBUFSIZE; n++) {
		buf[n] = (void *)__get_free_page(GFP_KERNEL);
		if (!buf[n])
			goto unwind;
	}

	return 0;

unwind:
	while (n-- > 0)
		free_page((unsigned long)buf[n]);

	return -ENOMEM;
}
/* Release the pages obtained by testmgr_alloc_buf(). */
static void testmgr_free_buf(char *buf[XBUFSIZE])
{
	int n;

	for (n = 0; n < XBUFSIZE; n++)
		free_page((unsigned long)buf[n]);
}
/* If the hash op went asynchronous (-EINPROGRESS/-EBUSY), wait for its
 * completion and return the real status; synchronous results pass
 * through unchanged.  The completion is re-armed for the next op.
 */
static int do_one_async_hash_op(struct ahash_request *req,
				struct tcrypt_result *tr,
				int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		ret = wait_for_completion_interruptible(&tr->completion);
		if (!ret)
			ret = tr->err;
		INIT_COMPLETION(tr->completion);
	}
	return ret;
}
/*
 * Run the hash test vectors in @template against @tfm.
 *
 * Pass 1 handles contiguous vectors (->np == 0): with @use_digest set
 * the one-shot crypto_ahash_digest() path is used, otherwise the
 * init/update/final sequence.  Pass 2 handles chunked vectors
 * (->np != 0), scattering the input across pages per ->tap[] to
 * exercise scatterlist walking.
 *
 * Fix: the init/update/final failure messages used the prefix
 * "alt: hash:" instead of "alg: hash:" used by every other message in
 * this file; corrected so log grepping for "alg:" finds all failures.
 *
 * Returns 0 on success or a negative errno on the first failure.
 */
static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
		     unsigned int tcount, bool use_digest)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
	unsigned int i, j, k, temp;
	struct scatterlist sg[8];
	char result[64];
	struct ahash_request *req;
	struct tcrypt_result tresult;
	void *hash_buff;
	char *xbuf[XBUFSIZE];
	int ret = -ENOMEM;

	if (testmgr_alloc_buf(xbuf))
		goto out_nobuf;

	init_completion(&tresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: hash: Failed to allocate request for "
		       "%s\n", algo);
		goto out_noreq;
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   tcrypt_complete, &tresult);

	/* Pass 1: contiguous-buffer vectors. */
	j = 0;
	for (i = 0; i < tcount; i++) {
		if (template[i].np)
			continue;

		j++;
		memset(result, 0, 64);

		hash_buff = xbuf[0];

		memcpy(hash_buff, template[i].plaintext, template[i].psize);
		sg_init_one(&sg[0], hash_buff, template[i].psize);

		if (template[i].ksize) {
			crypto_ahash_clear_flags(tfm, ~0);
			ret = crypto_ahash_setkey(tfm, template[i].key,
						  template[i].ksize);
			if (ret) {
				printk(KERN_ERR "alg: hash: setkey failed on "
				       "test %d for %s: ret=%d\n", j, algo,
				       -ret);
				goto out;
			}
		}

		ahash_request_set_crypt(req, sg, result, template[i].psize);
		if (use_digest) {
			ret = do_one_async_hash_op(req, &tresult,
						   crypto_ahash_digest(req));
			if (ret) {
				pr_err("alg: hash: digest failed on test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
		} else {
			ret = do_one_async_hash_op(req, &tresult,
						   crypto_ahash_init(req));
			if (ret) {
				pr_err("alg: hash: init failed on test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
			ret = do_one_async_hash_op(req, &tresult,
						   crypto_ahash_update(req));
			if (ret) {
				pr_err("alg: hash: update failed on test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
			ret = do_one_async_hash_op(req, &tresult,
						   crypto_ahash_final(req));
			if (ret) {
				pr_err("alg: hash: final failed on test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
		}

		if (memcmp(result, template[i].digest,
			   crypto_ahash_digestsize(tfm))) {
			printk(KERN_ERR "alg: hash: Test %d failed for %s\n",
			       j, algo);
			hexdump(result, crypto_ahash_digestsize(tfm));
			ret = -EINVAL;
			goto out;
		}
	}

	/* Pass 2: chunked vectors, input scattered per ->tap[]. */
	j = 0;
	for (i = 0; i < tcount; i++) {
		if (template[i].np) {
			j++;
			memset(result, 0, 64);

			temp = 0;
			sg_init_table(sg, template[i].np);
			ret = -EINVAL;
			for (k = 0; k < template[i].np; k++) {
				/* each chunk must fit inside its page */
				if (WARN_ON(offset_in_page(IDX[k]) +
					    template[i].tap[k] > PAGE_SIZE))
					goto out;
				sg_set_buf(&sg[k],
					   memcpy(xbuf[IDX[k] >> PAGE_SHIFT] +
						  offset_in_page(IDX[k]),
						  template[i].plaintext + temp,
						  template[i].tap[k]),
					   template[i].tap[k]);
				temp += template[i].tap[k];
			}

			if (template[i].ksize) {
				crypto_ahash_clear_flags(tfm, ~0);
				ret = crypto_ahash_setkey(tfm, template[i].key,
							  template[i].ksize);

				if (ret) {
					printk(KERN_ERR "alg: hash: setkey "
					       "failed on chunking test %d "
					       "for %s: ret=%d\n", j, algo,
					       -ret);
					goto out;
				}
			}

			ahash_request_set_crypt(req, sg, result,
						template[i].psize);
			ret = crypto_ahash_digest(req);
			switch (ret) {
			case 0:
				break;
			case -EINPROGRESS:
			case -EBUSY:
				ret = wait_for_completion_interruptible(
					&tresult.completion);
				if (!ret && !(ret = tresult.err)) {
					INIT_COMPLETION(tresult.completion);
					break;
				}
				/* fall through */
			default:
				printk(KERN_ERR "alg: hash: digest failed "
				       "on chunking test %d for %s: "
				       "ret=%d\n", j, algo, -ret);
				goto out;
			}

			if (memcmp(result, template[i].digest,
				   crypto_ahash_digestsize(tfm))) {
				printk(KERN_ERR "alg: hash: Chunking test %d "
				       "failed for %s\n", j, algo);
				hexdump(result, crypto_ahash_digestsize(tfm));
				ret = -EINVAL;
				goto out;
			}
		}
	}

	ret = 0;

out:
	ahash_request_free(req);
out_noreq:
	testmgr_free_buf(xbuf);
out_nobuf:
	return ret;
}
/*
 * Run the AEAD test vectors in @template against @tfm in direction
 * @enc (ENCRYPT or DECRYPT).
 *
 * Pass 1 handles contiguous vectors (->np == 0); pass 2 handles
 * chunked vectors, scattering both the payload (->tap[]) and the
 * associated data (->atap[]) across pages to exercise scatterlist
 * walking.  Vectors with ->novrfy set are expected to fail
 * authentication with -EBADMSG on decryption.
 *
 * Returns 0 on success or a negative errno on the first failure.
 */
static int test_aead(struct crypto_aead *tfm, int enc,
		     struct aead_testvec *template, unsigned int tcount)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
	unsigned int i, j, k, n, temp;
	int ret = -ENOMEM;
	char *q;
	char *key;
	struct aead_request *req;
	struct scatterlist sg[8];
	struct scatterlist asg[8];
	const char *e;
	struct tcrypt_result result;
	unsigned int authsize;
	void *input;
	void *assoc;
	char iv[MAX_IVLEN];
	char *xbuf[XBUFSIZE];
	char *axbuf[XBUFSIZE];

	if (testmgr_alloc_buf(xbuf))
		goto out_noxbuf;
	if (testmgr_alloc_buf(axbuf))
		goto out_noaxbuf;

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	init_completion(&result.completion);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: aead: Failed to allocate request for "
		       "%s\n", algo);
		goto out;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tcrypt_complete, &result);

	/* Pass 1: contiguous-buffer vectors. */
	for (i = 0, j = 0; i < tcount; i++) {
		if (!template[i].np) {
			j++;

			/* some templates have no input data but they will
			 * touch input
			 */
			input = xbuf[0];
			assoc = axbuf[0];

			ret = -EINVAL;
			if (WARN_ON(template[i].ilen > PAGE_SIZE ||
				    template[i].alen > PAGE_SIZE))
				goto out;

			memcpy(input, template[i].input, template[i].ilen);
			memcpy(assoc, template[i].assoc, template[i].alen);
			if (template[i].iv)
				memcpy(iv, template[i].iv, MAX_IVLEN);
			else
				memset(iv, 0, MAX_IVLEN);

			crypto_aead_clear_flags(tfm, ~0);
			if (template[i].wk)
				crypto_aead_set_flags(
					tfm, CRYPTO_TFM_REQ_WEAK_KEY);

			key = template[i].key;

			ret = crypto_aead_setkey(tfm, key,
						 template[i].klen);
			/* setkey must fail exactly when ->fail is set */
			if (!ret == template[i].fail) {
				printk(KERN_ERR "alg: aead: setkey failed on "
				       "test %d for %s: flags=%x\n", j, algo,
				       crypto_aead_get_flags(tfm));
				goto out;
			} else if (ret)
				continue;

			/* tag length is the |rlen - ilen| difference */
			authsize = abs(template[i].rlen - template[i].ilen);
			ret = crypto_aead_setauthsize(tfm, authsize);
			if (ret) {
				printk(KERN_ERR "alg: aead: Failed to set "
				       "authsize to %u on test %d for %s\n",
				       authsize, j, algo);
				goto out;
			}

			/* on encrypt, leave room for the appended tag */
			sg_init_one(&sg[0], input,
				    template[i].ilen + (enc ? authsize : 0));

			sg_init_one(&asg[0], assoc, template[i].alen);

			aead_request_set_crypt(req, sg, sg,
					       template[i].ilen, iv);

			aead_request_set_assoc(req, asg, template[i].alen);

			ret = enc ?
				crypto_aead_encrypt(req) :
				crypto_aead_decrypt(req);

			switch (ret) {
			case 0:
				if (template[i].novrfy) {
					/* verification was supposed to fail */
					printk(KERN_ERR "alg: aead: %s failed "
					       "on test %d for %s: ret was 0, "
					       "expected -EBADMSG\n",
					       e, j, algo);
					/* so really, we got a bad message */
					ret = -EBADMSG;
					goto out;
				}
				break;
			case -EINPROGRESS:
			case -EBUSY:
				ret = wait_for_completion_interruptible(
					&result.completion);
				if (!ret && !(ret = result.err)) {
					INIT_COMPLETION(result.completion);
					break;
				}
				/* on wait error, fall into -EBADMSG check */
			case -EBADMSG:
				if (template[i].novrfy)
					/* verification failure was expected */
					continue;
				/* fall through */
			default:
				printk(KERN_ERR "alg: aead: %s failed on test "
				       "%d for %s: ret=%d\n", e, j, algo, -ret);
				goto out;
			}

			q = input;
			if (memcmp(q, template[i].result, template[i].rlen)) {
				printk(KERN_ERR "alg: aead: Test %d failed on "
				       "%s for %s\n", j, e, algo);
				hexdump(q, template[i].rlen);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* Pass 2: chunked vectors scattered across pages. */
	for (i = 0, j = 0; i < tcount; i++) {
		if (template[i].np) {
			j++;

			if (template[i].iv)
				memcpy(iv, template[i].iv, MAX_IVLEN);
			else
				memset(iv, 0, MAX_IVLEN);

			crypto_aead_clear_flags(tfm, ~0);
			if (template[i].wk)
				crypto_aead_set_flags(
					tfm, CRYPTO_TFM_REQ_WEAK_KEY);
			key = template[i].key;

			ret = crypto_aead_setkey(tfm, key, template[i].klen);
			/* setkey must fail exactly when ->fail is set */
			if (!ret == template[i].fail) {
				printk(KERN_ERR "alg: aead: setkey failed on "
				       "chunk test %d for %s: flags=%x\n", j,
				       algo, crypto_aead_get_flags(tfm));
				goto out;
			} else if (ret)
				continue;

			authsize = abs(template[i].rlen - template[i].ilen);

			ret = -EINVAL;
			sg_init_table(sg, template[i].np);
			for (k = 0, temp = 0; k < template[i].np; k++) {
				/* each chunk must fit inside its page */
				if (WARN_ON(offset_in_page(IDX[k]) +
					    template[i].tap[k] > PAGE_SIZE))
					goto out;

				q = xbuf[IDX[k] >> PAGE_SHIFT] +
				    offset_in_page(IDX[k]);

				memcpy(q, template[i].input + temp,
				       template[i].tap[k]);

				n = template[i].tap[k];
				if (k == template[i].np - 1 && enc)
					n += authsize;
				/* zero sentinel to detect buffer overrun */
				if (offset_in_page(q) + n < PAGE_SIZE)
					q[n] = 0;

				sg_set_buf(&sg[k], q, template[i].tap[k]);
				temp += template[i].tap[k];
			}

			ret = crypto_aead_setauthsize(tfm, authsize);
			if (ret) {
				printk(KERN_ERR "alg: aead: Failed to set "
				       "authsize to %u on chunk test %d for "
				       "%s\n", authsize, j, algo);
				goto out;
			}

			if (enc) {
				/* last chunk grows to hold the auth tag */
				if (WARN_ON(sg[k - 1].offset +
					    sg[k - 1].length + authsize >
					    PAGE_SIZE)) {
					ret = -EINVAL;
					goto out;
				}

				sg[k - 1].length += authsize;
			}

			sg_init_table(asg, template[i].anp);
			ret = -EINVAL;
			for (k = 0, temp = 0; k < template[i].anp; k++) {
				if (WARN_ON(offset_in_page(IDX[k]) +
					    template[i].atap[k] > PAGE_SIZE))
					goto out;
				sg_set_buf(&asg[k],
					   memcpy(axbuf[IDX[k] >> PAGE_SHIFT] +
						  offset_in_page(IDX[k]),
						  template[i].assoc + temp,
						  template[i].atap[k]),
					   template[i].atap[k]);
				temp += template[i].atap[k];
			}

			aead_request_set_crypt(req, sg, sg,
					       template[i].ilen,
					       iv);

			aead_request_set_assoc(req, asg, template[i].alen);

			ret = enc ?
				crypto_aead_encrypt(req) :
				crypto_aead_decrypt(req);

			switch (ret) {
			case 0:
				if (template[i].novrfy) {
					/* verification was supposed to fail */
					printk(KERN_ERR "alg: aead: %s failed "
					       "on chunk test %d for %s: ret "
					       "was 0, expected -EBADMSG\n",
					       e, j, algo);
					/* so really, we got a bad message */
					ret = -EBADMSG;
					goto out;
				}
				break;
			case -EINPROGRESS:
			case -EBUSY:
				ret = wait_for_completion_interruptible(
					&result.completion);
				if (!ret && !(ret = result.err)) {
					INIT_COMPLETION(result.completion);
					break;
				}
				/* on wait error, fall into -EBADMSG check */
			case -EBADMSG:
				if (template[i].novrfy)
					/* verification failure was expected */
					continue;
				/* fall through */
			default:
				printk(KERN_ERR "alg: aead: %s failed on "
				       "chunk test %d for %s: ret=%d\n", e, j,
				       algo, -ret);
				goto out;
			}

			ret = -EINVAL;
			/* compare each chunk and its overrun sentinel */
			for (k = 0, temp = 0; k < template[i].np; k++) {
				q = xbuf[IDX[k] >> PAGE_SHIFT] +
				    offset_in_page(IDX[k]);

				n = template[i].tap[k];
				if (k == template[i].np - 1)
					n += enc ? authsize : -authsize;

				if (memcmp(q, template[i].result + temp, n)) {
					printk(KERN_ERR "alg: aead: Chunk "
					       "test %d failed on %s at page "
					       "%u for %s\n", j, e, k, algo);
					hexdump(q, n);
					goto out;
				}

				q += n;
				if (k == template[i].np - 1 && !enc) {
					/* decrypt leaves the tag after the
					 * payload; it must match the input */
					if (memcmp(q, template[i].input +
						      temp + n, authsize))
						n = authsize;
					else
						n = 0;
				} else {
					/* count stray non-zero bytes after
					 * the chunk (sentinel corruption) */
					for (n = 0; offset_in_page(q + n) &&
						    q[n]; n++)
						;
				}
				if (n) {
					printk(KERN_ERR "alg: aead: Result "
					       "buffer corruption in chunk "
					       "test %d on %s at page %u for "
					       "%s: %u bytes:\n", j, e, k,
					       algo, n);
					hexdump(q, n);
					goto out;
				}

				temp += template[i].tap[k];
			}
		}
	}

	ret = 0;

out:
	aead_request_free(req);
	testmgr_free_buf(axbuf);
out_noaxbuf:
	testmgr_free_buf(xbuf);
out_noxbuf:
	return ret;
}
/*
 * Run the single-block cipher vectors in @template against @tfm in
 * direction @enc, using the synchronous crypto_cipher_*_one()
 * interface one block at a time.  Chunked vectors (->np != 0) are
 * skipped here; they only apply to the skcipher interface.
 *
 * Returns 0 on success or a negative errno on the first failure.
 */
static int test_cipher(struct crypto_cipher *tfm, int enc,
		       struct cipher_testvec *template, unsigned int tcount)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
	unsigned int i, j, k;
	char *q;
	const char *e;
	void *data;
	char *xbuf[XBUFSIZE];
	int ret = -ENOMEM;

	if (testmgr_alloc_buf(xbuf))
		goto out_nobuf;

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	j = 0;
	for (i = 0; i < tcount; i++) {
		if (template[i].np)
			continue;

		j++;

		ret = -EINVAL;
		if (WARN_ON(template[i].ilen > PAGE_SIZE))
			goto out;

		data = xbuf[0];
		memcpy(data, template[i].input, template[i].ilen);

		crypto_cipher_clear_flags(tfm, ~0);
		if (template[i].wk)
			crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);

		ret = crypto_cipher_setkey(tfm, template[i].key,
					   template[i].klen);
		/* setkey must fail exactly when ->fail is set */
		if (!ret == template[i].fail) {
			printk(KERN_ERR "alg: cipher: setkey failed "
			       "on test %d for %s: flags=%x\n", j,
			       algo, crypto_cipher_get_flags(tfm));
			goto out;
		} else if (ret)
			continue;

		/* process the input in place, one cipher block at a time */
		for (k = 0; k < template[i].ilen;
		     k += crypto_cipher_blocksize(tfm)) {
			if (enc)
				crypto_cipher_encrypt_one(tfm, data + k,
							  data + k);
			else
				crypto_cipher_decrypt_one(tfm, data + k,
							  data + k);
		}

		q = data;
		if (memcmp(q, template[i].result, template[i].rlen)) {
			printk(KERN_ERR "alg: cipher: Test %d failed "
			       "on %s for %s\n", j, e, algo);
			hexdump(q, template[i].rlen);
			ret = -EINVAL;
			goto out;
		}
	}

	ret = 0;

out:
	testmgr_free_buf(xbuf);
out_nobuf:
	return ret;
}
/*
 * Run the skcipher test vectors in @template against @tfm in direction
 * @enc.  Pass 1 handles contiguous vectors (->np == 0); pass 2 handles
 * chunked vectors, scattering the input across pages per ->tap[] to
 * exercise scatterlist walking.
 *
 * Returns 0 on success or a negative errno on the first failure.
 */
static int test_skcipher(struct crypto_ablkcipher *tfm, int enc,
			 struct cipher_testvec *template, unsigned int tcount)
{
	const char *algo =
		crypto_tfm_alg_driver_name(crypto_ablkcipher_tfm(tfm));
	unsigned int i, j, k, n, temp;
	char *q;
	struct ablkcipher_request *req;
	struct scatterlist sg[8];
	const char *e;
	struct tcrypt_result result;
	void *data;
	char iv[MAX_IVLEN];
	char *xbuf[XBUFSIZE];
	int ret = -ENOMEM;

	if (testmgr_alloc_buf(xbuf))
		goto out_nobuf;

	if (enc == ENCRYPT)
	        e = "encryption";
	else
		e = "decryption";

	init_completion(&result.completion);

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: skcipher: Failed to allocate request "
		       "for %s\n", algo);
		goto out;
	}

	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					tcrypt_complete, &result);

	/* Pass 1: contiguous-buffer vectors. */
	j = 0;
	for (i = 0; i < tcount; i++) {
		if (template[i].iv)
			memcpy(iv, template[i].iv, MAX_IVLEN);
		else
			memset(iv, 0, MAX_IVLEN);

		if (!(template[i].np)) {
			j++;

			ret = -EINVAL;
			if (WARN_ON(template[i].ilen > PAGE_SIZE))
				goto out;

			data = xbuf[0];
			memcpy(data, template[i].input, template[i].ilen);

			crypto_ablkcipher_clear_flags(tfm, ~0);
			if (template[i].wk)
				crypto_ablkcipher_set_flags(
					tfm, CRYPTO_TFM_REQ_WEAK_KEY);

			ret = crypto_ablkcipher_setkey(tfm, template[i].key,
						       template[i].klen);
			/* setkey must fail exactly when ->fail is set */
			if (!ret == template[i].fail) {
				printk(KERN_ERR "alg: skcipher: setkey failed "
				       "on test %d for %s: flags=%x\n", j,
				       algo, crypto_ablkcipher_get_flags(tfm));
				goto out;
			} else if (ret)
				continue;

			sg_init_one(&sg[0], data, template[i].ilen);

			ablkcipher_request_set_crypt(req, sg, sg,
						     template[i].ilen, iv);
			ret = enc ?
				crypto_ablkcipher_encrypt(req) :
				crypto_ablkcipher_decrypt(req);

			switch (ret) {
			case 0:
				break;
			case -EINPROGRESS:
			case -EBUSY:
				ret = wait_for_completion_interruptible(
					&result.completion);
				if (!ret && !((ret = result.err))) {
					INIT_COMPLETION(result.completion);
					break;
				}
				/* fall through */
			default:
				printk(KERN_ERR "alg: skcipher: %s failed on "
				       "test %d for %s: ret=%d\n", e, j, algo,
				       -ret);
				goto out;
			}

			q = data;
			if (memcmp(q, template[i].result, template[i].rlen)) {
				printk(KERN_ERR "alg: skcipher: Test %d "
				       "failed on %s for %s\n", j, e, algo);
				hexdump(q, template[i].rlen);
				ret = -EINVAL;
				goto out;
			}
		}
	}

	/* Pass 2: chunked vectors scattered across pages. */
	j = 0;
	for (i = 0; i < tcount; i++) {

		if (template[i].iv)
			memcpy(iv, template[i].iv, MAX_IVLEN);
		else
			memset(iv, 0, MAX_IVLEN);

		if (template[i].np) {
			j++;

			crypto_ablkcipher_clear_flags(tfm, ~0);
			if (template[i].wk)
				crypto_ablkcipher_set_flags(
					tfm, CRYPTO_TFM_REQ_WEAK_KEY);

			ret = crypto_ablkcipher_setkey(tfm, template[i].key,
						       template[i].klen);
			/* setkey must fail exactly when ->fail is set */
			if (!ret == template[i].fail) {
				printk(KERN_ERR "alg: skcipher: setkey failed "
				       "on chunk test %d for %s: flags=%x\n",
				       j, algo,
				       crypto_ablkcipher_get_flags(tfm));
				goto out;
			} else if (ret)
				continue;

			temp = 0;
			ret = -EINVAL;
			sg_init_table(sg, template[i].np);
			for (k = 0; k < template[i].np; k++) {
				/* each chunk must fit inside its page */
				if (WARN_ON(offset_in_page(IDX[k]) +
					    template[i].tap[k] > PAGE_SIZE))
					goto out;

				q = xbuf[IDX[k] >> PAGE_SHIFT] +
				    offset_in_page(IDX[k]);

				memcpy(q, template[i].input + temp,
				       template[i].tap[k]);

				/* zero sentinel to detect buffer overrun */
				if (offset_in_page(q) + template[i].tap[k] <
				    PAGE_SIZE)
					q[template[i].tap[k]] = 0;

				sg_set_buf(&sg[k], q, template[i].tap[k]);

				temp += template[i].tap[k];
			}

			ablkcipher_request_set_crypt(req, sg, sg,
					template[i].ilen, iv);

			ret = enc ?
				crypto_ablkcipher_encrypt(req) :
				crypto_ablkcipher_decrypt(req);

			switch (ret) {
			case 0:
				break;
			case -EINPROGRESS:
			case -EBUSY:
				ret = wait_for_completion_interruptible(
					&result.completion);
				if (!ret && !((ret = result.err))) {
					INIT_COMPLETION(result.completion);
					break;
				}
				/* fall through */
			default:
				printk(KERN_ERR "alg: skcipher: %s failed on "
				       "chunk test %d for %s: ret=%d\n", e, j,
				       algo, -ret);
				goto out;
			}

			temp = 0;
			ret = -EINVAL;
			/* compare each chunk and its overrun sentinel */
			for (k = 0; k < template[i].np; k++) {
				q = xbuf[IDX[k] >> PAGE_SHIFT] +
				    offset_in_page(IDX[k]);

				if (memcmp(q, template[i].result + temp,
					   template[i].tap[k])) {
					printk(KERN_ERR "alg: skcipher: Chunk "
					       "test %d failed on %s at page "
					       "%u for %s\n", j, e, k, algo);
					hexdump(q, template[i].tap[k]);
					goto out;
				}

				q += template[i].tap[k];
				/* count stray non-zero bytes past the chunk */
				for (n = 0; offset_in_page(q + n) && q[n]; n++)
					;
				if (n) {
					printk(KERN_ERR "alg: skcipher: "
					       "Result buffer corruption in "
					       "chunk test %d on %s at page "
					       "%u for %s: %u bytes:\n", j, e,
					       k, algo, n);
					hexdump(q, n);
					goto out;
				}
				temp += template[i].tap[k];
			}
		}
	}

	ret = 0;

out:
	ablkcipher_request_free(req);
	testmgr_free_buf(xbuf);
out_nobuf:
	return ret;
}
/*
 * Run the compression vectors in @ctemplate and the decompression
 * vectors in @dtemplate against @tfm, checking both the produced
 * output length and its contents against the known answers.
 *
 * Returns 0 on success or a negative errno on the first failure.
 */
static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
		     struct comp_testvec *dtemplate, int ctcount, int dtcount)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
	unsigned int i;
	char result[COMP_BUF_SIZE];
	int ret;

	/* Compression direction. */
	for (i = 0; i < ctcount; i++) {
		int ilen;
		unsigned int dlen = COMP_BUF_SIZE;

		memset(result, 0, sizeof (result));

		ilen = ctemplate[i].inlen;
		ret = crypto_comp_compress(tfm, ctemplate[i].input,
		                           ilen, result, &dlen);
		if (ret) {
			printk(KERN_ERR "alg: comp: compression failed "
			       "on test %d for %s: ret=%d\n", i + 1, algo,
			       -ret);
			goto out;
		}

		if (dlen != ctemplate[i].outlen) {
			printk(KERN_ERR "alg: comp: Compression test %d "
			       "failed for %s: output len = %d\n", i + 1, algo,
			       dlen);
			ret = -EINVAL;
			goto out;
		}

		if (memcmp(result, ctemplate[i].output, dlen)) {
			printk(KERN_ERR "alg: comp: Compression test %d "
			       "failed for %s\n", i + 1, algo);
			hexdump(result, dlen);
			ret = -EINVAL;
			goto out;
		}
	}

	/* Decompression direction. */
	for (i = 0; i < dtcount; i++) {
		int ilen;
		unsigned int dlen = COMP_BUF_SIZE;

		memset(result, 0, sizeof (result));

		ilen = dtemplate[i].inlen;
		ret = crypto_comp_decompress(tfm, dtemplate[i].input,
		                             ilen, result, &dlen);
		if (ret) {
			printk(KERN_ERR "alg: comp: decompression failed "
			       "on test %d for %s: ret=%d\n", i + 1, algo,
			       -ret);
			goto out;
		}

		if (dlen != dtemplate[i].outlen) {
			printk(KERN_ERR "alg: comp: Decompression test %d "
			       "failed for %s: output len = %d\n", i + 1, algo,
			       dlen);
			ret = -EINVAL;
			goto out;
		}

		if (memcmp(result, dtemplate[i].output, dlen)) {
			printk(KERN_ERR "alg: comp: Decompression test %d "
			       "failed for %s\n", i + 1, algo);
			hexdump(result, dlen);
			ret = -EINVAL;
			goto out;
		}
	}

	ret = 0;

out:
	return ret;
}
/*
 * Run the partial (streaming) compression vectors in @ctemplate and
 * decompression vectors in @dtemplate against @tfm.
 *
 * Each vector is deliberately fed in two halves, with the output space
 * also split in two, to exercise the update/final streaming interface
 * with partial input and partial output availability.  Both the total
 * output length (via avail_out accounting) and the sum of the
 * update/final return values are checked against the expected length.
 *
 * Returns 0 on success or a negative errno on the first failure.
 */
static int test_pcomp(struct crypto_pcomp *tfm,
		      struct pcomp_testvec *ctemplate,
		      struct pcomp_testvec *dtemplate, int ctcount,
		      int dtcount)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm));
	unsigned int i;
	char result[COMP_BUF_SIZE];
	int res;

	/* Compression direction. */
	for (i = 0; i < ctcount; i++) {
		struct comp_request req;
		unsigned int produced = 0;

		res = crypto_compress_setup(tfm, ctemplate[i].params,
					    ctemplate[i].paramsize);
		if (res) {
			pr_err("alg: pcomp: compression setup failed on test "
			       "%d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}

		res = crypto_compress_init(tfm);
		if (res) {
			pr_err("alg: pcomp: compression init failed on test "
			       "%d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}

		memset(result, 0, sizeof(result));

		/* feed only the first half of input and output space */
		req.next_in = ctemplate[i].input;
		req.avail_in = ctemplate[i].inlen / 2;
		req.next_out = result;
		req.avail_out = ctemplate[i].outlen / 2;

		res = crypto_compress_update(tfm, &req);
		/* -EAGAIN with all input consumed just means "need more" */
		if (res < 0 && (res != -EAGAIN || req.avail_in)) {
			pr_err("alg: pcomp: compression update failed on test "
			       "%d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}
		if (res > 0)
			produced += res;

		/* Add remaining input data */
		req.avail_in += (ctemplate[i].inlen + 1) / 2;

		res = crypto_compress_update(tfm, &req);
		if (res < 0 && (res != -EAGAIN || req.avail_in)) {
			pr_err("alg: pcomp: compression update failed on test "
			       "%d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}
		if (res > 0)
			produced += res;

		/* Provide remaining output space */
		req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2;

		res = crypto_compress_final(tfm, &req);
		if (res < 0) {
			pr_err("alg: pcomp: compression final failed on test "
			       "%d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}
		produced += res;

		if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) {
			pr_err("alg: comp: Compression test %d failed for %s: "
			       "output len = %d (expected %d)\n", i + 1, algo,
			       COMP_BUF_SIZE - req.avail_out,
			       ctemplate[i].outlen);
			return -EINVAL;
		}

		if (produced != ctemplate[i].outlen) {
			pr_err("alg: comp: Compression test %d failed for %s: "
			       "returned len = %u (expected %d)\n", i + 1,
			       algo, produced, ctemplate[i].outlen);
			return -EINVAL;
		}

		if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) {
			pr_err("alg: pcomp: Compression test %d failed for "
			       "%s\n", i + 1, algo);
			hexdump(result, ctemplate[i].outlen);
			return -EINVAL;
		}
	}

	/* Decompression direction (same half/half feeding scheme). */
	for (i = 0; i < dtcount; i++) {
		struct comp_request req;
		unsigned int produced = 0;

		res = crypto_decompress_setup(tfm, dtemplate[i].params,
					      dtemplate[i].paramsize);
		if (res) {
			pr_err("alg: pcomp: decompression setup failed on "
			       "test %d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}

		res = crypto_decompress_init(tfm);
		if (res) {
			pr_err("alg: pcomp: decompression init failed on test "
			       "%d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}

		memset(result, 0, sizeof(result));

		req.next_in = dtemplate[i].input;
		req.avail_in = dtemplate[i].inlen / 2;
		req.next_out = result;
		req.avail_out = dtemplate[i].outlen / 2;

		res = crypto_decompress_update(tfm, &req);
		if (res < 0 && (res != -EAGAIN || req.avail_in)) {
			pr_err("alg: pcomp: decompression update failed on "
			       "test %d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}
		if (res > 0)
			produced += res;

		/* Add remaining input data */
		req.avail_in += (dtemplate[i].inlen + 1) / 2;

		res = crypto_decompress_update(tfm, &req);
		if (res < 0 && (res != -EAGAIN || req.avail_in)) {
			pr_err("alg: pcomp: decompression update failed on "
			       "test %d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}
		if (res > 0)
			produced += res;

		/* Provide remaining output space */
		req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2;

		res = crypto_decompress_final(tfm, &req);
		if (res < 0 && (res != -EAGAIN || req.avail_in)) {
			pr_err("alg: pcomp: decompression final failed on "
			       "test %d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}
		if (res > 0)
			produced += res;

		if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) {
			pr_err("alg: comp: Decompression test %d failed for "
			       "%s: output len = %d (expected %d)\n", i + 1,
			       algo, COMP_BUF_SIZE - req.avail_out,
			       dtemplate[i].outlen);
			return -EINVAL;
		}

		if (produced != dtemplate[i].outlen) {
			pr_err("alg: comp: Decompression test %d failed for "
			       "%s: returned len = %u (expected %d)\n", i + 1,
			       algo, produced, dtemplate[i].outlen);
			return -EINVAL;
		}

		if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) {
			pr_err("alg: pcomp: Decompression test %d failed for "
			       "%s\n", i + 1, algo);
			hexdump(result, dtemplate[i].outlen);
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * Run the deterministic-RNG vectors in @template against @tfm.
 *
 * For each vector the RNG is reseeded with the concatenation of
 * V || key || DT, then asked for ->loops blocks of ->rlen bytes;
 * the final block is compared against the expected output.
 *
 * Returns 0 on success or a negative errno on the first failure.
 */
static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
		      unsigned int tcount)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
	int err = 0, i, j, seedsize;
	u8 *seed;
	char result[32];

	seedsize = crypto_rng_seedsize(tfm);

	seed = kmalloc(seedsize, GFP_KERNEL);
	if (!seed) {
		printk(KERN_ERR "alg: cprng: Failed to allocate seed space "
		       "for %s\n", algo);
		return -ENOMEM;
	}

	for (i = 0; i < tcount; i++) {
		memset(result, 0, 32);

		/* seed layout: V || key || DT */
		memcpy(seed, template[i].v, template[i].vlen);
		memcpy(seed + template[i].vlen, template[i].key,
		       template[i].klen);
		memcpy(seed + template[i].vlen + template[i].klen,
		       template[i].dt, template[i].dtlen);

		err = crypto_rng_reset(tfm, seed, seedsize);
		if (err) {
			printk(KERN_ERR "alg: cprng: Failed to reset rng "
			       "for %s\n", algo);
			goto out;
		}

		/* only the last of ->loops blocks ends up in result */
		for (j = 0; j < template[i].loops; j++) {
			err = crypto_rng_get_bytes(tfm, result,
						   template[i].rlen);
			if (err != template[i].rlen) {
				printk(KERN_ERR "alg: cprng: Failed to obtain "
				       "the correct amount of random data for "
				       "%s (requested %d, got %d)\n", algo,
				       template[i].rlen, err);
				goto out;
			}
		}

		err = memcmp(result, template[i].result,
			     template[i].rlen);
		if (err) {
			printk(KERN_ERR "alg: cprng: Test %d failed for %s\n",
			       i, algo);
			hexdump(result, template[i].rlen);
			err = -EINVAL;
			goto out;
		}
	}

out:
	kfree(seed);
	return err;
}
/*
 * Allocate the AEAD transform named @driver and run its encryption and
 * decryption vectors; decryption runs only if encryption passed.
 */
static int alg_test_aead(const struct alg_test_desc *desc, const char *driver,
			 u32 type, u32 mask)
{
	const struct aead_test_suite *suite = &desc->suite.aead;
	struct crypto_aead *tfm;
	int err = 0;

	tfm = crypto_alloc_aead(driver, type, mask);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
		       "%ld\n", driver, PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}

	if (suite->enc.vecs)
		err = test_aead(tfm, ENCRYPT, suite->enc.vecs,
				suite->enc.count);
	if (!err && suite->dec.vecs)
		err = test_aead(tfm, DECRYPT, suite->dec.vecs,
				suite->dec.count);

	crypto_free_aead(tfm);
	return err;
}
/*
 * Allocate the single-block cipher named @driver and run its encryption
 * and decryption vectors; decryption runs only if encryption passed.
 */
static int alg_test_cipher(const struct alg_test_desc *desc,
			   const char *driver, u32 type, u32 mask)
{
	const struct cipher_test_suite *suite = &desc->suite.cipher;
	struct crypto_cipher *tfm;
	int err = 0;

	tfm = crypto_alloc_cipher(driver, type, mask);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: cipher: Failed to load transform for "
		       "%s: %ld\n", driver, PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}

	if (suite->enc.vecs)
		err = test_cipher(tfm, ENCRYPT, suite->enc.vecs,
				  suite->enc.count);
	if (!err && suite->dec.vecs)
		err = test_cipher(tfm, DECRYPT, suite->dec.vecs,
				  suite->dec.count);

	crypto_free_cipher(tfm);
	return err;
}
/*
 * Allocate the skcipher named @driver and run its encryption and
 * decryption vectors; decryption runs only if encryption passed.
 */
static int alg_test_skcipher(const struct alg_test_desc *desc,
			     const char *driver, u32 type, u32 mask)
{
	const struct cipher_test_suite *suite = &desc->suite.cipher;
	struct crypto_ablkcipher *tfm;
	int err = 0;

	tfm = crypto_alloc_ablkcipher(driver, type, mask);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: skcipher: Failed to load transform for "
		       "%s: %ld\n", driver, PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}

	if (suite->enc.vecs)
		err = test_skcipher(tfm, ENCRYPT, suite->enc.vecs,
				    suite->enc.count);
	if (!err && suite->dec.vecs)
		err = test_skcipher(tfm, DECRYPT, suite->dec.vecs,
				    suite->dec.count);

	crypto_free_ablkcipher(tfm);
	return err;
}
/*
 * Allocate the compression transform named @driver and run both its
 * compression and decompression vectors.
 */
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
			 u32 type, u32 mask)
{
	const struct comp_test_suite *suite = &desc->suite.comp;
	struct crypto_comp *tfm;
	int err;

	tfm = crypto_alloc_comp(driver, type, mask);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: comp: Failed to load transform for %s: "
		       "%ld\n", driver, PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}

	err = test_comp(tfm, suite->comp.vecs, suite->decomp.vecs,
			suite->comp.count, suite->decomp.count);

	crypto_free_comp(tfm);
	return err;
}
/*
 * Allocate the partial (streaming) compression transform named @driver
 * and run both its compression and decompression vectors.
 */
static int alg_test_pcomp(const struct alg_test_desc *desc, const char *driver,
			  u32 type, u32 mask)
{
	const struct pcomp_test_suite *suite = &desc->suite.pcomp;
	struct crypto_pcomp *tfm;
	int err;

	tfm = crypto_alloc_pcomp(driver, type, mask);
	if (IS_ERR(tfm)) {
		pr_err("alg: pcomp: Failed to load transform for %s: %ld\n",
		       driver, PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}

	err = test_pcomp(tfm, suite->comp.vecs, suite->decomp.vecs,
			 suite->comp.count, suite->decomp.count);

	crypto_free_pcomp(tfm);
	return err;
}
/*
 * Allocate the hash transform named @driver and run its vectors twice:
 * once via the one-shot digest path, once via init/update/final.
 */
static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
			 u32 type, u32 mask)
{
	const struct hash_test_suite *suite = &desc->suite.hash;
	struct crypto_ahash *tfm;
	int err;

	tfm = crypto_alloc_ahash(driver, type, mask);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
		       "%ld\n", driver, PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}

	err = test_hash(tfm, suite->vecs, suite->count, true);
	if (!err)
		err = test_hash(tfm, suite->vecs, suite->count, false);

	crypto_free_ahash(tfm);
	return err;
}
/*
 * Test crc32c: first run the generic hash vectors, then additionally
 * verify the shash partial-state interface by seeding the descriptor
 * context with a known CRC state and finalizing with no further input.
 *
 * 420553207 is an arbitrary known seed value; finalizing from that
 * state must yield its bitwise complement (crc32c's final inversion).
 */
static int alg_test_crc32c(const struct alg_test_desc *desc,
			   const char *driver, u32 type, u32 mask)
{
	struct crypto_shash *tfm;
	u32 val;
	int err;

	err = alg_test_hash(desc, driver, type, mask);
	if (err)
		goto out;

	tfm = crypto_alloc_shash(driver, type, mask);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
		       "%ld\n", driver, PTR_ERR(tfm));
		err = PTR_ERR(tfm);
		goto out;
	}

	do {
		/* on-stack shash descriptor with VLA context area */
		struct {
			struct shash_desc shash;
			char ctx[crypto_shash_descsize(tfm)];
		} sdesc;

		sdesc.shash.tfm = tfm;
		sdesc.shash.flags = 0;

		/* seed the intermediate CRC state directly (little-endian) */
		*(u32 *)sdesc.ctx = le32_to_cpu(420553207);
		err = crypto_shash_final(&sdesc.shash, (u8 *)&val);
		if (err) {
			printk(KERN_ERR "alg: crc32c: Operation failed for "
			       "%s: %d\n", driver, err);
			break;
		}

		if (val != ~420553207) {
			printk(KERN_ERR "alg: crc32c: Test failed for %s: "
			       "%d\n", driver, val);
			err = -EINVAL;
		}
	} while (0);

	crypto_free_shash(tfm);

out:
	return err;
}
/*
 * Allocate the deterministic RNG named @driver and run its known-answer
 * vectors.
 */
static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
			  u32 type, u32 mask)
{
	const struct cprng_test_suite *suite = &desc->suite.cprng;
	struct crypto_rng *tfm;
	int err;

	tfm = crypto_alloc_rng(driver, type, mask);
	if (IS_ERR(tfm)) {
		printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
		       "%ld\n", driver, PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}

	err = test_cprng(tfm, suite->vecs, suite->count);

	crypto_free_rng(tfm);
	return err;
}
/*
 * Always-pass stub used for table entries with no vectors of their own
 * (internal helper drivers whose .vecs are NULL in alg_test_descs[]).
 */
static int alg_test_null(const struct alg_test_desc *desc,
			     const char *driver, u32 type, u32 mask)
{
	return 0;
}
/* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = {
{
.alg = "__driver-cbc-aes-aesni",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__driver-ecb-aes-aesni",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "__ghash-pclmulqdqni",
.test = alg_test_null,
.suite = {
.hash = {
.vecs = NULL,
.count = 0
}
}
}, {
.alg = "ansi_cprng",
.test = alg_test_cprng,
.fips_allowed = 1,
.suite = {
.cprng = {
.vecs = ansi_cprng_aes_tv_template,
.count = ANSI_CPRNG_AES_TEST_VECTORS
}
}
}, {
.alg = "cbc(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = {
.enc = {
.vecs = aes_cbc_enc_tv_template,
.count = AES_CBC_ENC_TEST_VECTORS
},
.dec = {
.vecs = aes_cbc_dec_tv_template,
.count = AES_CBC_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "cbc(anubis)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = anubis_cbc_enc_tv_template,
.count = ANUBIS_CBC_ENC_TEST_VECTORS
},
.dec = {
.vecs = anubis_cbc_dec_tv_template,
.count = ANUBIS_CBC_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "cbc(blowfish)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = bf_cbc_enc_tv_template,
.count = BF_CBC_ENC_TEST_VECTORS
},
.dec = {
.vecs = bf_cbc_dec_tv_template,
.count = BF_CBC_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "cbc(camellia)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = camellia_cbc_enc_tv_template,
.count = CAMELLIA_CBC_ENC_TEST_VECTORS
},
.dec = {
.vecs = camellia_cbc_dec_tv_template,
.count = CAMELLIA_CBC_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "cbc(des)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = des_cbc_enc_tv_template,
.count = DES_CBC_ENC_TEST_VECTORS
},
.dec = {
.vecs = des_cbc_dec_tv_template,
.count = DES_CBC_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "cbc(des3_ede)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = {
.enc = {
.vecs = des3_ede_cbc_enc_tv_template,
.count = DES3_EDE_CBC_ENC_TEST_VECTORS
},
.dec = {
.vecs = des3_ede_cbc_dec_tv_template,
.count = DES3_EDE_CBC_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "cbc(twofish)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = tf_cbc_enc_tv_template,
.count = TF_CBC_ENC_TEST_VECTORS
},
.dec = {
.vecs = tf_cbc_dec_tv_template,
.count = TF_CBC_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ccm(aes)",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs = aes_ccm_enc_tv_template,
.count = AES_CCM_ENC_TEST_VECTORS
},
.dec = {
.vecs = aes_ccm_dec_tv_template,
.count = AES_CCM_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "crc32c",
.test = alg_test_crc32c,
.fips_allowed = 1,
.suite = {
.hash = {
.vecs = crc32c_tv_template,
.count = CRC32C_TEST_VECTORS
}
}
}, {
.alg = "cryptd(__driver-ecb-aes-aesni)",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "cryptd(__ghash-pclmulqdqni)",
.test = alg_test_null,
.suite = {
.hash = {
.vecs = NULL,
.count = 0
}
}
}, {
.alg = "ctr(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = {
.enc = {
.vecs = aes_ctr_enc_tv_template,
.count = AES_CTR_ENC_TEST_VECTORS
},
.dec = {
.vecs = aes_ctr_dec_tv_template,
.count = AES_CTR_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "cts(cbc(aes))",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = cts_mode_enc_tv_template,
.count = CTS_MODE_ENC_TEST_VECTORS
},
.dec = {
.vecs = cts_mode_dec_tv_template,
.count = CTS_MODE_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "deflate",
.test = alg_test_comp,
.suite = {
.comp = {
.comp = {
.vecs = deflate_comp_tv_template,
.count = DEFLATE_COMP_TEST_VECTORS
},
.decomp = {
.vecs = deflate_decomp_tv_template,
.count = DEFLATE_DECOMP_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(__aes-aesni)",
.test = alg_test_null,
.suite = {
.cipher = {
.enc = {
.vecs = NULL,
.count = 0
},
.dec = {
.vecs = NULL,
.count = 0
}
}
}
}, {
.alg = "ecb(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = {
.enc = {
.vecs = aes_enc_tv_template,
.count = AES_ENC_TEST_VECTORS
},
.dec = {
.vecs = aes_dec_tv_template,
.count = AES_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(anubis)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = anubis_enc_tv_template,
.count = ANUBIS_ENC_TEST_VECTORS
},
.dec = {
.vecs = anubis_dec_tv_template,
.count = ANUBIS_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(arc4)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = arc4_enc_tv_template,
.count = ARC4_ENC_TEST_VECTORS
},
.dec = {
.vecs = arc4_dec_tv_template,
.count = ARC4_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(blowfish)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = bf_enc_tv_template,
.count = BF_ENC_TEST_VECTORS
},
.dec = {
.vecs = bf_dec_tv_template,
.count = BF_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(camellia)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = camellia_enc_tv_template,
.count = CAMELLIA_ENC_TEST_VECTORS
},
.dec = {
.vecs = camellia_dec_tv_template,
.count = CAMELLIA_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(cast5)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = cast5_enc_tv_template,
.count = CAST5_ENC_TEST_VECTORS
},
.dec = {
.vecs = cast5_dec_tv_template,
.count = CAST5_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(cast6)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = cast6_enc_tv_template,
.count = CAST6_ENC_TEST_VECTORS
},
.dec = {
.vecs = cast6_dec_tv_template,
.count = CAST6_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(des)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = {
.enc = {
.vecs = des_enc_tv_template,
.count = DES_ENC_TEST_VECTORS
},
.dec = {
.vecs = des_dec_tv_template,
.count = DES_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(des3_ede)",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = {
.enc = {
.vecs = des3_ede_enc_tv_template,
.count = DES3_EDE_ENC_TEST_VECTORS
},
.dec = {
.vecs = des3_ede_dec_tv_template,
.count = DES3_EDE_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(khazad)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = khazad_enc_tv_template,
.count = KHAZAD_ENC_TEST_VECTORS
},
.dec = {
.vecs = khazad_dec_tv_template,
.count = KHAZAD_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(seed)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = seed_enc_tv_template,
.count = SEED_ENC_TEST_VECTORS
},
.dec = {
.vecs = seed_dec_tv_template,
.count = SEED_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(serpent)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = serpent_enc_tv_template,
.count = SERPENT_ENC_TEST_VECTORS
},
.dec = {
.vecs = serpent_dec_tv_template,
.count = SERPENT_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(tea)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = tea_enc_tv_template,
.count = TEA_ENC_TEST_VECTORS
},
.dec = {
.vecs = tea_dec_tv_template,
.count = TEA_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(tnepres)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = tnepres_enc_tv_template,
.count = TNEPRES_ENC_TEST_VECTORS
},
.dec = {
.vecs = tnepres_dec_tv_template,
.count = TNEPRES_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(twofish)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = tf_enc_tv_template,
.count = TF_ENC_TEST_VECTORS
},
.dec = {
.vecs = tf_dec_tv_template,
.count = TF_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(xeta)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = xeta_enc_tv_template,
.count = XETA_ENC_TEST_VECTORS
},
.dec = {
.vecs = xeta_dec_tv_template,
.count = XETA_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ecb(xtea)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = xtea_enc_tv_template,
.count = XTEA_ENC_TEST_VECTORS
},
.dec = {
.vecs = xtea_dec_tv_template,
.count = XTEA_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "gcm(aes)",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs = aes_gcm_enc_tv_template,
.count = AES_GCM_ENC_TEST_VECTORS
},
.dec = {
.vecs = aes_gcm_dec_tv_template,
.count = AES_GCM_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "ghash",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = ghash_tv_template,
.count = GHASH_TEST_VECTORS
}
}
}, {
.alg = "hmac(md5)",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = hmac_md5_tv_template,
.count = HMAC_MD5_TEST_VECTORS
}
}
}, {
.alg = "hmac(rmd128)",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = hmac_rmd128_tv_template,
.count = HMAC_RMD128_TEST_VECTORS
}
}
}, {
.alg = "hmac(rmd160)",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = hmac_rmd160_tv_template,
.count = HMAC_RMD160_TEST_VECTORS
}
}
}, {
.alg = "hmac(sha1)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = {
.vecs = hmac_sha1_tv_template,
.count = HMAC_SHA1_TEST_VECTORS
}
}
}, {
.alg = "hmac(sha224)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = {
.vecs = hmac_sha224_tv_template,
.count = HMAC_SHA224_TEST_VECTORS
}
}
}, {
.alg = "hmac(sha256)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = {
.vecs = hmac_sha256_tv_template,
.count = HMAC_SHA256_TEST_VECTORS
}
}
}, {
.alg = "hmac(sha384)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = {
.vecs = hmac_sha384_tv_template,
.count = HMAC_SHA384_TEST_VECTORS
}
}
}, {
.alg = "hmac(sha512)",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = {
.vecs = hmac_sha512_tv_template,
.count = HMAC_SHA512_TEST_VECTORS
}
}
}, {
.alg = "lrw(aes)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = aes_lrw_enc_tv_template,
.count = AES_LRW_ENC_TEST_VECTORS
},
.dec = {
.vecs = aes_lrw_dec_tv_template,
.count = AES_LRW_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "lzo",
.test = alg_test_comp,
.suite = {
.comp = {
.comp = {
.vecs = lzo_comp_tv_template,
.count = LZO_COMP_TEST_VECTORS
},
.decomp = {
.vecs = lzo_decomp_tv_template,
.count = LZO_DECOMP_TEST_VECTORS
}
}
}
}, {
.alg = "md4",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = md4_tv_template,
.count = MD4_TEST_VECTORS
}
}
}, {
.alg = "md5",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = md5_tv_template,
.count = MD5_TEST_VECTORS
}
}
}, {
.alg = "michael_mic",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = michael_mic_tv_template,
.count = MICHAEL_MIC_TEST_VECTORS
}
}
}, {
.alg = "pcbc(fcrypt)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = fcrypt_pcbc_enc_tv_template,
.count = FCRYPT_ENC_TEST_VECTORS
},
.dec = {
.vecs = fcrypt_pcbc_dec_tv_template,
.count = FCRYPT_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "rfc3686(ctr(aes))",
.test = alg_test_skcipher,
.fips_allowed = 1,
.suite = {
.cipher = {
.enc = {
.vecs = aes_ctr_rfc3686_enc_tv_template,
.count = AES_CTR_3686_ENC_TEST_VECTORS
},
.dec = {
.vecs = aes_ctr_rfc3686_dec_tv_template,
.count = AES_CTR_3686_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "rfc4309(ccm(aes))",
.test = alg_test_aead,
.fips_allowed = 1,
.suite = {
.aead = {
.enc = {
.vecs = aes_ccm_rfc4309_enc_tv_template,
.count = AES_CCM_4309_ENC_TEST_VECTORS
},
.dec = {
.vecs = aes_ccm_rfc4309_dec_tv_template,
.count = AES_CCM_4309_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "rmd128",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = rmd128_tv_template,
.count = RMD128_TEST_VECTORS
}
}
}, {
.alg = "rmd160",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = rmd160_tv_template,
.count = RMD160_TEST_VECTORS
}
}
}, {
.alg = "rmd256",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = rmd256_tv_template,
.count = RMD256_TEST_VECTORS
}
}
}, {
.alg = "rmd320",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = rmd320_tv_template,
.count = RMD320_TEST_VECTORS
}
}
}, {
.alg = "salsa20",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = salsa20_stream_enc_tv_template,
.count = SALSA20_STREAM_ENC_TEST_VECTORS
}
}
}
}, {
.alg = "sha1",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = {
.vecs = sha1_tv_template,
.count = SHA1_TEST_VECTORS
}
}
}, {
.alg = "sha224",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = {
.vecs = sha224_tv_template,
.count = SHA224_TEST_VECTORS
}
}
}, {
.alg = "sha256",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = {
.vecs = sha256_tv_template,
.count = SHA256_TEST_VECTORS
}
}
}, {
.alg = "sha384",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = {
.vecs = sha384_tv_template,
.count = SHA384_TEST_VECTORS
}
}
}, {
.alg = "sha512",
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
.hash = {
.vecs = sha512_tv_template,
.count = SHA512_TEST_VECTORS
}
}
}, {
.alg = "tgr128",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = tgr128_tv_template,
.count = TGR128_TEST_VECTORS
}
}
}, {
.alg = "tgr160",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = tgr160_tv_template,
.count = TGR160_TEST_VECTORS
}
}
}, {
.alg = "tgr192",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = tgr192_tv_template,
.count = TGR192_TEST_VECTORS
}
}
}, {
.alg = "vmac(aes)",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = aes_vmac128_tv_template,
.count = VMAC_AES_TEST_VECTORS
}
}
}, {
.alg = "wp256",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = wp256_tv_template,
.count = WP256_TEST_VECTORS
}
}
}, {
.alg = "wp384",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = wp384_tv_template,
.count = WP384_TEST_VECTORS
}
}
}, {
.alg = "wp512",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = wp512_tv_template,
.count = WP512_TEST_VECTORS
}
}
}, {
.alg = "xcbc(aes)",
.test = alg_test_hash,
.suite = {
.hash = {
.vecs = aes_xcbc128_tv_template,
.count = XCBC_AES_TEST_VECTORS
}
}
}, {
.alg = "xts(aes)",
.test = alg_test_skcipher,
.suite = {
.cipher = {
.enc = {
.vecs = aes_xts_enc_tv_template,
.count = AES_XTS_ENC_TEST_VECTORS
},
.dec = {
.vecs = aes_xts_dec_tv_template,
.count = AES_XTS_DEC_TEST_VECTORS
}
}
}
}, {
.alg = "zlib",
.test = alg_test_pcomp,
.suite = {
.pcomp = {
.comp = {
.vecs = zlib_comp_tv_template,
.count = ZLIB_COMP_TEST_VECTORS
},
.decomp = {
.vecs = zlib_decomp_tv_template,
.count = ZLIB_DECOMP_TEST_VECTORS
}
}
}
}
};
/*
 * Binary-search alg_test_descs[] (sorted ascending by .alg) for an
 * exact algorithm-name match.  Returns the matching index, or -1 if
 * the algorithm has no registered test entry.
 */
static int alg_find_test(const char *alg)
{
	int lo = 0;
	int hi = ARRAY_SIZE(alg_test_descs);

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;
		int cmp = strcmp(alg_test_descs[mid].alg, alg);

		if (cmp == 0)
			return mid;
		if (cmp > 0)
			hi = mid;
		else
			lo = mid + 1;
	}

	return -1;
}
/*
 * Run the registered self-tests for an algorithm and/or its driver
 * implementation.  Returns 0 when no test exists, the (possibly OR'ed)
 * test result otherwise.  In FIPS mode a failed self-test panics and a
 * non-FIPS-approved algorithm is rejected with -EINVAL.
 */
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{
int i;
int j;
int rc;
/* bare ciphers are tested via their ecb(...) wrapper entry */
if ((type & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_CIPHER) {
char nalg[CRYPTO_MAX_ALG_NAME];
if (snprintf(nalg, sizeof(nalg), "ecb(%s)", alg) >=
sizeof(nalg))
return -ENAMETOOLONG;
i = alg_find_test(nalg);
if (i < 0)
goto notest;
if (fips_enabled && !alg_test_descs[i].fips_allowed)
goto non_fips_alg;
rc = alg_test_cipher(alg_test_descs + i, driver, type, mask);
goto test_done;
}
/* look up by generic algorithm name and by driver-specific name */
i = alg_find_test(alg);
j = alg_find_test(driver);
if (i < 0 && j < 0)
goto notest;
if (fips_enabled && ((i >= 0 && !alg_test_descs[i].fips_allowed) ||
(j >= 0 && !alg_test_descs[j].fips_allowed)))
goto non_fips_alg;
rc = 0;
/* run both test entries when both exist; any failure makes rc non-zero */
if (i >= 0)
rc |= alg_test_descs[i].test(alg_test_descs + i, driver,
type, mask);
if (j >= 0)
rc |= alg_test_descs[j].test(alg_test_descs + j, driver,
type, mask);
test_done:
if (fips_enabled && rc)
panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
if (fips_enabled && !rc)
printk(KERN_INFO "alg: self-tests for %s (%s) passed\n",
driver, alg);
return rc;
notest:
printk(KERN_INFO "alg: No test for %s (%s)\n", alg, driver);
return 0;
non_fips_alg:
return -EINVAL;
}
#endif /* CONFIG_CRYPTO_MANAGER_TESTS */
EXPORT_SYMBOL_GPL(alg_test);
| gpl-2.0 |
bachtk/linux | drivers/media/usb/dvb-usb-v2/dvb_usb_core.c | 737 | 28349 | /*
* DVB USB framework
*
* Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
* Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "dvb_usb_common.h"
static int dvb_usbv2_disable_rc_polling;
module_param_named(disable_rc_polling, dvb_usbv2_disable_rc_polling, int, 0644);
MODULE_PARM_DESC(disable_rc_polling,
"disable remote control polling (default: 0)");
static int dvb_usb_force_pid_filter_usage;
module_param_named(force_pid_filter_usage, dvb_usb_force_pid_filter_usage,
int, 0444);
MODULE_PARM_DESC(force_pid_filter_usage,
"force all DVB USB devices to use a PID filter, if any (default: 0)");
/*
 * Fetch the firmware image @name via request_firmware() and hand it to
 * the driver's download_firmware() hook.  Returns the hook's result,
 * -EINVAL when the driver provides no hook, or the request error.
 */
static int dvb_usbv2_download_firmware(struct dvb_usb_device *d,
		const char *name)
{
	const struct firmware *fw;
	int ret;

	dev_dbg(&d->udev->dev, "%s:\n", __func__);

	/* without a download hook there is nothing we can do */
	if (!d->props->download_firmware) {
		ret = -EINVAL;
		goto err;
	}

	ret = request_firmware(&fw, name, &d->udev->dev);
	if (ret < 0) {
		dev_err(&d->udev->dev,
				"%s: Did not find the firmware file '%s'. Please see linux/Documentation/dvb/ for more details on firmware-problems. Status %d\n",
				KBUILD_MODNAME, name, ret);
		goto err;
	}

	dev_info(&d->udev->dev, "%s: downloading firmware from file '%s'\n",
			KBUILD_MODNAME, name);

	ret = d->props->download_firmware(d, fw);
	/* the image is copied to the device; release it regardless of result */
	release_firmware(fw);
	if (ret < 0)
		goto err;

	return ret;
err:
	dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
	return ret;
}
/*
 * Register the driver-supplied I2C algorithm as an I2C adapter.
 * A driver without an i2c_algo simply exposes no bus (success).
 */
static int dvb_usbv2_i2c_init(struct dvb_usb_device *d)
{
	int ret;

	dev_dbg(&d->udev->dev, "%s:\n", __func__);

	if (!d->props->i2c_algo)
		return 0;

	strlcpy(d->i2c_adap.name, d->name, sizeof(d->i2c_adap.name));
	d->i2c_adap.algo = d->props->i2c_algo;
	d->i2c_adap.dev.parent = &d->udev->dev;
	i2c_set_adapdata(&d->i2c_adap, d);

	ret = i2c_add_adapter(&d->i2c_adap);
	if (ret < 0) {
		/* clear .algo so dvb_usbv2_i2c_exit() knows nothing registered */
		d->i2c_adap.algo = NULL;
		dev_err(&d->udev->dev, "%s: i2c_add_adapter() failed=%d\n",
				KBUILD_MODNAME, ret);
		dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
		return ret;
	}

	return 0;
}
/* Remove the I2C adapter, but only if init actually registered one. */
static int dvb_usbv2_i2c_exit(struct dvb_usb_device *d)
{
	dev_dbg(&d->udev->dev, "%s:\n", __func__);

	if (!d->i2c_adap.algo)
		return 0;

	i2c_del_adapter(&d->i2c_adap);
	return 0;
}
#if IS_ENABLED(CONFIG_RC_CORE)
/*
 * Delayed-work handler that polls the remote control and re-arms
 * itself.  Polling stops (rc_polling_active cleared) when disabled at
 * runtime, when bulk mode took over, or when rc.query() fails.
 */
static void dvb_usb_read_remote_control(struct work_struct *work)
{
	struct dvb_usb_device *d = container_of(work,
			struct dvb_usb_device, rc_query_work.work);
	int ret;

	/*
	 * When the parameter has been set to 1 via sysfs while the
	 * driver was running, or when bulk mode is enabled after IR init.
	 */
	if (dvb_usbv2_disable_rc_polling || d->rc.bulk_mode) {
		d->rc_polling_active = false;
		return;
	}

	ret = d->rc.query(d);
	if (ret < 0) {
		dev_err(&d->udev->dev, "%s: rc.query() failed=%d\n",
				KBUILD_MODNAME, ret);
		d->rc_polling_active = false;
		return; /* stop polling */
	}

	/* schedule the next poll */
	schedule_delayed_work(&d->rc_query_work,
			msecs_to_jiffies(d->rc.interval));
}
/*
 * Query the driver's remote-control configuration, register an RC
 * device with RC-core and, for polled (non-bulk) receivers, start the
 * periodic query work.  Returns 0 also when RC is disabled or the
 * driver defines no keymap.
 */
static int dvb_usbv2_remote_init(struct dvb_usb_device *d)
{
int ret;
struct rc_dev *dev;
dev_dbg(&d->udev->dev, "%s:\n", __func__);
if (dvb_usbv2_disable_rc_polling || !d->props->get_rc_config)
return 0;
/* default keymap from the driver_info; get_rc_config() may override */
d->rc.map_name = d->rc_map;
ret = d->props->get_rc_config(d, &d->rc);
if (ret < 0)
goto err;
/* disable rc when there is no keymap defined */
if (!d->rc.map_name)
return 0;
dev = rc_allocate_device();
if (!dev) {
ret = -ENOMEM;
goto err;
}
dev->dev.parent = &d->udev->dev;
dev->input_name = d->name;
usb_make_path(d->udev, d->rc_phys, sizeof(d->rc_phys));
strlcat(d->rc_phys, "/ir0", sizeof(d->rc_phys));
dev->input_phys = d->rc_phys;
usb_to_input_id(d->udev, &dev->input_id);
/* TODO: RC-core should probably take a const char * here */
dev->driver_name = (char *) d->props->driver_name;
dev->map_name = d->rc.map_name;
dev->driver_type = d->rc.driver_type;
dev->allowed_protocols = d->rc.allowed_protos;
dev->change_protocol = d->rc.change_protocol;
dev->priv = d;
ret = rc_register_device(dev);
if (ret < 0) {
rc_free_device(dev);
goto err;
}
/* d->rc_dev != NULL is the signal for dvb_usbv2_remote_exit() */
d->rc_dev = dev;
/* start polling if needed */
if (d->rc.query && !d->rc.bulk_mode) {
/* initialize a work queue for handling polling */
INIT_DELAYED_WORK(&d->rc_query_work,
dvb_usb_read_remote_control);
dev_info(&d->udev->dev,
"%s: schedule remote query interval to %d msecs\n",
KBUILD_MODNAME, d->rc.interval);
schedule_delayed_work(&d->rc_query_work,
msecs_to_jiffies(d->rc.interval));
d->rc_polling_active = true;
}
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
/* Stop RC polling and unregister the RC device, if one was created. */
static int dvb_usbv2_remote_exit(struct dvb_usb_device *d)
{
	dev_dbg(&d->udev->dev, "%s:\n", __func__);

	if (!d->rc_dev)
		return 0;

	/* make sure no query work is still in flight before unregistering */
	cancel_delayed_work_sync(&d->rc_query_work);
	rc_unregister_device(d->rc_dev);
	d->rc_dev = NULL;

	return 0;
}
#else
#define dvb_usbv2_remote_init(args...) 0
#define dvb_usbv2_remote_exit(args...)
#endif
/* URB completion for 188-byte TS streams: pass data to the sw demux. */
static void dvb_usb_data_complete(struct usb_data_stream *stream, u8 *buf,
		size_t len)
{
	struct dvb_usb_adapter *adap = stream->user_priv;

	dvb_dmx_swfilter(&adap->demux, buf, len);
}
/* URB completion for 204-byte TS streams: pass data to the sw demux. */
static void dvb_usb_data_complete_204(struct usb_data_stream *stream, u8 *buf,
		size_t len)
{
	struct dvb_usb_adapter *adap = stream->user_priv;

	dvb_dmx_swfilter_204(&adap->demux, buf, len);
}
/* URB completion for raw streams: pass data to the sw demux as-is. */
static void dvb_usb_data_complete_raw(struct usb_data_stream *stream, u8 *buf,
		size_t len)
{
	struct dvb_usb_adapter *adap = stream->user_priv;

	dvb_dmx_swfilter_raw(&adap->demux, buf, len);
}
static int dvb_usbv2_adapter_stream_init(struct dvb_usb_adapter *adap)
{
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
adap->id);
adap->stream.udev = adap_to_d(adap)->udev;
adap->stream.user_priv = adap;
adap->stream.complete = dvb_usb_data_complete;
return usb_urb_initv2(&adap->stream, &adap->props->stream);
}
static int dvb_usbv2_adapter_stream_exit(struct dvb_usb_adapter *adap)
{
dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
adap->id);
return usb_urb_exitv2(&adap->stream);
}
/*
 * demux start_feed hook, called once per PID userspace subscribes to.
 * The first feed (feed_count 0 -> 1) resolves stream parameters,
 * submits the streaming URBs and tells the device to start streaming;
 * later feeds only add their PID to the HW filter.  Blocks until a
 * concurrent frontend init (ADAP_INIT) has finished.
 */
static int dvb_usb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_usb_adapter *adap = dvbdmxfeed->demux->priv;
struct dvb_usb_device *d = adap_to_d(adap);
int ret = 0;
struct usb_data_stream_properties stream_props;
dev_dbg(&d->udev->dev,
"%s: adap=%d active_fe=%d feed_type=%d setting pid [%s]: %04x (%04d) at index %d\n",
__func__, adap->id, adap->active_fe, dvbdmxfeed->type,
adap->pid_filtering ? "yes" : "no", dvbdmxfeed->pid,
dvbdmxfeed->pid, dvbdmxfeed->index);
/* wait init is done */
wait_on_bit(&adap->state_bits, ADAP_INIT, TASK_UNINTERRUPTIBLE);
/* no active frontend means nothing is tuned; refuse the feed */
if (adap->active_fe == -1)
return -EINVAL;
/* skip feed setup if we are already feeding */
if (adap->feed_count++ > 0)
goto skip_feed_start;
/* set 'streaming' status bit */
set_bit(ADAP_STREAMING, &adap->state_bits);
/* resolve input and output streaming parameters */
if (d->props->get_stream_config) {
memcpy(&stream_props, &adap->props->stream,
sizeof(struct usb_data_stream_properties));
ret = d->props->get_stream_config(adap->fe[adap->active_fe],
&adap->ts_type, &stream_props);
if (ret)
dev_err(&d->udev->dev,
"%s: get_stream_config() failed=%d\n",
KBUILD_MODNAME, ret);
} else {
stream_props = adap->props->stream;
}
/* pick the completion handler matching the TS packet format */
switch (adap->ts_type) {
case DVB_USB_FE_TS_TYPE_204:
adap->stream.complete = dvb_usb_data_complete_204;
break;
case DVB_USB_FE_TS_TYPE_RAW:
adap->stream.complete = dvb_usb_data_complete_raw;
break;
case DVB_USB_FE_TS_TYPE_188:
default:
adap->stream.complete = dvb_usb_data_complete;
break;
}
/* submit USB streaming packets */
usb_urb_submitv2(&adap->stream, &stream_props);
/* enable HW PID filter */
if (adap->pid_filtering && adap->props->pid_filter_ctrl) {
ret = adap->props->pid_filter_ctrl(adap, 1);
if (ret)
dev_err(&d->udev->dev,
"%s: pid_filter_ctrl() failed=%d\n",
KBUILD_MODNAME, ret);
}
/* ask device to start streaming */
if (d->props->streaming_ctrl) {
ret = d->props->streaming_ctrl(adap->fe[adap->active_fe], 1);
if (ret)
dev_err(&d->udev->dev,
"%s: streaming_ctrl() failed=%d\n",
KBUILD_MODNAME, ret);
}
skip_feed_start:
/* add PID to device HW PID filter */
if (adap->pid_filtering && adap->props->pid_filter) {
ret = adap->props->pid_filter(adap, dvbdmxfeed->index,
dvbdmxfeed->pid, 1);
if (ret)
dev_err(&d->udev->dev, "%s: pid_filter() failed=%d\n",
KBUILD_MODNAME, ret);
}
if (ret)
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
/*
 * demux stop_feed hook: mirror of dvb_usb_start_feed().  The PID is
 * always removed from the HW filter; the stream itself is only torn
 * down when the last feed goes away (feed_count reaches 0), after
 * which the ADAP_STREAMING bit is cleared to unblock a pending
 * frontend sleep.
 */
static int dvb_usb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{
struct dvb_usb_adapter *adap = dvbdmxfeed->demux->priv;
struct dvb_usb_device *d = adap_to_d(adap);
int ret = 0;
dev_dbg(&d->udev->dev,
"%s: adap=%d active_fe=%d feed_type=%d setting pid [%s]: %04x (%04d) at index %d\n",
__func__, adap->id, adap->active_fe, dvbdmxfeed->type,
adap->pid_filtering ? "yes" : "no", dvbdmxfeed->pid,
dvbdmxfeed->pid, dvbdmxfeed->index);
if (adap->active_fe == -1)
return -EINVAL;
/* remove PID from device HW PID filter */
if (adap->pid_filtering && adap->props->pid_filter) {
ret = adap->props->pid_filter(adap, dvbdmxfeed->index,
dvbdmxfeed->pid, 0);
if (ret)
dev_err(&d->udev->dev, "%s: pid_filter() failed=%d\n",
KBUILD_MODNAME, ret);
}
/* we cannot stop streaming until last PID is removed */
if (--adap->feed_count > 0)
goto skip_feed_stop;
/* ask device to stop streaming */
if (d->props->streaming_ctrl) {
ret = d->props->streaming_ctrl(adap->fe[adap->active_fe], 0);
if (ret)
dev_err(&d->udev->dev,
"%s: streaming_ctrl() failed=%d\n",
KBUILD_MODNAME, ret);
}
/* disable HW PID filter */
if (adap->pid_filtering && adap->props->pid_filter_ctrl) {
ret = adap->props->pid_filter_ctrl(adap, 0);
if (ret)
dev_err(&d->udev->dev,
"%s: pid_filter_ctrl() failed=%d\n",
KBUILD_MODNAME, ret);
}
/* kill USB streaming packets */
usb_urb_killv2(&adap->stream);
/* clear 'streaming' status bit */
clear_bit(ADAP_STREAMING, &adap->state_bits);
smp_mb__after_atomic();
/* wake dvb_usb_fe_sleep() waiting on ADAP_STREAMING */
wake_up_bit(&adap->state_bits, ADAP_STREAMING);
skip_feed_stop:
if (ret)
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
/*
 * Optionally create and register a media controller device for the
 * adapter (no-op unless CONFIG_MEDIA_CONTROLLER_DVB).  Failures are
 * logged but not fatal: the DVB adapter works without a media device.
 */
static void dvb_usbv2_media_device_register(struct dvb_usb_adapter *adap)
{
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
	struct media_device *mdev;
	struct dvb_usb_device *d = adap_to_d(adap);
	struct usb_device *udev = d->udev;
	int ret;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return;

	mdev->dev = &udev->dev;
	strlcpy(mdev->model, d->name, sizeof(mdev->model));
	if (udev->serial)
		strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
	/*
	 * Bounded copy: devpath comes from USB core and is not guaranteed
	 * to fit bus_info, so do not use plain strcpy() here.
	 */
	strlcpy(mdev->bus_info, udev->devpath, sizeof(mdev->bus_info));
	mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
	mdev->driver_version = LINUX_VERSION_CODE;

	ret = media_device_register(mdev);
	if (ret) {
		dev_err(&d->udev->dev,
			"Couldn't create a media device. Error: %d\n",
			ret);
		kfree(mdev);
		return;
	}

	dvb_register_media_controller(&adap->dvb_adap, mdev);

	dev_info(&d->udev->dev, "media controller created\n");
#endif
}
/* Undo dvb_usbv2_media_device_register(); safe when no mdev exists. */
static void dvb_usbv2_media_device_unregister(struct dvb_usb_adapter *adap)
{
#ifdef CONFIG_MEDIA_CONTROLLER_DVB
	struct media_device *mdev = adap->dvb_adap.mdev;

	if (!mdev)
		return;

	media_device_unregister(mdev);
	kfree(mdev);
	adap->dvb_adap.mdev = NULL;
#endif
}
static int dvb_usbv2_adapter_dvb_init(struct dvb_usb_adapter *adap)
{
int ret;
struct dvb_usb_device *d = adap_to_d(adap);
dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, adap->id);
ret = dvb_register_adapter(&adap->dvb_adap, d->name, d->props->owner,
&d->udev->dev, d->props->adapter_nr);
if (ret < 0) {
dev_dbg(&d->udev->dev, "%s: dvb_register_adapter() failed=%d\n",
__func__, ret);
goto err_dvb_register_adapter;
}
adap->dvb_adap.priv = adap;
dvb_usbv2_media_device_register(adap);
if (d->props->read_mac_address) {
ret = d->props->read_mac_address(adap,
adap->dvb_adap.proposed_mac);
if (ret < 0)
goto err_dvb_dmx_init;
dev_info(&d->udev->dev, "%s: MAC address: %pM\n",
KBUILD_MODNAME, adap->dvb_adap.proposed_mac);
}
adap->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
adap->demux.priv = adap;
adap->demux.filternum = 0;
adap->demux.filternum = adap->max_feed_count;
adap->demux.feednum = adap->demux.filternum;
adap->demux.start_feed = dvb_usb_start_feed;
adap->demux.stop_feed = dvb_usb_stop_feed;
adap->demux.write_to_decoder = NULL;
ret = dvb_dmx_init(&adap->demux);
if (ret < 0) {
dev_err(&d->udev->dev, "%s: dvb_dmx_init() failed=%d\n",
KBUILD_MODNAME, ret);
goto err_dvb_dmx_init;
}
adap->dmxdev.filternum = adap->demux.filternum;
adap->dmxdev.demux = &adap->demux.dmx;
adap->dmxdev.capabilities = 0;
ret = dvb_dmxdev_init(&adap->dmxdev, &adap->dvb_adap);
if (ret < 0) {
dev_err(&d->udev->dev, "%s: dvb_dmxdev_init() failed=%d\n",
KBUILD_MODNAME, ret);
goto err_dvb_dmxdev_init;
}
ret = dvb_net_init(&adap->dvb_adap, &adap->dvb_net, &adap->demux.dmx);
if (ret < 0) {
dev_err(&d->udev->dev, "%s: dvb_net_init() failed=%d\n",
KBUILD_MODNAME, ret);
goto err_dvb_net_init;
}
return 0;
err_dvb_net_init:
dvb_dmxdev_release(&adap->dmxdev);
err_dvb_dmxdev_init:
dvb_dmx_release(&adap->demux);
err_dvb_dmx_init:
dvb_usbv2_media_device_unregister(adap);
dvb_unregister_adapter(&adap->dvb_adap);
err_dvb_register_adapter:
adap->dvb_adap.priv = NULL;
return ret;
}
/*
 * Tear down what dvb_usbv2_adapter_dvb_init() created.  A NULL
 * dvb_adap.priv means init never completed, so nothing to undo.
 */
static int dvb_usbv2_adapter_dvb_exit(struct dvb_usb_adapter *adap)
{
	dev_dbg(&adap_to_d(adap)->udev->dev, "%s: adap=%d\n", __func__,
			adap->id);

	if (!adap->dvb_adap.priv)
		return 0;

	dvb_net_release(&adap->dvb_net);
	adap->demux.dmx.close(&adap->demux.dmx);
	dvb_dmxdev_release(&adap->dmxdev);
	dvb_dmx_release(&adap->demux);
	dvb_usbv2_media_device_unregister(adap);
	dvb_unregister_adapter(&adap->dvb_adap);

	return 0;
}
/*
 * Reference-counted device power control.  The driver's power_ctrl()
 * hook only fires on the 0 -> 1 and 1 -> 0 transitions of d->powered,
 * so nested on/off pairs are cheap.
 * NOTE(review): d->powered is not locked here; presumably callers are
 * serialized — confirm against the framework's locking rules.
 */
static int dvb_usbv2_device_power_ctrl(struct dvb_usb_device *d, int onoff)
{
int ret;
if (onoff)
d->powered++;
else
d->powered--;
if (d->powered == 0 || (onoff && d->powered == 1)) {
/* when switching from 1 to 0 or from 0 to 1 */
dev_dbg(&d->udev->dev, "%s: power=%d\n", __func__, onoff);
if (d->props->power_ctrl) {
ret = d->props->power_ctrl(d, onoff);
if (ret < 0)
goto err;
}
}
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
/*
 * Wrapper installed over each frontend's ops.init: records the active
 * frontend, powers the device up, runs the optional frontend_ctrl()
 * hook, then the frontend's original init.  The ADAP_INIT state bit
 * holds off dvb_usb_start_feed() until this completes; the bit
 * handling is skipped while suspend/resume drives the frontends.
 */
static int dvb_usb_fe_init(struct dvb_frontend *fe)
{
int ret;
struct dvb_usb_adapter *adap = fe->dvb->priv;
struct dvb_usb_device *d = adap_to_d(adap);
dev_dbg(&d->udev->dev, "%s: adap=%d fe=%d\n", __func__, adap->id,
fe->id);
if (!adap->suspend_resume_active) {
adap->active_fe = fe->id;
set_bit(ADAP_INIT, &adap->state_bits);
}
ret = dvb_usbv2_device_power_ctrl(d, 1);
if (ret < 0)
goto err;
if (d->props->frontend_ctrl) {
ret = d->props->frontend_ctrl(fe, 1);
if (ret < 0)
goto err;
}
/* call the frontend's original init saved by adapter_frontend_init() */
if (adap->fe_init[fe->id]) {
ret = adap->fe_init[fe->id](fe);
if (ret < 0)
goto err;
}
err:
/* clear ADAP_INIT even on failure so feeds don't block forever */
if (!adap->suspend_resume_active) {
clear_bit(ADAP_INIT, &adap->state_bits);
smp_mb__after_atomic();
wake_up_bit(&adap->state_bits, ADAP_INIT);
}
dev_dbg(&d->udev->dev, "%s: ret=%d\n", __func__, ret);
return ret;
}
/*
 * Wrapper installed over each frontend's ops.sleep: waits for any
 * active streaming to stop (ADAP_STREAMING cleared by
 * dvb_usb_stop_feed()), runs the frontend's original sleep and the
 * optional frontend_ctrl() hook, then drops the device power
 * reference.  Bit handling is skipped during suspend/resume.
 */
static int dvb_usb_fe_sleep(struct dvb_frontend *fe)
{
int ret;
struct dvb_usb_adapter *adap = fe->dvb->priv;
struct dvb_usb_device *d = adap_to_d(adap);
dev_dbg(&d->udev->dev, "%s: adap=%d fe=%d\n", __func__, adap->id,
fe->id);
if (!adap->suspend_resume_active) {
set_bit(ADAP_SLEEP, &adap->state_bits);
/* block until the last feed has stopped streaming */
wait_on_bit(&adap->state_bits, ADAP_STREAMING,
TASK_UNINTERRUPTIBLE);
}
if (adap->fe_sleep[fe->id]) {
ret = adap->fe_sleep[fe->id](fe);
if (ret < 0)
goto err;
}
if (d->props->frontend_ctrl) {
ret = d->props->frontend_ctrl(fe, 0);
if (ret < 0)
goto err;
}
ret = dvb_usbv2_device_power_ctrl(d, 0);
if (ret < 0)
goto err;
err:
if (!adap->suspend_resume_active) {
/* no frontend active anymore */
adap->active_fe = -1;
clear_bit(ADAP_SLEEP, &adap->state_bits);
smp_mb__after_atomic();
wake_up_bit(&adap->state_bits, ADAP_SLEEP);
}
dev_dbg(&d->udev->dev, "%s: ret=%d\n", __func__, ret);
return ret;
}
/*
 * Attach and register the adapter's frontends.  The original
 * init/sleep ops of each frontend are saved and replaced with the
 * dvb_usb_fe_init/sleep wrappers so the framework can manage power
 * and streaming state.  On failure, frontends registered so far are
 * unregistered and all attached frontends detached.
 */
static int dvb_usbv2_adapter_frontend_init(struct dvb_usb_adapter *adap)
{
int ret, i, count_registered = 0;
struct dvb_usb_device *d = adap_to_d(adap);
dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, adap->id);
memset(adap->fe, 0, sizeof(adap->fe));
adap->active_fe = -1;
if (d->props->frontend_attach) {
ret = d->props->frontend_attach(adap);
if (ret < 0) {
dev_dbg(&d->udev->dev,
"%s: frontend_attach() failed=%d\n",
__func__, ret);
goto err_dvb_frontend_detach;
}
} else {
/* a driver without frontend_attach provides no frontends: done */
dev_dbg(&d->udev->dev, "%s: frontend_attach() do not exists\n",
__func__);
ret = 0;
goto err;
}
for (i = 0; i < MAX_NO_OF_FE_PER_ADAP && adap->fe[i]; i++) {
adap->fe[i]->id = i;
/* re-assign sleep and wakeup functions */
adap->fe_init[i] = adap->fe[i]->ops.init;
adap->fe[i]->ops.init = dvb_usb_fe_init;
adap->fe_sleep[i] = adap->fe[i]->ops.sleep;
adap->fe[i]->ops.sleep = dvb_usb_fe_sleep;
ret = dvb_register_frontend(&adap->dvb_adap, adap->fe[i]);
if (ret < 0) {
dev_err(&d->udev->dev,
"%s: frontend%d registration failed\n",
KBUILD_MODNAME, i);
goto err_dvb_unregister_frontend;
}
count_registered++;
}
if (d->props->tuner_attach) {
ret = d->props->tuner_attach(adap);
if (ret < 0) {
dev_dbg(&d->udev->dev, "%s: tuner_attach() failed=%d\n",
__func__, ret);
goto err_dvb_unregister_frontend;
}
}
dvb_create_media_graph(&adap->dvb_adap);
return 0;
err_dvb_unregister_frontend:
/* unwind only the frontends that were actually registered */
for (i = count_registered - 1; i >= 0; i--)
dvb_unregister_frontend(adap->fe[i]);
err_dvb_frontend_detach:
for (i = MAX_NO_OF_FE_PER_ADAP - 1; i >= 0; i--) {
if (adap->fe[i]) {
dvb_frontend_detach(adap->fe[i]);
adap->fe[i] = NULL;
}
}
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
/*
 * Unregister and detach all frontends in reverse order, then let the
 * driver tear down its tuner and frontend state.  Hook failures are
 * logged at debug level only; exit always succeeds.
 */
static int dvb_usbv2_adapter_frontend_exit(struct dvb_usb_adapter *adap)
{
	int ret, i;
	struct dvb_usb_device *d = adap_to_d(adap);

	dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, adap->id);

	for (i = MAX_NO_OF_FE_PER_ADAP - 1; i >= 0; i--) {
		if (!adap->fe[i])
			continue;
		dvb_unregister_frontend(adap->fe[i]);
		dvb_frontend_detach(adap->fe[i]);
	}

	if (d->props->tuner_detach) {
		ret = d->props->tuner_detach(adap);
		if (ret < 0)
			dev_dbg(&d->udev->dev, "%s: tuner_detach() failed=%d\n",
					__func__, ret);
	}

	if (d->props->frontend_detach) {
		ret = d->props->frontend_detach(adap);
		if (ret < 0)
			dev_dbg(&d->udev->dev,
					"%s: frontend_detach() failed=%d\n",
					__func__, ret);
	}

	return 0;
}
/*
 * Initialize every adapter of the device: resolve the adapter count,
 * decide the PID-filtering policy per adapter (mandatory on USB 1.1
 * full-speed ports, optional otherwise, forceable via module param),
 * then bring up stream, DVB devices and frontends for each.
 */
static int dvb_usbv2_adapter_init(struct dvb_usb_device *d)
{
struct dvb_usb_adapter *adap;
int ret, i, adapter_count;
/* resolve adapter count */
adapter_count = d->props->num_adapters;
if (d->props->get_adapter_count) {
ret = d->props->get_adapter_count(d);
if (ret < 0)
goto err;
adapter_count = ret;
}
for (i = 0; i < adapter_count; i++) {
adap = &d->adapter[i];
adap->id = i;
adap->props = &d->props->adapter[i];
/* speed - when running at FULL speed we need a HW PID filter */
if (d->udev->speed == USB_SPEED_FULL &&
!(adap->props->caps & DVB_USB_ADAP_HAS_PID_FILTER)) {
dev_err(&d->udev->dev,
"%s: this USB2.0 device cannot be run on a USB1.1 port (it lacks a hardware PID filter)\n",
KBUILD_MODNAME);
ret = -ENODEV;
goto err;
} else if ((d->udev->speed == USB_SPEED_FULL &&
adap->props->caps & DVB_USB_ADAP_HAS_PID_FILTER) ||
(adap->props->caps & DVB_USB_ADAP_NEED_PID_FILTERING)) {
dev_info(&d->udev->dev,
"%s: will use the device's hardware PID filter (table count: %d)\n",
KBUILD_MODNAME,
adap->props->pid_filter_count);
adap->pid_filtering = 1;
adap->max_feed_count = adap->props->pid_filter_count;
} else {
dev_info(&d->udev->dev,
"%s: will pass the complete MPEG2 transport stream to the software demuxer\n",
KBUILD_MODNAME);
adap->pid_filtering = 0;
adap->max_feed_count = 255;
}
/* module parameter can force the HW filter on when available */
if (!adap->pid_filtering && dvb_usb_force_pid_filter_usage &&
adap->props->caps & DVB_USB_ADAP_HAS_PID_FILTER) {
dev_info(&d->udev->dev,
"%s: PID filter enabled by module option\n",
KBUILD_MODNAME);
adap->pid_filtering = 1;
adap->max_feed_count = adap->props->pid_filter_count;
}
ret = dvb_usbv2_adapter_stream_init(adap);
if (ret)
goto err;
ret = dvb_usbv2_adapter_dvb_init(adap);
if (ret)
goto err;
ret = dvb_usbv2_adapter_frontend_init(adap);
if (ret)
goto err;
/* use exclusive FE lock if there is multiple shared FEs */
if (adap->fe[1])
adap->dvb_adap.mfe_shared = 1;
}
return 0;
err:
dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
}
/*
 * Tear down all initialised adapters in reverse order.  An adapter is
 * considered initialised when its props pointer has been set by
 * dvb_usbv2_adapter_init(), so partially initialised devices are handled
 * safely.  Always returns 0.
 */
static int dvb_usbv2_adapter_exit(struct dvb_usb_device *d)
{
	int i;
	dev_dbg(&d->udev->dev, "%s:\n", __func__);

	for (i = MAX_NO_OF_ADAPTER_PER_DEVICE - 1; i >= 0; i--) {
		if (d->adapter[i].props) {
			dvb_usbv2_adapter_dvb_exit(&d->adapter[i]);
			dvb_usbv2_adapter_stream_exit(&d->adapter[i]);
			dvb_usbv2_adapter_frontend_exit(&d->adapter[i]);
		}
	}

	return 0;
}
/* general initialization functions */
/*
 * Release everything dvb_usbv2_init()/probe allocated: remote controller,
 * adapters, I2C adapter, the driver-private area and finally the device
 * structure itself.  After this call @d is freed and must not be touched.
 */
static int dvb_usbv2_exit(struct dvb_usb_device *d)
{
	dev_dbg(&d->udev->dev, "%s:\n", __func__);

	dvb_usbv2_remote_exit(d);
	dvb_usbv2_adapter_exit(d);
	dvb_usbv2_i2c_exit(d);
	kfree(d->priv);
	kfree(d);

	return 0;
}
/*
 * Power up the device and run the full initialisation sequence:
 * driver config read, I2C, adapters, the driver's own init hook and the
 * remote controller.  The device is powered down again on both the
 * success and the error path (power control is refcounted/board-managed
 * by dvb_usbv2_device_power_ctrl()).
 */
static int dvb_usbv2_init(struct dvb_usb_device *d)
{
	int ret;
	dev_dbg(&d->udev->dev, "%s:\n", __func__);

	dvb_usbv2_device_power_ctrl(d, 1);

	if (d->props->read_config) {
		ret = d->props->read_config(d);
		if (ret < 0)
			goto err;
	}

	ret = dvb_usbv2_i2c_init(d);
	if (ret < 0)
		goto err;

	ret = dvb_usbv2_adapter_init(d);
	if (ret < 0)
		goto err;

	if (d->props->init) {
		ret = d->props->init(d);
		if (ret < 0)
			goto err;
	}

	ret = dvb_usbv2_remote_init(d);
	if (ret < 0)
		goto err;

	dvb_usbv2_device_power_ctrl(d, 0);

	return 0;
err:
	/* partially initialised sub-systems are cleaned up by the caller
	 * via dvb_usbv2_exit(); here only the power state is restored */
	dvb_usbv2_device_power_ctrl(d, 0);
	dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
	return ret;
}
/*
 * dvb_usbv2_probe() - common USB probe entry point for DVB USB v2 drivers
 * @intf: probed USB interface
 * @id:   matching usb_device_id; id->driver_info must point to a
 *        struct dvb_usb_driver_info describing the device
 *
 * Allocates and fills the dvb_usb_device, optionally downloads firmware
 * for devices found in "cold" state, and runs dvb_usbv2_init().
 * Returns 0 on success or a negative errno.
 */
int dvb_usbv2_probe(struct usb_interface *intf,
		const struct usb_device_id *id)
{
	int ret;
	struct dvb_usb_device *d;
	struct usb_device *udev = interface_to_usbdev(intf);
	struct dvb_usb_driver_info *driver_info =
			(struct dvb_usb_driver_info *) id->driver_info;

	dev_dbg(&udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
			intf->cur_altsetting->desc.bInterfaceNumber);

	if (!id->driver_info) {
		dev_err(&udev->dev, "%s: driver_info failed\n", KBUILD_MODNAME);
		ret = -ENODEV;
		goto err;
	}

	d = kzalloc(sizeof(struct dvb_usb_device), GFP_KERNEL);
	if (!d) {
		dev_err(&udev->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
		ret = -ENOMEM;
		goto err;
	}

	d->intf = intf;
	d->name = driver_info->name;
	d->rc_map = driver_info->rc_map;
	d->udev = udev;
	d->props = driver_info->props;

	/* only bind to the interface number the driver declared */
	if (intf->cur_altsetting->desc.bInterfaceNumber !=
			d->props->bInterfaceNumber) {
		ret = -ENODEV;
		goto err_free_all;
	}

	mutex_init(&d->usb_mutex);
	mutex_init(&d->i2c_mutex);

	if (d->props->size_of_priv) {
		d->priv = kzalloc(d->props->size_of_priv, GFP_KERNEL);
		if (!d->priv) {
			dev_err(&d->udev->dev, "%s: kzalloc() failed\n",
					KBUILD_MODNAME);
			ret = -ENOMEM;
			goto err_free_all;
		}
	}

	if (d->props->identify_state) {
		const char *name = NULL;
		ret = d->props->identify_state(d, &name);
		if (ret == 0) {
			/* device already in warm state, nothing to do
			 * (0 is presumably WARM - defined elsewhere) */
			;
		} else if (ret == COLD) {
			dev_info(&d->udev->dev,
					"%s: found a '%s' in cold state\n",
					KBUILD_MODNAME, d->name);

			/* identify_state() may override the firmware name */
			if (!name)
				name = d->props->firmware;

			ret = dvb_usbv2_download_firmware(d, name);
			if (ret == 0) {
				/* device is warm, continue initialization */
				;
			} else if (ret == RECONNECTS_USB) {
				/*
				 * USB core will call disconnect() and then
				 * probe() as device reconnects itself from the
				 * USB bus. disconnect() will release all driver
				 * resources and probe() is called for 'new'
				 * device. As 'new' device is warm we should
				 * never go here again.
				 */
				goto exit;
			} else {
				goto err_free_all;
			}
		} else {
			goto err_free_all;
		}
	}

	dev_info(&d->udev->dev, "%s: found a '%s' in warm state\n",
			KBUILD_MODNAME, d->name);

	ret = dvb_usbv2_init(d);
	if (ret < 0)
		goto err_free_all;

	dev_info(&d->udev->dev,
			"%s: '%s' successfully initialized and connected\n",
			KBUILD_MODNAME, d->name);
exit:
	/* stash the device so disconnect()/suspend() can retrieve it */
	usb_set_intfdata(intf, d);

	return 0;
err_free_all:
	dvb_usbv2_exit(d);
err:
	dev_dbg(&udev->dev, "%s: failed=%d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL(dvb_usbv2_probe);
void dvb_usbv2_disconnect(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
const char *name = d->name;
struct device dev = d->udev->dev;
dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
intf->cur_altsetting->desc.bInterfaceNumber);
if (d->props->exit)
d->props->exit(d);
dvb_usbv2_exit(d);
dev_info(&dev, "%s: '%s' successfully deinitialized and disconnected\n",
KBUILD_MODNAME, name);
}
EXPORT_SYMBOL(dvb_usbv2_disconnect);
/*
 * dvb_usbv2_suspend() - common USB suspend handler
 * @intf: USB interface
 * @msg:  PM event (unused here)
 *
 * Stops remote controller polling, then for every adapter with an active
 * frontend stops streaming, kills the USB URBs and suspends the frontend.
 * Returns the result of the last dvb_frontend_suspend() call.
 */
int dvb_usbv2_suspend(struct usb_interface *intf, pm_message_t msg)
{
	struct dvb_usb_device *d = usb_get_intfdata(intf);
	int ret = 0, i, active_fe;
	struct dvb_frontend *fe;
	dev_dbg(&d->udev->dev, "%s:\n", __func__);

	/* stop remote controller poll */
	if (d->rc_polling_active)
		cancel_delayed_work_sync(&d->rc_query_work);

	for (i = MAX_NO_OF_ADAPTER_PER_DEVICE - 1; i >= 0; i--) {
		active_fe = d->adapter[i].active_fe;
		/* dvb_adap.priv set implies the adapter was registered;
		 * active_fe == -1 means no frontend is currently in use */
		if (d->adapter[i].dvb_adap.priv && active_fe != -1) {
			fe = d->adapter[i].fe[active_fe];
			d->adapter[i].suspend_resume_active = true;

			if (d->props->streaming_ctrl)
				d->props->streaming_ctrl(fe, 0);

			/* stop usb streaming */
			usb_urb_killv2(&d->adapter[i].stream);

			ret = dvb_frontend_suspend(fe);
		}
	}

	return ret;
}
EXPORT_SYMBOL(dvb_usbv2_suspend);
/*
 * Shared resume path used by both resume() and reset_resume(): resume
 * each active frontend, restart USB streaming, re-enable the device's
 * streaming control and finally restart remote controller polling.
 * Mirrors dvb_usbv2_suspend() in reverse order.
 */
static int dvb_usbv2_resume_common(struct dvb_usb_device *d)
{
	int ret = 0, i, active_fe;
	struct dvb_frontend *fe;
	dev_dbg(&d->udev->dev, "%s:\n", __func__);

	for (i = 0; i < MAX_NO_OF_ADAPTER_PER_DEVICE; i++) {
		active_fe = d->adapter[i].active_fe;
		if (d->adapter[i].dvb_adap.priv && active_fe != -1) {
			fe = d->adapter[i].fe[active_fe];

			ret = dvb_frontend_resume(fe);

			/* resume usb streaming */
			usb_urb_submitv2(&d->adapter[i].stream, NULL);

			if (d->props->streaming_ctrl)
				d->props->streaming_ctrl(fe, 1);

			d->adapter[i].suspend_resume_active = false;
		}
	}

	/* start remote controller poll */
	if (d->rc_polling_active)
		schedule_delayed_work(&d->rc_query_work,
				msecs_to_jiffies(d->rc.interval));

	return ret;
}
/*
 * dvb_usbv2_resume() - common USB resume handler
 * @intf: USB interface
 *
 * Thin wrapper: log the call and delegate to the shared resume path.
 */
int dvb_usbv2_resume(struct usb_interface *intf)
{
	struct dvb_usb_device *dvb_dev = usb_get_intfdata(intf);

	dev_dbg(&dvb_dev->udev->dev, "%s:\n", __func__);

	return dvb_usbv2_resume_common(dvb_dev);
}
EXPORT_SYMBOL(dvb_usbv2_resume);
/*
 * dvb_usbv2_reset_resume() - resume after the device was reset
 * @intf: USB interface
 *
 * Unlike plain resume, the hardware lost its state, so the device is
 * powered up and the driver's init hook is re-run before the common
 * resume path.  Power is released again afterwards.
 */
int dvb_usbv2_reset_resume(struct usb_interface *intf)
{
	struct dvb_usb_device *d = usb_get_intfdata(intf);
	int ret;
	dev_dbg(&d->udev->dev, "%s:\n", __func__);

	dvb_usbv2_device_power_ctrl(d, 1);

	/* re-initialise the hardware; errors are intentionally ignored
	 * here (best effort on reset-resume) */
	if (d->props->init)
		d->props->init(d);

	ret = dvb_usbv2_resume_common(d);

	dvb_usbv2_device_power_ctrl(d, 0);

	return ret;
}
EXPORT_SYMBOL(dvb_usbv2_reset_resume);
/* Module metadata for the DVB USB v2 common core. */
MODULE_VERSION("2.0");
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("DVB USB common");
MODULE_LICENSE("GPL");
| gpl-2.0 |
loxdegio/linux-zenx | arch/mips/dec/platform.c | 2017 | 1120 | /*
* DEC platform devices.
*
* Copyright (c) 2014 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mc146818rtc.h>
#include <linux/platform_device.h>
/* RTC MMIO resource; start/end are filled in at init time by
 * dec_add_devices() once the machine's RTC base is known. */
static struct resource dec_rtc_resources[] = {
	{
		.name = "rtc",
		.flags = IORESOURCE_MEM,
	},
};
/* Board data for the rtc_cmos driver: no square-wave/frequency support,
 * 64 bytes of CMOS address space. */
static struct cmos_rtc_board_info dec_rtc_info = {
	.flags = CMOS_RTC_FLAGS_NOFREQ,
	.address_space = 64,
};
/* Platform device binding the DEC RTC to the generic rtc_cmos driver. */
static struct platform_device dec_rtc_device = {
	.name = "rtc_cmos",
	.id = PLATFORM_DEVID_NONE,
	.dev.platform_data = &dec_rtc_info,
	.resource = dec_rtc_resources,
	.num_resources = ARRAY_SIZE(dec_rtc_resources),
};
/*
 * Register the DEC platform devices.  The RTC resource range is computed
 * at run time from RTC_PORT(0) and the machine's slot size
 * (dec_kn_slot_size - defined elsewhere in the DEC platform code).
 */
static int __init dec_add_devices(void)
{
	dec_rtc_resources[0].start = RTC_PORT(0);
	dec_rtc_resources[0].end = RTC_PORT(0) + dec_kn_slot_size - 1;
	return platform_device_register(&dec_rtc_device);
}
device_initcall(dec_add_devices);
| gpl-2.0 |
robacklin/linux-2.6.39.4 | drivers/infiniband/hw/mthca/mthca_main.c | 2529 | 37418 | /*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include "mthca_dev.h"
#include "mthca_config_reg.h"
#include "mthca_cmd.h"
#include "mthca_profile.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
/* Module metadata and global tunables for the mthca low-level driver. */
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG

/* runtime-adjustable debug verbosity (0 = off) */
int mthca_debug_level = 0;
module_param_named(debug_level, mthca_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

/* without CONFIG_PCI_MSI the option degenerates to a constant 0 */
#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static int tune_pci = 0;
module_param(tune_pci, int, 0444);
MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");
/* Serialises device list manipulation and catastrophic-error restarts. */
DEFINE_MUTEX(mthca_device_mutex);

/* Default sizing of the HCA resource profile; each value can be
 * overridden through the matching module parameter below. */
#define MTHCA_DEFAULT_NUM_QP            (1 << 16)
#define MTHCA_DEFAULT_RDB_PER_QP        (1 << 2)
#define MTHCA_DEFAULT_NUM_CQ            (1 << 16)
#define MTHCA_DEFAULT_NUM_MCG           (1 << 13)
#define MTHCA_DEFAULT_NUM_MPT           (1 << 17)
#define MTHCA_DEFAULT_NUM_MTT           (1 << 20)
#define MTHCA_DEFAULT_NUM_UDAV          (1 << 15)
#define MTHCA_DEFAULT_NUM_RESERVED_MTTS (1 << 18)
#define MTHCA_DEFAULT_NUM_UARC_SIZE     (1 << 18)

static struct mthca_profile hca_profile = {
	.num_qp             = MTHCA_DEFAULT_NUM_QP,
	.rdb_per_qp         = MTHCA_DEFAULT_RDB_PER_QP,
	.num_cq             = MTHCA_DEFAULT_NUM_CQ,
	.num_mcg            = MTHCA_DEFAULT_NUM_MCG,
	.num_mpt            = MTHCA_DEFAULT_NUM_MPT,
	.num_mtt            = MTHCA_DEFAULT_NUM_MTT,
	.num_udav           = MTHCA_DEFAULT_NUM_UDAV,          /* Tavor only */
	.fmr_reserved_mtts  = MTHCA_DEFAULT_NUM_RESERVED_MTTS, /* Tavor only */
	.uarc_size          = MTHCA_DEFAULT_NUM_UARC_SIZE,     /* Arbel only */
};

module_param_named(num_qp, hca_profile.num_qp, int, 0444);
MODULE_PARM_DESC(num_qp, "maximum number of QPs per HCA");

module_param_named(rdb_per_qp, hca_profile.rdb_per_qp, int, 0444);
MODULE_PARM_DESC(rdb_per_qp, "number of RDB buffers per QP");

module_param_named(num_cq, hca_profile.num_cq, int, 0444);
MODULE_PARM_DESC(num_cq, "maximum number of CQs per HCA");

module_param_named(num_mcg, hca_profile.num_mcg, int, 0444);
MODULE_PARM_DESC(num_mcg, "maximum number of multicast groups per HCA");

module_param_named(num_mpt, hca_profile.num_mpt, int, 0444);
MODULE_PARM_DESC(num_mpt,
		"maximum number of memory protection table entries per HCA");

module_param_named(num_mtt, hca_profile.num_mtt, int, 0444);
MODULE_PARM_DESC(num_mtt,
		"maximum number of memory translation table segments per HCA");

module_param_named(num_udav, hca_profile.num_udav, int, 0444);
MODULE_PARM_DESC(num_udav, "maximum number of UD address vectors per HCA");

module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
MODULE_PARM_DESC(fmr_reserved_mtts,
		"number of memory translation table segments reserved for FMR");

/* log2 of MTT entries per segment; default derived from the fixed
 * segment size (each MTT entry is 8 bytes) */
static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");

static char mthca_version[] __devinitdata =
	DRV_NAME ": Mellanox InfiniBand HCA driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
/*
 * Optionally raise the PCI(-X/Express) burst sizes beyond the BIOS
 * defaults.  Controlled by the tune_pci module parameter (no-op when 0).
 * Returns 0 on success or -ENODEV if a capability write fails.
 */
static int mthca_tune_pci(struct mthca_dev *mdev)
{
	if (!tune_pci)
		return 0;

	/* First try to max out Read Byte Count */
	if (pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX)) {
		if (pcix_set_mmrbc(mdev->pdev, pcix_get_max_mmrbc(mdev->pdev))) {
			mthca_err(mdev, "Couldn't set PCI-X max read count, "
				"aborting.\n");
			return -ENODEV;
		}
	} else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
		/* only worth mentioning for PCI-X parts; PCIe handled below */
		mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");

	if (pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP)) {
		if (pcie_set_readrq(mdev->pdev, 4096)) {
			mthca_err(mdev, "Couldn't write PCI Express read request, "
				"aborting.\n");
			return -ENODEV;
		}
	} else if (mdev->mthca_flags & MTHCA_FLAG_PCIE)
		mthca_info(mdev, "No PCI Express capability, "
			   "not setting Max Read Request Size.\n");

	return 0;
}
/*
 * Query the HCA's device limits via the QUERY_DEV_LIM firmware command,
 * validate them against what the driver/kernel can support, and cache
 * the results in mdev->limits / mdev->device_cap_flags.
 * Returns 0 on success or a negative errno.
 */
static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
	int err;
	u8 status;

	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;

	err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	/* sanity checks: HCA requirements vs. kernel/driver capabilities */
	if (dev_lim->min_page_sz > PAGE_SIZE) {
		mthca_err(mdev, "HCA minimum page size of %d bigger than "
			  "kernel PAGE_SIZE of %ld, aborting.\n",
			  dev_lim->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
		mthca_err(mdev, "HCA has %d ports, but we only support %d, "
			  "aborting.\n",
			  dev_lim->num_ports, MTHCA_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
		mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
			  "PCI resource 2 size of 0x%llx, aborting.\n",
			  dev_lim->uar_size,
			  (unsigned long long)pci_resource_len(mdev->pdev, 2));
		return -ENODEV;
	}

	mdev->limits.num_ports      	= dev_lim->num_ports;
	mdev->limits.vl_cap             = dev_lim->max_vl;
	mdev->limits.mtu_cap            = dev_lim->max_mtu;
	mdev->limits.gid_table_len  	= dev_lim->max_gids;
	mdev->limits.pkey_table_len 	= dev_lim->max_pkeys;
	mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
	/*
	 * Need to allow for worst case send WQE overhead and check
	 * whether max_desc_sz imposes a lower limit than max_sg; UD
	 * send has the biggest overhead.
	 */
	mdev->limits.max_sg		= min_t(int, dev_lim->max_sg,
					  (dev_lim->max_desc_sz -
					   sizeof (struct mthca_next_seg) -
					   (mthca_is_memfree(mdev) ?
					    sizeof (struct mthca_arbel_ud_seg) :
					    sizeof (struct mthca_tavor_ud_seg))) /
						sizeof (struct mthca_data_seg));
	mdev->limits.max_wqes           = dev_lim->max_qp_sz;
	mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
	mdev->limits.reserved_qps       = dev_lim->reserved_qps;
	mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
	mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
	mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
	mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
	mdev->limits.max_srq_sge	= mthca_max_srq_sge(mdev);
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	mdev->limits.max_cqes           = dev_lim->max_cq_sz - 1;
	mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
	mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
	mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
	mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
	mdev->limits.reserved_uars      = dev_lim->reserved_uars;
	mdev->limits.reserved_pds       = dev_lim->reserved_pds;
	mdev->limits.port_width_cap     = dev_lim->max_port_width;
	mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
	mdev->limits.flags              = dev_lim->flags;
	/*
	 * For old FW that doesn't return static rate support, use a
	 * value of 0x3 (only static rate values of 0 or 1 are handled),
	 * except on Sinai, where even old FW can handle static rate
	 * values of 2 and 3.
	 */
	if (dev_lim->stat_rate_support)
		mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
	else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
		mdev->limits.stat_rate_support = 0xf;
	else
		mdev->limits.stat_rate_support = 0x3;

	/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
	   May be doable since hardware supports it for SRQ.

	   IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.

	   IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
	   supported by driver. */
	mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
		mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;

	if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
		mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;

	if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
		mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;

	if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
		mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;

	if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
		mdev->mthca_flags |= MTHCA_FLAG_SRQ;

	/* UD IP checksum offload is only reported on MemFree (Arbel) HW */
	if (mthca_is_memfree(mdev))
		if (dev_lim->flags & DEV_LIM_FLAG_IPOIB_CSUM)
			mdev->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	return 0;
}
/*
 * Bring up a Tavor-mode HCA (firmware in HCA-attached DDR):
 * SYS_EN -> QUERY_FW -> QUERY_DDR -> QUERY_DEV_LIM -> profile -> INIT_HCA.
 * On any failure the system-enable is reverted with SYS_DIS.
 * Returns 0 on success or a negative errno.
 */
static int mthca_init_tavor(struct mthca_dev *mdev)
{
	s64 size;
	u8 status;
	int err;
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;

	err = mthca_SYS_EN(mdev, &status);
	if (err) {
		mthca_err(mdev, "SYS_EN command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "SYS_EN returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_QUERY_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}
	err = mthca_QUERY_DDR(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_DDR command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "QUERY_DDR returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		goto err_disable;
	}

	/* size the resource profile; UARC is an Arbel-only concept */
	profile = hca_profile;
	profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
	profile.uarc_size = 0;
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (size < 0) {
		err = size;
		goto err_disable;
	}

	err = mthca_INIT_HCA(mdev, &init_hca, &status);
	if (err) {
		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
		goto err_disable;
	}
	if (status) {
		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_disable;
	}

	return 0;

err_disable:
	mthca_SYS_DIS(mdev, &status);

	return err;
}
/*
 * Allocate host memory for the firmware area (MemFree/Arbel HCAs),
 * map it for firmware access (MAP_FA) and start the firmware (RUN_FW).
 * Cleans up the mapping and ICM allocation on failure.
 */
static int mthca_load_fw(struct mthca_dev *mdev)
{
	u8 status;
	int err;

	/* FIXME: use HCA-attached memory for FW if present */

	mdev->fw.arbel.fw_icm =
		mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
				GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.fw_icm) {
		mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
	if (err) {
		mthca_err(mdev, "MAP_FA command failed, aborting.\n");
		goto err_free;
	}
	if (status) {
		mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_free;
	}
	err = mthca_RUN_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "RUN_FW command failed, aborting.\n");
		goto err_unmap_fa;
	}
	if (status) {
		mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mthca_UNMAP_FA(mdev, &status);

err_free:
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
	return err;
}
/*
 * Set up the HCA's InfiniBand Context Memory (ICM) for MemFree HCAs:
 * size and map the auxiliary ICM, then allocate one ICM table per
 * resource type (EQ, MTT, MPT, QP, EQP, RDB, CQ, optional SRQ, MCG).
 * The goto-cleanup chain unwinds in exact reverse order of allocation.
 * Returns 0 on success or a negative errno.
 */
static int mthca_init_icm(struct mthca_dev *mdev,
			  struct mthca_dev_lim *dev_lim,
			  struct mthca_init_hca_param *init_hca,
			  u64 icm_size)
{
	u64 aux_pages;
	u8 status;
	int err;

	/* ask firmware how much aux ICM the requested context size needs */
	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
	if (err) {
		mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		  (unsigned long long) icm_size >> 10,
		  (unsigned long long) aux_pages << 2);

	mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
						 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!mdev->fw.arbel.aux_icm) {
		mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
	if (err) {
		mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}
	if (status) {
		mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status);
		err = -EINVAL;
		goto err_free_aux;
	}

	err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
	if (err) {
		mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_aux;
	}

	/* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
	mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
					   dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;

	mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
							 mdev->limits.mtt_seg_size,
							 mdev->limits.num_mtt_segs,
							 mdev->limits.reserved_mtts,
							 1, 0);
	if (!mdev->mr_table.mtt_table) {
		mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_eq;
	}

	mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
							 dev_lim->mpt_entry_sz,
							 mdev->limits.num_mpts,
							 mdev->limits.reserved_mrws,
							 1, 1);
	if (!mdev->mr_table.mpt_table) {
		mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mtt;
	}

	mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
							dev_lim->qpc_entry_sz,
							mdev->limits.num_qps,
							mdev->limits.reserved_qps,
							0, 0);
	if (!mdev->qp_table.qp_table) {
		mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_mpt;
	}

	mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
							 dev_lim->eqpc_entry_sz,
							 mdev->limits.num_qps,
							 mdev->limits.reserved_qps,
							 0, 0);
	if (!mdev->qp_table.eqp_table) {
		mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_qp;
	}

	mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
							 MTHCA_RDB_ENTRY_SIZE,
							 mdev->limits.num_qps <<
							 mdev->qp_table.rdb_shift, 0,
							 0, 0);
	if (!mdev->qp_table.rdb_table) {
		mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
		err = -ENOMEM;
		goto err_unmap_eqp;
	}

	mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
						     dev_lim->cqc_entry_sz,
						     mdev->limits.num_cqs,
						     mdev->limits.reserved_cqs,
						     0, 0);
	if (!mdev->cq_table.table) {
		mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_rdb;
	}

	if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
		mdev->srq_table.table =
			mthca_alloc_icm_table(mdev, init_hca->srqc_base,
					      dev_lim->srq_entry_sz,
					      mdev->limits.num_srqs,
					      mdev->limits.reserved_srqs,
					      0, 0);
		if (!mdev->srq_table.table) {
			mthca_err(mdev, "Failed to map SRQ context memory, "
				  "aborting.\n");
			err = -ENOMEM;
			goto err_unmap_cq;
		}
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
						      MTHCA_MGM_ENTRY_SIZE,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      mdev->limits.num_mgms +
						      mdev->limits.num_amgms,
						      0, 0);
	if (!mdev->mcg_table.table) {
		mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
		err = -ENOMEM;
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);

err_unmap_cq:
	mthca_free_icm_table(mdev, mdev->cq_table.table);

err_unmap_rdb:
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);

err_unmap_eqp:
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);

err_unmap_qp:
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);

err_unmap_mpt:
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);

err_unmap_mtt:
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);

err_unmap_eq:
	mthca_unmap_eq_icm(mdev);

err_unmap_aux:
	mthca_UNMAP_ICM_AUX(mdev, &status);

err_free_aux:
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);

	return err;
}
/*
 * Release everything mthca_init_icm() set up, in reverse allocation
 * order, finishing with the aux ICM unmap and free.
 */
static void mthca_free_icms(struct mthca_dev *mdev)
{
	u8 status;

	mthca_free_icm_table(mdev, mdev->mcg_table.table);
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		mthca_free_icm_table(mdev, mdev->srq_table.table);
	mthca_free_icm_table(mdev, mdev->cq_table.table);
	mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
	mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
	mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
	mthca_unmap_eq_icm(mdev);

	mthca_UNMAP_ICM_AUX(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
}
/*
 * Bring up an Arbel/MemFree-mode HCA: QUERY_FW -> ENABLE_LAM (optional,
 * absent LAM means pure MemFree) -> load firmware into host memory ->
 * QUERY_DEV_LIM -> profile -> ICM setup -> INIT_HCA.
 * Each error path unwinds the steps completed so far.
 */
static int mthca_init_arbel(struct mthca_dev *mdev)
{
	struct mthca_dev_lim        dev_lim;
	struct mthca_profile        profile;
	struct mthca_init_hca_param init_hca;
	s64 icm_size;
	u8 status;
	int err;

	err = mthca_QUERY_FW(mdev, &status);
	if (err) {
		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
		return err;
	}
	if (status) {
		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_ENABLE_LAM(mdev, &status);
	if (err) {
		mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n");
		return err;
	}
	if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
		/* no local attached memory: MemFree operation */
		mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
		mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
	} else if (status) {
		mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, "
			  "aborting.\n", status);
		return -EINVAL;
	}

	err = mthca_load_fw(mdev);
	if (err) {
		mthca_err(mdev, "Failed to start FW, aborting.\n");
		goto err_disable;
	}

	err = mthca_dev_lim(mdev, &dev_lim);
	if (err) {
		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
		goto err_stop_fw;
	}

	/* size the resource profile; UD AVs live in ICM on Arbel */
	profile = hca_profile;
	profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
	profile.num_udav = 0;
	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
		profile.num_srq = dev_lim.max_srqs;

	icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
	if (icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mthca_INIT_HCA(mdev, &init_hca, &status);
	if (err) {
		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}
	if (status) {
		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_free_icm;
	}

	return 0;

err_free_icm:
	mthca_free_icms(mdev);

err_stop_fw:
	mthca_UNMAP_FA(mdev, &status);
	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

err_disable:
	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
		mthca_DISABLE_LAM(mdev, &status);

	return err;
}
/*
 * Shut the HCA down: CLOSE_HCA, then undo the mode-specific bring-up
 * (ICM/firmware/LAM for MemFree HCAs, SYS_DIS for Tavor).
 */
static void mthca_close_hca(struct mthca_dev *mdev)
{
	u8 status;

	mthca_CLOSE_HCA(mdev, 0, &status);

	if (mthca_is_memfree(mdev)) {
		mthca_free_icms(mdev);

		mthca_UNMAP_FA(mdev, &status);
		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);

		if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
			mthca_DISABLE_LAM(mdev, &status);
	} else
		mthca_SYS_DIS(mdev, &status);
}
/*
 * Top-level HCA init: dispatch to the Arbel (MemFree) or Tavor bring-up
 * path, then query the adapter for its interrupt pin, revision and
 * board id.  Closes the HCA again if the adapter query fails.
 */
static int mthca_init_hca(struct mthca_dev *mdev)
{
	u8 status;
	int err;
	struct mthca_adapter adapter;

	if (mthca_is_memfree(mdev))
		err = mthca_init_arbel(mdev);
	else
		err = mthca_init_tavor(mdev);

	if (err)
		return err;

	err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
	if (err) {
		mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}
	if (status) {
		mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
			  "aborting.\n", status);
		err = -EINVAL;
		goto err_close;
	}

	mdev->eq_table.inta_pin = adapter.inta_pin;
	/* on MemFree HCAs the PCI revision id is already authoritative */
	if (!mthca_is_memfree(mdev))
		mdev->rev_id = adapter.revision_id;
	memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);

	return 0;

err_close:
	mthca_close_hca(mdev);
	return err;
}
/*
 * Initialise all driver-side resource tables after the HCA itself is up:
 * UAR, PD, MR, EQ, CQ, SRQ, QP, AV and MCG tables, switch firmware
 * commands to event mode and verify interrupt delivery with a NOP
 * command.  The goto-cleanup chain tears down in exact reverse order.
 * Returns 0 on success or a negative errno.
 */
static int mthca_setup_hca(struct mthca_dev *dev)
{
	int err;
	u8 status;

	MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);

	err = mthca_init_uar_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "user access region table, aborting.\n");
		return err;
	}

	err = mthca_uar_alloc(dev, &dev->driver_uar);
	if (err) {
		mthca_err(dev, "Failed to allocate driver access region, "
			  "aborting.\n");
		goto err_uar_table_free;
	}

	/* map the driver UAR page so the kernel can ring doorbells */
	dev->kar = ioremap((phys_addr_t) dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->kar) {
		mthca_err(dev, "Couldn't map kernel access region, "
			  "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mthca_init_pd_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mthca_init_mr_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mthca_pd_alloc(dev, 1, &dev->driver_pd);
	if (err) {
		mthca_err(dev, "Failed to create driver PD, "
			  "aborting.\n");
		goto err_mr_table_free;
	}

	err = mthca_init_eq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "event queue table, aborting.\n");
		goto err_pd_free;
	}

	err = mthca_cmd_use_events(dev);
	if (err) {
		mthca_err(dev, "Failed to switch to event-driven "
			  "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	/* NOP generates an interrupt - proves IRQ routing works before
	 * committing to event-driven operation */
	err = mthca_NOP(dev, &status);
	if (err || status) {
		if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
			mthca_warn(dev, "NOP command failed to generate interrupt "
				   "(IRQ %d).\n",
				   dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector);
			mthca_warn(dev, "Trying again with MSI-X disabled.\n");
		} else {
			mthca_err(dev, "NOP command failed to generate interrupt "
				  "(IRQ %d), aborting.\n",
				  dev->pdev->irq);
			mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mthca_dbg(dev, "NOP command IRQ test passed\n");

	err = mthca_init_cq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mthca_init_srq_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mthca_init_qp_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mthca_init_av_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "address vector table, aborting.\n");
		goto err_qp_table_free;
	}

	err = mthca_init_mcg_table(dev);
	if (err) {
		mthca_err(dev, "Failed to initialize "
			  "multicast group table, aborting.\n");
		goto err_av_table_free;
	}

	return 0;

err_av_table_free:
	mthca_cleanup_av_table(dev);

err_qp_table_free:
	mthca_cleanup_qp_table(dev);

err_srq_table_free:
	mthca_cleanup_srq_table(dev);

err_cq_table_free:
	mthca_cleanup_cq_table(dev);

err_cmd_poll:
	mthca_cmd_use_polling(dev);

err_eq_table_free:
	mthca_cleanup_eq_table(dev);

err_pd_free:
	mthca_pd_free(dev, &dev->driver_pd);

err_mr_table_free:
	mthca_cleanup_mr_table(dev);

err_pd_table_free:
	mthca_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(dev->kar);

err_uar_free:
	mthca_uar_free(dev, &dev->driver_uar);

err_uar_table_free:
	mthca_cleanup_uar_table(dev);
	return err;
}
/*
 * Try to switch the HCA to MSI-X interrupts.  Three vectors are
 * requested: one each for the completion, async-event and command EQs.
 * On success the allocated vectors are recorded in the EQ table.
 * Returns 0 on success or the pci_enable_msix() error code (a positive
 * value means fewer vectors were available than requested).
 */
static int mthca_enable_msi_x(struct mthca_dev *mdev)
{
	struct msix_entry entries[3];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries));
	if (err) {
		if (err > 0)
			mthca_info(mdev, "Only %d MSI-X vectors available, "
				   "not using MSI-X\n", err);
		return err;
	}

	mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
	mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
	mdev->eq_table.eq[MTHCA_EQ_CMD  ].msi_x_vector = entries[2].vector;

	return 0;
}
/* Types of supported HCA */
enum {
	TAVOR,			/* MT23108                        */
	ARBEL_COMPAT,		/* MT25208 in Tavor compat mode   */
	ARBEL_NATIVE,		/* MT25208 with extended features */
	SINAI			/* MT25204                        */
};

/* pack a firmware version triple into a single comparable u64 */
#define MTHCA_FW_VER(major, minor, subminor) \
	(((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor))

/* per-HCA-type table: newest known firmware and the driver flags that
 * describe each chip's capabilities (indexed by the enum above) */
static struct {
	u64 latest_fw;
	u32 flags;
} mthca_hca_table[] = {
	[TAVOR]        = { .latest_fw = MTHCA_FW_VER(3, 5, 0),
			   .flags     = 0 },
	[ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200),
			   .flags     = MTHCA_FLAG_PCIE },
	[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 3, 0),
			   .flags     = MTHCA_FLAG_MEMFREE |
					MTHCA_FLAG_PCIE },
	[SINAI]        = { .latest_fw = MTHCA_FW_VER(1, 2, 0),
			   .flags     = MTHCA_FLAG_MEMFREE |
					MTHCA_FLAG_PCIE    |
					MTHCA_FLAG_SINAI_OPT }
};
/*
 * Bring up one HCA: enable the PCI device, validate its BARs, set the
 * DMA masks, reset the chip, initialize the firmware command
 * interface and all resource tables, and register with the IB core.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is unwound via the goto ladder at the bottom.
 */
static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
{
	int ddr_hidden = 0;
	int err;
	struct mthca_dev *mdev;

	printk(KERN_INFO PFX "Initializing %s\n",
	       pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/*
	 * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
	 * be present)
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
		ddr_hidden = 1;

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, "
			"aborting.\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/* Prefer 64-bit DMA masks, falling back to 32-bit if needed. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_free_res;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_free_res;
		}
	}

	/* We can handle large RDMA requests, so allow larger segments. */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
	if (!mdev) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	mdev->pdev = pdev;

	mdev->mthca_flags = mthca_hca_table[hca_type].flags;
	if (ddr_hidden)
		mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mthca_reset(mdev);
	if (err) {
		mthca_err(mdev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	/*
	 * Fix: the mthca_cmd_init() result was previously discarded,
	 * so a failure here jumped to the error path with err still 0
	 * and the probe "succeeded" with a half-initialized device.
	 */
	err = mthca_cmd_init(mdev);
	if (err) {
		mthca_err(mdev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mthca_tune_pci(mdev);
	if (err)
		goto err_cmd;

	err = mthca_init_hca(mdev);
	if (err)
		goto err_cmd;

	if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
		mthca_warn(mdev, "HCA FW version %d.%d.%03d is old (%d.%d.%03d is current).\n",
			   (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
			   (int) (mdev->fw_ver & 0xffff),
			   (int) (mthca_hca_table[hca_type].latest_fw >> 32),
			   (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff,
			   (int) (mthca_hca_table[hca_type].latest_fw & 0xffff));
		mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n");
	}

	if (msi_x && !mthca_enable_msi_x(mdev))
		mdev->mthca_flags |= MTHCA_FLAG_MSI_X;

	err = mthca_setup_hca(mdev);
	if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
		/* The MSI-X vectors may not work; retry with legacy IRQ.
		 * (The outer condition already guarantees MSI-X is on,
		 * so the previously duplicated inner check is gone.) */
		pci_disable_msix(pdev);
		mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;

		err = mthca_setup_hca(mdev);
	}

	if (err)
		goto err_close;

	err = mthca_register_device(mdev);
	if (err)
		goto err_cleanup;

	err = mthca_create_agents(mdev);
	if (err)
		goto err_unregister;

	pci_set_drvdata(pdev, mdev);
	mdev->hca_type = hca_type;

	mdev->active = true;

	return 0;

err_unregister:
	mthca_unregister_device(mdev);

err_cleanup:
	mthca_cleanup_mcg_table(mdev);
	mthca_cleanup_av_table(mdev);
	mthca_cleanup_qp_table(mdev);
	mthca_cleanup_srq_table(mdev);
	mthca_cleanup_cq_table(mdev);
	mthca_cmd_use_polling(mdev);
	mthca_cleanup_eq_table(mdev);

	mthca_pd_free(mdev, &mdev->driver_pd);

	mthca_cleanup_mr_table(mdev);
	mthca_cleanup_pd_table(mdev);
	mthca_cleanup_uar_table(mdev);

err_close:
	if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mthca_close_hca(mdev);

err_cmd:
	mthca_cmd_cleanup(mdev);

err_free_dev:
	ib_dealloc_device(&mdev->ib_dev);

err_free_res:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
/*
 * Tear down one HCA, in the reverse order of __mthca_init_one().
 * Safe to call even if probe never completed: drvdata is NULL in
 * that case and the whole body is skipped.
 */
static void __mthca_remove_one(struct pci_dev *pdev)
{
	struct mthca_dev *mdev = pci_get_drvdata(pdev);
	u8 status;
	int p;

	if (mdev) {
		mthca_free_agents(mdev);
		mthca_unregister_device(mdev);

		/* HCA ports are numbered from 1. */
		for (p = 1; p <= mdev->limits.num_ports; ++p)
			mthca_CLOSE_IB(mdev, p, &status);

		mthca_cleanup_mcg_table(mdev);
		mthca_cleanup_av_table(mdev);
		mthca_cleanup_qp_table(mdev);
		mthca_cleanup_srq_table(mdev);
		mthca_cleanup_cq_table(mdev);
		/* Switch back to polled commands before the EQs go away. */
		mthca_cmd_use_polling(mdev);
		mthca_cleanup_eq_table(mdev);

		mthca_pd_free(mdev, &mdev->driver_pd);

		mthca_cleanup_mr_table(mdev);
		mthca_cleanup_pd_table(mdev);

		iounmap(mdev->kar);
		mthca_uar_free(mdev, &mdev->driver_uar);
		mthca_cleanup_uar_table(mdev);
		mthca_close_hca(mdev);
		mthca_cmd_cleanup(mdev);

		if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
			pci_disable_msix(pdev);

		ib_dealloc_device(&mdev->ib_dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/*
 * Full restart of a device: remember the HCA type, tear the device
 * down completely and probe it again from scratch.  Used by the
 * catastrophic-error recovery path.
 */
int __mthca_restart_one(struct pci_dev *pdev)
{
	struct mthca_dev *mdev = pci_get_drvdata(pdev);
	int hca_type;

	if (!mdev)
		return -ENODEV;

	/* Save the type before remove clears drvdata. */
	hca_type = mdev->hca_type;
	__mthca_remove_one(pdev);
	return __mthca_init_one(pdev, hca_type);
}
/*
 * PCI probe entry point.  Serializes against remove/restart via
 * mthca_device_mutex, validates the table index stored in
 * driver_data and hands off to __mthca_init_one().
 */
static int __devinit mthca_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *id)
{
	int ret = -ENODEV;

	mutex_lock(&mthca_device_mutex);

	printk_once(KERN_INFO "%s", mthca_version);

	/* driver_data indexes mthca_hca_table; reject anything else. */
	if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
		printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
		       pci_name(pdev), id->driver_data);
		goto out;
	}

	ret = __mthca_init_one(pdev, id->driver_data);

out:
	mutex_unlock(&mthca_device_mutex);
	return ret;
}
/* PCI remove entry point: serialized teardown of one device. */
static void __devexit mthca_remove_one(struct pci_dev *pdev)
{
	mutex_lock(&mthca_device_mutex);
	__mthca_remove_one(pdev);
	mutex_unlock(&mthca_device_mutex);
}
/* PCI IDs we bind to.  Each device exists under both the Mellanox and
 * the Topspin vendor ID; driver_data selects the mthca_hca_table entry. */
static struct pci_device_id mthca_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR),
	  .driver_data = TAVOR },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
	  .driver_data = ARBEL_COMPAT },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL),
	  .driver_data = ARBEL_NATIVE },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
	  .driver_data = SINAI },
	{ PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
	  .driver_data = SINAI },
	{ 0, }			/* terminator */
};

MODULE_DEVICE_TABLE(pci, mthca_pci_table);

static struct pci_driver mthca_driver = {
	.name		= DRV_NAME,
	.id_table	= mthca_pci_table,
	.probe		= mthca_init_one,
	.remove		= __devexit_p(mthca_remove_one)
};
/*
 * Validate one profile module parameter in place: the value must be a
 * positive power of two.  Non-positive values are replaced with the
 * supplied default; other values are rounded up to the next power of
 * two.  Any correction is reported with two warnings.
 */
static void __init __mthca_check_profile_val(const char *name, int *pval,
					     int pval_default)
{
	int requested = *pval;
	int corrected;

	if (requested > 0)
		corrected = roundup_pow_of_two(requested);
	else
		corrected = pval_default;

	*pval = corrected;

	if (corrected != requested) {
		printk(KERN_WARNING PFX "Invalid value %d for %s in module parameter.\n",
		       requested, name);
		printk(KERN_WARNING PFX "Corrected %s to %d.\n", name, corrected);
	}
}
/* Convenience wrapper: stringify the field name and pass a pointer to
 * the corresponding hca_profile member. */
#define mthca_check_profile_val(name, default)				\
	__mthca_check_profile_val(#name, &hca_profile.name, default)

/*
 * Sanitize all profile module parameters at init time, then enforce
 * two cross-field constraints: fmr_reserved_mtts must be smaller than
 * num_mtt, and log_mtts_per_seg must be in [1, 5].
 */
static void __init mthca_validate_profile(void)
{
	mthca_check_profile_val(num_qp,            MTHCA_DEFAULT_NUM_QP);
	mthca_check_profile_val(rdb_per_qp,        MTHCA_DEFAULT_RDB_PER_QP);
	mthca_check_profile_val(num_cq,            MTHCA_DEFAULT_NUM_CQ);
	mthca_check_profile_val(num_mcg,           MTHCA_DEFAULT_NUM_MCG);
	mthca_check_profile_val(num_mpt,           MTHCA_DEFAULT_NUM_MPT);
	mthca_check_profile_val(num_mtt,           MTHCA_DEFAULT_NUM_MTT);
	mthca_check_profile_val(num_udav,          MTHCA_DEFAULT_NUM_UDAV);
	mthca_check_profile_val(fmr_reserved_mtts, MTHCA_DEFAULT_NUM_RESERVED_MTTS);

	if (hca_profile.fmr_reserved_mtts >= hca_profile.num_mtt) {
		printk(KERN_WARNING PFX "Invalid fmr_reserved_mtts module parameter %d.\n",
		       hca_profile.fmr_reserved_mtts);
		printk(KERN_WARNING PFX "(Must be smaller than num_mtt %d)\n",
		       hca_profile.num_mtt);
		/* Fall back to reserving half of the MTT space for FMRs. */
		hca_profile.fmr_reserved_mtts = hca_profile.num_mtt / 2;
		printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
		       hca_profile.fmr_reserved_mtts);
	}

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
		printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n",
		       log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
		log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
	}
}
/*
 * Module load: validate profile parameters, start the catastrophic
 * error poller, then register the PCI driver.  The catas machinery is
 * torn down again if driver registration fails.
 */
static int __init mthca_init(void)
{
	int ret;

	mthca_validate_profile();

	ret = mthca_catas_init();
	if (ret)
		return ret;

	ret = pci_register_driver(&mthca_driver);
	if (ret < 0) {
		mthca_catas_cleanup();
		return ret;
	}
	return 0;
}
/* Module unload: unregister the driver first so no new probes race
 * with the catastrophic-error poller teardown. */
static void __exit mthca_cleanup(void)
{
	pci_unregister_driver(&mthca_driver);
	mthca_catas_cleanup();
}

module_init(mthca_init);
module_exit(mthca_cleanup);
| gpl-2.0 |
Oi-Android/android_kernel_xiaomi_ferrari | drivers/s390/net/ctcm_dbug.c | 2529 | 1873 | /*
* Copyright IBM Corp. 2001, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
*/
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include "ctcm_dbug.h"
/*
 * Debug Facility Stuff
 */

/* Static description of each s390 debug area this driver creates:
 * name, pages, areas, record length, initial level, handle (filled in
 * by ctcm_register_dbf_views()). */
struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = {
	[CTCM_DBF_SETUP]	= {"ctc_setup", 8, 1, 64, CTC_DBF_INFO, NULL},
	[CTCM_DBF_ERROR]	= {"ctc_error", 8, 1, 64, CTC_DBF_ERROR, NULL},
	[CTCM_DBF_TRACE]	= {"ctc_trace", 8, 1, 64, CTC_DBF_ERROR, NULL},
	[CTCM_DBF_MPC_SETUP]	= {"mpc_setup", 8, 1, 80, CTC_DBF_INFO, NULL},
	[CTCM_DBF_MPC_ERROR]	= {"mpc_error", 8, 1, 80, CTC_DBF_ERROR, NULL},
	[CTCM_DBF_MPC_TRACE]	= {"mpc_trace", 8, 1, 80, CTC_DBF_ERROR, NULL},
};
void ctcm_unregister_dbf_views(void)
{
int x;
for (x = 0; x < CTCM_DBF_INFOS; x++) {
debug_unregister(ctcm_dbf[x].id);
ctcm_dbf[x].id = NULL;
}
}
int ctcm_register_dbf_views(void)
{
int x;
for (x = 0; x < CTCM_DBF_INFOS; x++) {
/* register the areas */
ctcm_dbf[x].id = debug_register(ctcm_dbf[x].name,
ctcm_dbf[x].pages,
ctcm_dbf[x].areas,
ctcm_dbf[x].len);
if (ctcm_dbf[x].id == NULL) {
ctcm_unregister_dbf_views();
return -ENOMEM;
}
/* register a view */
debug_register_view(ctcm_dbf[x].id, &debug_hex_ascii_view);
/* set a passing level */
debug_set_level(ctcm_dbf[x].id, ctcm_dbf[x].level);
}
return 0;
}
/*
 * printf-style helper: format a message into a stack buffer and emit
 * it as a text event in the selected debug area.  Messages longer
 * than 63 characters are truncated by vsnprintf().
 */
void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
{
	char dbf_txt_buf[64];
	va_list args;

	/* Skip formatting entirely if the area's level filters us out. */
	if (level > (ctcm_dbf[dbf_nix].id)->level)
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(ctcm_dbf[dbf_nix].id, level, dbf_txt_buf);
}
| gpl-2.0 |
MoKee/android_kernel_samsung_crespo | arch/mips/jz4740/gpio.c | 2529 | 15170 | /*
* Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
* JZ4740 platform GPIO support
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <asm/mach-jz4740/base.h>
#define JZ4740_GPIO_BASE_A (32*0)
#define JZ4740_GPIO_BASE_B (32*1)
#define JZ4740_GPIO_BASE_C (32*2)
#define JZ4740_GPIO_BASE_D (32*3)
#define JZ4740_GPIO_NUM_A 32
#define JZ4740_GPIO_NUM_B 32
#define JZ4740_GPIO_NUM_C 31
#define JZ4740_GPIO_NUM_D 32
#define JZ4740_IRQ_GPIO_BASE_A (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_A)
#define JZ4740_IRQ_GPIO_BASE_B (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_B)
#define JZ4740_IRQ_GPIO_BASE_C (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_C)
#define JZ4740_IRQ_GPIO_BASE_D (JZ4740_IRQ_GPIO(0) + JZ4740_GPIO_BASE_D)
#define JZ_REG_GPIO_PIN 0x00
#define JZ_REG_GPIO_DATA 0x10
#define JZ_REG_GPIO_DATA_SET 0x14
#define JZ_REG_GPIO_DATA_CLEAR 0x18
#define JZ_REG_GPIO_MASK 0x20
#define JZ_REG_GPIO_MASK_SET 0x24
#define JZ_REG_GPIO_MASK_CLEAR 0x28
#define JZ_REG_GPIO_PULL 0x30
#define JZ_REG_GPIO_PULL_SET 0x34
#define JZ_REG_GPIO_PULL_CLEAR 0x38
#define JZ_REG_GPIO_FUNC 0x40
#define JZ_REG_GPIO_FUNC_SET 0x44
#define JZ_REG_GPIO_FUNC_CLEAR 0x48
#define JZ_REG_GPIO_SELECT 0x50
#define JZ_REG_GPIO_SELECT_SET 0x54
#define JZ_REG_GPIO_SELECT_CLEAR 0x58
#define JZ_REG_GPIO_DIRECTION 0x60
#define JZ_REG_GPIO_DIRECTION_SET 0x64
#define JZ_REG_GPIO_DIRECTION_CLEAR 0x68
#define JZ_REG_GPIO_TRIGGER 0x70
#define JZ_REG_GPIO_TRIGGER_SET 0x74
#define JZ_REG_GPIO_TRIGGER_CLEAR 0x78
#define JZ_REG_GPIO_FLAG 0x80
#define JZ_REG_GPIO_FLAG_CLEAR 0x14
#define GPIO_TO_BIT(gpio) BIT(gpio & 0x1f)
#define GPIO_TO_REG(gpio, reg) (gpio_to_jz_gpio_chip(gpio)->base + (reg))
#define CHIP_TO_REG(chip, reg) (gpio_chip_to_jz_gpio_chip(chip)->base + (reg))
/* Driver state for one 32-pin GPIO bank. */
struct jz_gpio_chip {
	unsigned int irq;		/* upstream INTC interrupt of this bank */
	unsigned int irq_base;		/* first per-pin virtual IRQ number */
	uint32_t wakeup;		/* pins allowed to wake the system */
	uint32_t suspend_mask;		/* saved MASK register across suspend */
	uint32_t edge_trigger_both;	/* pins emulating both-edge triggering */

	void __iomem *base;		/* mapped register window of the bank */

	spinlock_t lock;		/* protects wakeup bookkeeping */

	struct gpio_chip gpio_chip;
};
static struct jz_gpio_chip jz4740_gpio_chips[];

/* Map a global GPIO number to its bank (32 pins per bank). */
static inline struct jz_gpio_chip *gpio_to_jz_gpio_chip(unsigned int gpio)
{
	return &jz4740_gpio_chips[gpio >> 5];
}

/* Recover the bank from its embedded struct gpio_chip. */
static inline struct jz_gpio_chip *gpio_chip_to_jz_gpio_chip(struct gpio_chip *gpio_chip)
{
	return container_of(gpio_chip, struct jz_gpio_chip, gpio_chip);
}

/* Bank pointer stashed as chip data on each per-pin IRQ. */
static inline struct jz_gpio_chip *irq_to_jz_gpio_chip(struct irq_data *data)
{
	return irq_data_get_irq_chip_data(data);
}

/* Write the pin's bit into one of the bank's set/clear registers. */
static inline void jz_gpio_write_bit(unsigned int gpio, unsigned int reg)
{
	writel(GPIO_TO_BIT(gpio), GPIO_TO_REG(gpio, reg));
}
/*
 * Route a pin either back to plain GPIO (JZ_GPIO_FUNC_NONE) or to one
 * of its alternate functions.  The FUNC, SELECT and TRIGGER bits
 * together encode which function is active.  Always returns 0.
 */
int jz_gpio_set_function(int gpio, enum jz_gpio_function function)
{
	if (function == JZ_GPIO_FUNC_NONE) {
		jz_gpio_write_bit(gpio, JZ_REG_GPIO_FUNC_CLEAR);
		jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_CLEAR);
		jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_CLEAR);
	} else {
		jz_gpio_write_bit(gpio, JZ_REG_GPIO_FUNC_SET);
		jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_CLEAR);
		switch (function) {
		case JZ_GPIO_FUNC1:
			jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_CLEAR);
			break;
		case JZ_GPIO_FUNC3:
			/* FUNC3 = FUNC2 plus the TRIGGER bit. */
			jz_gpio_write_bit(gpio, JZ_REG_GPIO_TRIGGER_SET);
		case JZ_GPIO_FUNC2: /* Fallthrough */
			jz_gpio_write_bit(gpio, JZ_REG_GPIO_SELECT_SET);
			break;
		default:
			BUG();
			break;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(jz_gpio_set_function);
/*
 * Request and configure a set of pins in one call.  If any
 * gpio_request() fails, every pin acquired so far is released and set
 * back to plain GPIO, and the failing error code is returned.
 */
int jz_gpio_bulk_request(const struct jz_gpio_bulk_request *request, size_t num)
{
	size_t i;
	int ret;

	for (i = 0; i < num; ++i, ++request) {
		ret = gpio_request(request->gpio, request->name);
		if (ret)
			goto err;
		jz_gpio_set_function(request->gpio, request->function);
	}

	return 0;

err:
	/* Unwind: step back past the failed entry (never requested),
	 * then free the i pins that were successfully acquired. */
	for (--request; i > 0; --i, --request) {
		gpio_free(request->gpio);
		jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_request);
/* Release a set of pins and return them to plain GPIO mode. */
void jz_gpio_bulk_free(const struct jz_gpio_bulk_request *request, size_t num)
{
	size_t i;

	for (i = 0; i < num; ++i, ++request) {
		gpio_free(request->gpio);
		jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
	}
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_free);

/* Park a set of pins for suspend: plain GPIO, input direction, and
 * PULL bit set (the state the resume path undoes). */
void jz_gpio_bulk_suspend(const struct jz_gpio_bulk_request *request, size_t num)
{
	size_t i;

	for (i = 0; i < num; ++i, ++request) {
		jz_gpio_set_function(request->gpio, JZ_GPIO_FUNC_NONE);
		jz_gpio_write_bit(request->gpio, JZ_REG_GPIO_DIRECTION_CLEAR);
		jz_gpio_write_bit(request->gpio, JZ_REG_GPIO_PULL_SET);
	}
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_suspend);

/* Restore each pin's configured function after resume. */
void jz_gpio_bulk_resume(const struct jz_gpio_bulk_request *request, size_t num)
{
	size_t i;

	for (i = 0; i < num; ++i, ++request)
		jz_gpio_set_function(request->gpio, request->function);
}
EXPORT_SYMBOL_GPL(jz_gpio_bulk_resume);
/* Enable the pin's pull-up: clearing the PULL bit activates it. */
void jz_gpio_enable_pullup(unsigned gpio)
{
	jz_gpio_write_bit(gpio, JZ_REG_GPIO_PULL_CLEAR);
}
EXPORT_SYMBOL_GPL(jz_gpio_enable_pullup);

/* Disable the pin's pull-up: setting the PULL bit turns it off. */
void jz_gpio_disable_pullup(unsigned gpio)
{
	jz_gpio_write_bit(gpio, JZ_REG_GPIO_PULL_SET);
}
EXPORT_SYMBOL_GPL(jz_gpio_disable_pullup);
/* Read the pin level from the bank's PIN register (gpio is the
 * offset within this bank); normalized to 0/1. */
static int jz_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
{
	return !!(readl(CHIP_TO_REG(chip, JZ_REG_GPIO_PIN)) & BIT(gpio));
}
static void jz_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value)
{
uint32_t __iomem *reg = CHIP_TO_REG(chip, JZ_REG_GPIO_DATA_SET);
reg += !value;
writel(BIT(gpio), reg);
}
/* Switch a pin to output and immediately drive the requested level. */
static int jz_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
	int value)
{
	writel(BIT(gpio), CHIP_TO_REG(chip, JZ_REG_GPIO_DIRECTION_SET));
	jz_gpio_set_value(chip, gpio, value);

	return 0;
}

/* Switch a pin to input. */
static int jz_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
	writel(BIT(gpio), CHIP_TO_REG(chip, JZ_REG_GPIO_DIRECTION_CLEAR));

	return 0;
}
/* Whole-bank helpers: "port" is any GPIO number in the target bank
 * (GPIO_TO_REG resolves the bank), "mask" selects the affected pins. */

/* Set all masked pins of the bank to input. */
int jz_gpio_port_direction_input(int port, uint32_t mask)
{
	writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_CLEAR));

	return 0;
}
EXPORT_SYMBOL(jz_gpio_port_direction_input);

/* Set all masked pins of the bank to output. */
int jz_gpio_port_direction_output(int port, uint32_t mask)
{
	writel(mask, GPIO_TO_REG(port, JZ_REG_GPIO_DIRECTION_SET));

	return 0;
}
EXPORT_SYMBOL(jz_gpio_port_direction_output);

/* Drive masked pins: clear the zero bits first, then set the ones. */
void jz_gpio_port_set_value(int port, uint32_t value, uint32_t mask)
{
	writel(~value & mask, GPIO_TO_REG(port, JZ_REG_GPIO_DATA_CLEAR));
	writel(value & mask, GPIO_TO_REG(port, JZ_REG_GPIO_DATA_SET));
}
EXPORT_SYMBOL(jz_gpio_port_set_value);

/* Read the masked pin levels of the bank. */
uint32_t jz_gpio_port_get_value(int port, uint32_t mask)
{
	uint32_t value = readl(GPIO_TO_REG(port, JZ_REG_GPIO_PIN));

	return value & mask;
}
EXPORT_SYMBOL(jz_gpio_port_get_value);
/* GPIO numbers map 1:1 onto a contiguous virtual IRQ range starting
 * at JZ4740_IRQ_GPIO(0); these two are each other's inverse. */
int gpio_to_irq(unsigned gpio)
{
	return JZ4740_IRQ_GPIO(0) + gpio;
}
EXPORT_SYMBOL_GPL(gpio_to_irq);

int irq_to_gpio(unsigned irq)
{
	return irq - JZ4740_IRQ_GPIO(0);
}
EXPORT_SYMBOL_GPL(irq_to_gpio);

/* Bit of the IRQ's pin within its 32-pin bank. */
#define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f)
/*
 * Both-edge triggering is emulated: for pins marked in
 * edge_trigger_both, flip the armed edge to the opposite of the
 * current pin level (DIRECTION selects the edge while a pin is in IRQ
 * mode, per jz_gpio_irq_set_type()).  No-op for other pins.
 */
static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
{
	uint32_t value;
	void __iomem *reg;
	uint32_t mask = IRQ_TO_BIT(irq);

	if (!(chip->edge_trigger_both & mask))
		return;

	reg = chip->base;

	value = readl(chip->base + JZ_REG_GPIO_PIN);
	/* Pin currently high -> arm falling edge; low -> rising. */
	if (value & mask)
		reg += JZ_REG_GPIO_DIRECTION_CLEAR;
	else
		reg += JZ_REG_GPIO_DIRECTION_SET;

	writel(mask, reg);
}
/*
 * Chained handler for a bank's upstream INTC interrupt: read the FLAG
 * register, take the highest pending pin and forward it as its
 * per-pin virtual IRQ.  Only one pin is serviced per invocation.
 */
static void jz_gpio_irq_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	uint32_t flag;
	unsigned int gpio_irq;
	unsigned int gpio_bank;
	struct jz_gpio_chip *chip = irq_desc_get_handler_data(desc);

	/* NOTE(review): bank INTC IRQ numbers appear to count down
	 * from JZ4740_IRQ_GPIO0 — confirm against JZ4740_IRQ_INTC_GPIO(). */
	gpio_bank = JZ4740_IRQ_GPIO0 - irq;

	flag = readl(chip->base + JZ_REG_GPIO_FLAG);

	if (!flag)
		return;

	gpio_irq = __fls(flag);

	/* NOTE(review): this passes the bank's upstream irq, not the
	 * per-pin irq, so IRQ_TO_BIT() inside works on the INTC number;
	 * looks suspicious for both-edge emulation — verify. */
	jz_gpio_check_trigger_both(chip, irq);

	gpio_irq += (gpio_bank << 5) + JZ4740_IRQ_GPIO(0);

	generic_handle_irq(gpio_irq);
};
/* Write this IRQ's pin bit into one of its bank's set/clear registers. */
static inline void jz_gpio_set_irq_bit(struct irq_data *data, unsigned int reg)
{
	struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
	writel(IRQ_TO_BIT(data->irq), chip->base + reg);
}

/* Mask the per-pin interrupt. */
static void jz_gpio_irq_mask(struct irq_data *data)
{
	jz_gpio_set_irq_bit(data, JZ_REG_GPIO_MASK_SET);
};

/* Unmask the per-pin interrupt, re-arming the emulated both-edge
 * trigger first so the correct edge is selected. */
static void jz_gpio_irq_unmask(struct irq_data *data)
{
	struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);

	jz_gpio_check_trigger_both(chip, data->irq);

	jz_gpio_set_irq_bit(data, JZ_REG_GPIO_MASK_CLEAR);
};

/* TODO: Check if function is gpio */
/* Put the pin into interrupt mode (SELECT set) and unmask it. */
static unsigned int jz_gpio_irq_startup(struct irq_data *data)
{
	jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_SET);
	jz_gpio_irq_unmask(data);
	return 0;
}

/* Mask the pin and return it to plain GPIO input mode. */
static void jz_gpio_irq_shutdown(struct irq_data *data)
{
	jz_gpio_irq_mask(data);

	/* Set direction to input */
	jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
	jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_CLEAR);
}

/* Acknowledge the pin's pending flag. */
static void jz_gpio_irq_ack(struct irq_data *data)
{
	jz_gpio_set_irq_bit(data, JZ_REG_GPIO_FLAG_CLEAR);
};
/*
 * Configure the trigger type of a per-pin IRQ.  In IRQ mode the
 * DIRECTION bit selects rising/high (set) vs falling/low (clear) and
 * the TRIGGER bit selects edge (set) vs level (clear).  IRQ_TYPE_
 * EDGE_BOTH is emulated: start with the edge opposite to the current
 * pin level and let jz_gpio_check_trigger_both() flip it afterwards.
 */
static int jz_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
	struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
	unsigned int irq = data->irq;

	if (flow_type == IRQ_TYPE_EDGE_BOTH) {
		uint32_t value = readl(chip->base + JZ_REG_GPIO_PIN);
		if (value & IRQ_TO_BIT(irq))
			flow_type = IRQ_TYPE_EDGE_FALLING;
		else
			flow_type = IRQ_TYPE_EDGE_RISING;
		chip->edge_trigger_both |= IRQ_TO_BIT(irq);
	} else {
		chip->edge_trigger_both &= ~IRQ_TO_BIT(irq);
	}

	switch (flow_type) {
	case IRQ_TYPE_EDGE_RISING:
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET);
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET);
		break;
	case IRQ_TYPE_EDGE_FALLING:
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_SET);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_SET);
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR);
		break;
	case IRQ_TYPE_LEVEL_LOW:
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR);
		jz_gpio_set_irq_bit(data, JZ_REG_GPIO_TRIGGER_CLEAR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int jz_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data);
spin_lock(&chip->lock);
if (on)
chip->wakeup |= IRQ_TO_BIT(data->irq);
else
chip->wakeup &= ~IRQ_TO_BIT(data->irq);
spin_unlock(&chip->lock);
irq_set_irq_wake(chip->irq, on);
return 0;
}
/* irq_chip shared by all per-pin GPIO interrupts.  SET_TYPE_MASKED
 * makes the core mask the IRQ around trigger reconfiguration. */
static struct irq_chip jz_gpio_irq_chip = {
	.name = "GPIO",
	.irq_mask = jz_gpio_irq_mask,
	.irq_unmask = jz_gpio_irq_unmask,
	.irq_ack = jz_gpio_irq_ack,
	.irq_startup = jz_gpio_irq_startup,
	.irq_shutdown = jz_gpio_irq_shutdown,
	.irq_set_type = jz_gpio_irq_set_type,
	.irq_set_wake = jz_gpio_irq_set_wake,
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

/*
 * This lock class tells lockdep that GPIO irqs are in a different
 * category than their parents, so it won't report false recursion.
 */
static struct lock_class_key gpio_lock_class;

/* Static per-bank initializer; register base and parent IRQ are
 * filled in at runtime by jz4740_gpio_chip_init(). */
#define JZ4740_GPIO_CHIP(_bank) { \
	.irq_base = JZ4740_IRQ_GPIO_BASE_ ## _bank, \
	.gpio_chip = { \
		.label = "Bank " # _bank, \
		.owner = THIS_MODULE, \
		.set = jz_gpio_set_value, \
		.get = jz_gpio_get_value, \
		.direction_output = jz_gpio_direction_output, \
		.direction_input = jz_gpio_direction_input, \
		.base = JZ4740_GPIO_BASE_ ## _bank, \
		.ngpio = JZ4740_GPIO_NUM_ ## _bank, \
	}, \
}

/* The four GPIO banks of the JZ4740. */
static struct jz_gpio_chip jz4740_gpio_chips[] = {
	JZ4740_GPIO_CHIP(A),
	JZ4740_GPIO_CHIP(B),
	JZ4740_GPIO_CHIP(C),
	JZ4740_GPIO_CHIP(D),
};
/* Save the bank's interrupt mask, then mask every pin except the
 * registered wakeup sources, which are explicitly unmasked. */
static void jz4740_gpio_suspend_chip(struct jz_gpio_chip *chip)
{
	chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
	writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
	writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
}

/* syscore suspend hook: park every bank.  Never fails. */
static int jz4740_gpio_suspend(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); i++)
		jz4740_gpio_suspend_chip(&jz4740_gpio_chips[i]);

	return 0;
}

/* Restore the exact mask captured at suspend time. */
static void jz4740_gpio_resume_chip(struct jz_gpio_chip *chip)
{
	uint32_t mask = chip->suspend_mask;

	writel(~mask, chip->base + JZ_REG_GPIO_MASK_CLEAR);
	writel(mask, chip->base + JZ_REG_GPIO_MASK_SET);
}

/* syscore resume hook: banks restored in reverse order of suspend. */
static void jz4740_gpio_resume(void)
{
	int i;

	for (i = ARRAY_SIZE(jz4740_gpio_chips) - 1; i >= 0 ; i--)
		jz4740_gpio_resume_chip(&jz4740_gpio_chips[i]);
}

static struct syscore_ops jz4740_gpio_syscore_ops = {
	.suspend = jz4740_gpio_suspend,
	.resume = jz4740_gpio_resume,
};
/*
 * Bring up one bank: map its 0x100-byte register window, register the
 * gpio_chip, install the chained demux handler on the bank's INTC
 * line, and set up every per-pin virtual IRQ (level-flow by default;
 * set_type switches trigger configuration in hardware).
 */
static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
{
	int irq;

	spin_lock_init(&chip->lock);

	/* Banks are laid out 0x100 bytes apart in the register map. */
	chip->base = ioremap(JZ4740_GPIO_BASE_ADDR + (id * 0x100), 0x100);

	gpiochip_add(&chip->gpio_chip);

	chip->irq = JZ4740_IRQ_INTC_GPIO(id);
	irq_set_handler_data(chip->irq, chip);
	irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler);

	for (irq = chip->irq_base; irq < chip->irq_base + chip->gpio_chip.ngpio; ++irq) {
		irq_set_lockdep_class(irq, &gpio_lock_class);
		irq_set_chip_data(irq, chip);
		irq_set_chip_and_handler(irq, &jz_gpio_irq_chip,
					 handle_level_irq);
	}
}
/*
 * Early arch init: bring up all four banks and hook the syscore
 * suspend/resume callbacks.
 */
static int __init jz4740_gpio_init(void)
{
	struct jz_gpio_chip *chip = jz4740_gpio_chips;
	unsigned int id;

	for (id = 0; id < ARRAY_SIZE(jz4740_gpio_chips); ++id, ++chip)
		jz4740_gpio_chip_init(chip, id);

	register_syscore_ops(&jz4740_gpio_syscore_ops);

	printk(KERN_INFO "JZ4740 GPIO initialized\n");

	return 0;
}
arch_initcall(jz4740_gpio_init);
#ifdef CONFIG_DEBUG_FS

/* Dump one named register of a bank into the seq_file. */
static inline void gpio_seq_reg(struct seq_file *s, struct jz_gpio_chip *chip,
	const char *name, unsigned int reg)
{
	seq_printf(s, "\t%s: %08x\n", name, readl(chip->base + reg));
}

/* debugfs show callback: dump the main registers of every bank. */
static int gpio_regs_show(struct seq_file *s, void *unused)
{
	struct jz_gpio_chip *chip = jz4740_gpio_chips;
	int i;

	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i, ++chip) {
		seq_printf(s, "==GPIO %d==\n", i);
		gpio_seq_reg(s, chip, "Pin", JZ_REG_GPIO_PIN);
		gpio_seq_reg(s, chip, "Data", JZ_REG_GPIO_DATA);
		gpio_seq_reg(s, chip, "Mask", JZ_REG_GPIO_MASK);
		gpio_seq_reg(s, chip, "Pull", JZ_REG_GPIO_PULL);
		gpio_seq_reg(s, chip, "Func", JZ_REG_GPIO_FUNC);
		gpio_seq_reg(s, chip, "Select", JZ_REG_GPIO_SELECT);
		gpio_seq_reg(s, chip, "Direction", JZ_REG_GPIO_DIRECTION);
		gpio_seq_reg(s, chip, "Trigger", JZ_REG_GPIO_TRIGGER);
		gpio_seq_reg(s, chip, "Flag", JZ_REG_GPIO_FLAG);
	}

	return 0;
}

static int gpio_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, gpio_regs_show, NULL);
}

static const struct file_operations gpio_regs_operations = {
	.open		= gpio_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Expose the register dump as /sys/kernel/debug/jz_regs_gpio.
 * Creation failure is deliberately ignored (debug aid only). */
static int __init gpio_debugfs_init(void)
{
	(void) debugfs_create_file("jz_regs_gpio", S_IFREG | S_IRUGO,
				NULL, NULL, &gpio_regs_operations);

	return 0;
}
subsys_initcall(gpio_debugfs_init);

#endif
| gpl-2.0 |
latlontude/linux | drivers/net/wireless/cw1200/bh.c | 2529 | 14821 | /*
* Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
*
* Copyright (c) 2010, ST-Ericsson
* Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
*
* Based on:
* ST-Ericsson UMAC CW1200 driver, which is
* Copyright (c) 2010, ST-Ericsson
* Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <net/mac80211.h>
#include <linux/kthread.h>
#include <linux/timer.h>
#include "cw1200.h"
#include "bh.h"
#include "hwio.h"
#include "wsm.h"
#include "hwbus.h"
#include "debug.h"
#include "fwio.h"
static int cw1200_bh(void *arg);

/* Maximum firmware download block written in one transfer. */
#define DOWNLOAD_BLOCK_SIZE_WR	(0x1000 - 4)
/* An SPI message cannot be bigger than (2^12 - 1) 16-bit words;
 * "*2" converts that to bytes.
 */
#define MAX_SZ_RD_WR_BUFFERS	(DOWNLOAD_BLOCK_SIZE_WR*2)
/* Trailing control-register word piggybacked on each read. */
#define PIGGYBACK_CTRL_REG	(2)
#define EFFECTIVE_BUF_SIZE	(MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)

/* Suspend state privates */
enum cw1200_bh_pm_state {
	CW1200_BH_RESUMED = 0,
	CW1200_BH_SUSPEND,	/* suspend requested by cw1200_bh_suspend() */
	CW1200_BH_SUSPENDED,	/* acknowledged by the BH thread */
	CW1200_BH_RESUME,	/* resume requested by cw1200_bh_resume() */
};

typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv,
	u8 *data, size_t size);
/* Workqueue trampoline that runs the BH loop for this device. */
static void cw1200_bh_work(struct work_struct *work)
{
	struct cw1200_common *priv =
	container_of(work, struct cw1200_common, bh_work);
	cw1200_bh(priv);
}
/*
 * Create the single-threaded high-priority BH workqueue, reset all BH
 * bookkeeping and start the BH loop.  Returns 0 on success, -ENOMEM
 * if the workqueue cannot be created, or 1 if queuing the work failed
 * (note: not a -errno in that case).
 */
int cw1200_register_bh(struct cw1200_common *priv)
{
	int err = 0;
	/* Realtime workqueue */
	priv->bh_workqueue = alloc_workqueue("cw1200_bh",
				WQ_MEM_RECLAIM | WQ_HIGHPRI
				| WQ_CPU_INTENSIVE, 1);

	if (!priv->bh_workqueue)
		return -ENOMEM;

	INIT_WORK(&priv->bh_work, cw1200_bh_work);

	pr_debug("[BH] register.\n");

	atomic_set(&priv->bh_rx, 0);
	atomic_set(&priv->bh_tx, 0);
	atomic_set(&priv->bh_term, 0);
	atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
	priv->bh_error = 0;
	priv->hw_bufs_used = 0;
	priv->buf_id_tx = 0;
	priv->buf_id_rx = 0;
	init_waitqueue_head(&priv->bh_wq);
	init_waitqueue_head(&priv->bh_evt_wq);

	/* queue_work() returning false means the work was already queued. */
	err = !queue_work(priv->bh_workqueue, &priv->bh_work);
	WARN_ON(err);
	return err;
}
/* Signal the BH loop to terminate, wake it, and wait for the
 * workqueue to drain before destroying it. */
void cw1200_unregister_bh(struct cw1200_common *priv)
{
	atomic_add(1, &priv->bh_term);
	wake_up(&priv->bh_wq);

	flush_workqueue(priv->bh_workqueue);

	destroy_workqueue(priv->bh_workqueue);
	priv->bh_workqueue = NULL;

	pr_debug("[BH] unregistered.\n");
}
/*
 * Device interrupt: mask further interrupts (re-enabled by the BH
 * loop) and kick the BH thread.  Only the 0->1 transition of bh_rx
 * wakes it, avoiding redundant wakeups.
 */
void cw1200_irq_handler(struct cw1200_common *priv)
{
	pr_debug("[BH] irq.\n");

	/* Disable Interrupts! */
	/* NOTE:  hwbus_ops->lock already held */
	__cw1200_irq_enable(priv, 0);

	if (/* WARN_ON */(priv->bh_error))
		return;

	if (atomic_add_return(1, &priv->bh_rx) == 1)
		wake_up(&priv->bh_wq);
}
EXPORT_SYMBOL_GPL(cw1200_irq_handler);
/* Notify the BH thread that TX work is pending; like the RX path,
 * only the 0->1 transition of bh_tx triggers a wakeup. */
void cw1200_bh_wakeup(struct cw1200_common *priv)
{
	pr_debug("[BH] wakeup.\n");
	if (priv->bh_error) {
		pr_err("[BH] wakeup failed (BH error)\n");
		return;
	}

	if (atomic_add_return(1, &priv->bh_tx) == 1)
		wake_up(&priv->bh_wq);
}
/*
 * Ask the BH thread to park itself and wait (up to one second) for it
 * to acknowledge by moving to CW1200_BH_SUSPENDED.  Returns 0 on
 * success, -EINVAL if the BH is already in error state, -ETIMEDOUT if
 * the acknowledgement never arrives.
 */
int cw1200_bh_suspend(struct cw1200_common *priv)
{
	long ret;

	pr_debug("[BH] suspend.\n");
	if (priv->bh_error) {
		wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n");
		return -EINVAL;
	}

	atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
	wake_up(&priv->bh_wq);

	ret = wait_event_timeout(priv->bh_evt_wq,
				 priv->bh_error ||
				 (CW1200_BH_SUSPENDED ==
				  atomic_read(&priv->bh_suspend)),
				 1 * HZ);
	return ret ? 0 : -ETIMEDOUT;
}
/*
 * Counterpart of cw1200_bh_suspend(): request resume and wait up to
 * one second for the BH thread to report CW1200_BH_RESUMED.  Returns
 * 0, -EINVAL (BH in error state) or -ETIMEDOUT.
 */
int cw1200_bh_resume(struct cw1200_common *priv)
{
	pr_debug("[BH] resume.\n");
	if (priv->bh_error) {
		wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n");
		return -EINVAL;
	}

	atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
	wake_up(&priv->bh_wq);

	return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
		(CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)),
		1 * HZ) ? 0 : -ETIMEDOUT;
}
/* Account for one more in-flight TX buffer on the device. */
static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
{
	++priv->hw_bufs_used;
}
/*
 * Return `count` TX buffers to the pool.  Result: -1 on accounting
 * underflow (with a WARN), 1 if the device was previously at or above
 * its buffer limit (so TX can be restarted), 0 otherwise.  Waiters on
 * bh_evt_wq are woken once the count drops to zero.
 */
int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
{
	int old_used = priv->hw_bufs_used;
	int ret;

	priv->hw_bufs_used = old_used - count;

	if (WARN_ON(priv->hw_bufs_used < 0))
		ret = -1;
	else
		ret = (old_used >= priv->wsm_caps.input_buffers) ? 1 : 0;

	if (priv->hw_bufs_used == 0)
		wake_up(&priv->bh_evt_wq);

	return ret;
}
/*
 * Read the device control register, retrying once on failure (the
 * first read can fail while the chip is waking up).  Returns 0 on
 * success or the error of the final attempt.
 */
static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
				   u16 *ctrl_reg)
{
	int attempt;
	int ret = 0;

	for (attempt = 0; attempt < 2; attempt++) {
		ret = cw1200_reg_read_16(priv,
					 ST90TDS_CONTROL_REG_ID, ctrl_reg);
		if (!ret)
			return 0;
	}

	pr_err("[BH] Failed to read control register.\n");
	return ret;
}
/*
 * Start waking the device: program the DPLL, assert WLAN_UP and poll
 * the control register once.  Returns 1 if the device is already
 * awake (WLAN_RDY set), 0 if the caller must keep polling, or a
 * negative errno on register access failure.
 */
static int cw1200_device_wakeup(struct cw1200_common *priv)
{
	u16 ctrl_reg;
	int ret;

	pr_debug("[BH] Device wakeup.\n");

	/* First, set the dpll register */
	ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
				  cw1200_dpll_from_clk(priv->hw_refclk));
	if (WARN_ON(ret))
		return ret;

	/* To force the device to be always-on, the host sets WLAN_UP to 1 */
	ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
				  ST90TDS_CONT_WUP_BIT);
	if (WARN_ON(ret))
		return ret;

	ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
	if (WARN_ON(ret))
		return ret;

	/* If the device returns WLAN_RDY as 1, the device is active and will
	 * remain active.
	 */
	if (ctrl_reg & ST90TDS_CONT_RDY_BIT) {
		pr_debug("[BH] Device awake.\n");
		return 1;
	}

	return 0;
}
/* Must be called from BH thread.
 * Record whether the device may enter powersave; the BH loop reads
 * this flag when deciding to drop the WLAN_UP request.
 * (Fixes typos: "thraed" in the comment and "Powerave" in the debug
 * message.) */
void cw1200_enable_powersave(struct cw1200_common *priv,
			     bool enable)
{
	pr_debug("[BH] Powersave is %s.\n",
		 enable ? "enabled" : "disabled");
	priv->powersave_enabled = enable;
}
/*
 * Read and dispatch one WSM message from the device.
 * The length of the next message is taken from *ctrl_reg; the control
 * register value piggybacked at the end of the transfer is written back
 * into *ctrl_reg so the caller can decide whether more data is pending.
 * Sets *tx = 1 when a TX-confirm releases a buffer and a retry is useful.
 * Returns 0 on success (including "nothing to read"), -1 on error.
 */
static int cw1200_bh_rx_helper(struct cw1200_common *priv,
			       uint16_t *ctrl_reg,
			       int *tx)
{
	size_t read_len = 0;
	struct sk_buff *skb_rx = NULL;
	struct wsm_hdr *wsm;
	size_t wsm_len;
	u16 wsm_id;
	u8 wsm_seq;
	/* NOTE(review): rx_resync is function-local and always 1 here, so
	 * the sequence-number check below is never reached.  Looks like
	 * leftover state from before this code was split out of the main
	 * BH loop -- verify intent.
	 */
	int rx_resync = 1;
	size_t alloc_len;
	u8 *data;

	/* Message length is reported in 16-bit words. */
	read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
	if (!read_len)
		return 0; /* No more work */

	if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
		    (read_len > EFFECTIVE_BUF_SIZE))) {
		pr_debug("Invalid read len: %zu (%04x)",
			 read_len, *ctrl_reg);
		goto err;
	}

	/* Add SIZE of PIGGYBACK reg (CONTROL Reg)
	 * to the NEXT Message length + 2 Bytes for SKB
	 */
	read_len = read_len + 2;

	alloc_len = priv->hwbus_ops->align_size(
		priv->hwbus_priv, read_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
		pr_debug("Read aligned len: %zu\n",
			 alloc_len);
	}

	skb_rx = dev_alloc_skb(alloc_len);
	if (WARN_ON(!skb_rx))
		goto err;

	skb_trim(skb_rx, 0);
	skb_put(skb_rx, read_len);
	data = skb_rx->data;
	if (WARN_ON(!data))
		goto err;

	if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) {
		pr_err("rx blew up, len %zu\n", alloc_len);
		goto err;
	}

	/* Piggyback: next control-register value is appended to the data. */
	*ctrl_reg = __le16_to_cpu(
		((__le16 *)data)[alloc_len / 2 - 1]);

	wsm = (struct wsm_hdr *)data;
	wsm_len = __le16_to_cpu(wsm->len);
	if (WARN_ON(wsm_len > read_len))
		goto err;

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("<-- ",
				     DUMP_PREFIX_NONE,
				     data, wsm_len);

	wsm_id = __le16_to_cpu(wsm->id) & 0xFFF;
	wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7;

	skb_trim(skb_rx, wsm_len);

	if (wsm_id == 0x0800) {
		/* Firmware exception indication: report and bail out. */
		wsm_handle_exception(priv,
				     &data[sizeof(*wsm)],
				     wsm_len - sizeof(*wsm));
		goto err;
	} else if (!rx_resync) {
		if (WARN_ON(wsm_seq != priv->wsm_rx_seq))
			goto err;
	}
	priv->wsm_rx_seq = (wsm_seq + 1) & 7;
	rx_resync = 0;

	/* IDs with bit 0x0400 set are TX confirmations: release a buffer. */
	if (wsm_id & 0x0400) {
		int rc = wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(rc < 0))
			return rc;
		else if (rc > 0)
			*tx = 1;
	}

	/* wsm_handle_rx() takes care of the SKB lifetime */
	if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
		goto err;

	if (skb_rx) {
		dev_kfree_skb(skb_rx);
		skb_rx = NULL;
	}

	return 0;

err:
	if (skb_rx) {
		dev_kfree_skb(skb_rx);
		skb_rx = NULL;
	}
	return -1;
}
/*
 * Pull one frame from the WSM queues and write it to the device.
 * Returns 1 when a TX burst is in progress (caller should transmit more),
 * 0 when there is no work or the device is still waking up (in the latter
 * case *pending_tx is set so the caller retries later), negative on a
 * fatal error.
 */
static int cw1200_bh_tx_helper(struct cw1200_common *priv,
			       int *pending_tx,
			       int *tx_burst)
{
	size_t tx_len;
	u8 *data;
	int ret;
	struct wsm_hdr *wsm;

	if (priv->device_can_sleep) {
		ret = cw1200_device_wakeup(priv);
		if (WARN_ON(ret < 0)) { /* Error in wakeup */
			*pending_tx = 1;
			return 0;
		} else if (ret) { /* Woke up */
			priv->device_can_sleep = false;
		} else { /* Did not awake */
			*pending_tx = 1;
			return 0;
		}
	}

	/* Reserve the buffer before asking WSM for a frame; released
	 * again below if there is nothing to send or the write fails.
	 */
	wsm_alloc_tx_buffer(priv);
	ret = wsm_get_tx(priv, &data, &tx_len, tx_burst);
	if (ret <= 0) {
		wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(ret < 0))
			return ret; /* Error */
		return 0; /* No work */
	}

	wsm = (struct wsm_hdr *)data;
	BUG_ON(tx_len < sizeof(*wsm));
	BUG_ON(__le16_to_cpu(wsm->len) != tx_len);

	atomic_add(1, &priv->bh_tx);

	tx_len = priv->hwbus_ops->align_size(
		priv->hwbus_priv, tx_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE))
		pr_debug("Write aligned len: %zu\n", tx_len);

	/* Stamp the current TX sequence number into the WSM header. */
	wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX));
	wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq));

	if (WARN_ON(cw1200_data_write(priv, data, tx_len))) {
		pr_err("tx blew up, len %zu\n", tx_len);
		wsm_release_tx_buffer(priv, 1);
		return -1; /* Error */
	}

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("--> ",
				     DUMP_PREFIX_NONE,
				     data,
				     __le16_to_cpu(wsm->len));

	wsm_txed(priv, data);
	priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;

	if (*tx_burst > 1) {
		cw1200_debug_tx_burst(priv);
		return 1; /* Work remains */
	}

	return 0;
}
/*
 * Main "bottom half" kernel thread.  Sleeps on bh_wq until woken by the
 * IRQ handler (rx), the TX path (tx), suspend/resume requests, or a
 * termination request, then services RX and TX until the device is idle.
 * A 1 s wait timeout doubles as interrupt-loss and stuck-TX detection.
 * On any fatal condition the loop exits; bh_error is set unless this was
 * a requested termination.
 */
static int cw1200_bh(void *arg)
{
	struct cw1200_common *priv = arg;
	int rx, tx, term, suspend;
	u16 ctrl_reg = 0;
	int tx_allowed;
	int pending_tx = 0;
	int tx_burst;
	long status;
	u32 dummy;
	int ret;

	for (;;) {
		/* Idle with powersave allowed: put the device to sleep
		 * (clear WLAN_UP) and still wake up once per second.
		 */
		if (!priv->hw_bufs_used &&
		    priv->powersave_enabled &&
		    !priv->device_can_sleep &&
		    !atomic_read(&priv->recent_scan)) {
			status = 1 * HZ;
			pr_debug("[BH] Device wakedown. No data.\n");
			cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0);
			priv->device_can_sleep = true;
		} else if (priv->hw_bufs_used) {
			/* Interrupt loss detection */
			status = 1 * HZ;
		} else {
			status = MAX_SCHEDULE_TIMEOUT;
		}

		/* Dummy Read for SDIO retry mechanism*/
		if ((priv->hw_type != -1) &&
		    (atomic_read(&priv->bh_rx) == 0) &&
		    (atomic_read(&priv->bh_tx) == 0))
			cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
					&dummy, sizeof(dummy));

		pr_debug("[BH] waiting ...\n");
		/* Atomically consume all wakeup reasons as the condition. */
		status = wait_event_interruptible_timeout(priv->bh_wq, ({
				rx = atomic_xchg(&priv->bh_rx, 0);
				tx = atomic_xchg(&priv->bh_tx, 0);
				term = atomic_xchg(&priv->bh_term, 0);
				suspend = pending_tx ?
					0 : atomic_read(&priv->bh_suspend);
				(rx || tx || term || suspend || priv->bh_error);
			}), status);

		pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n",
			 rx, tx, term, suspend, priv->bh_error, status);

		/* Did an error occur? */
		if ((status < 0 && status != -ERESTARTSYS) ||
		    term || priv->bh_error) {
			break;
		}
		if (!status) {  /* wait_event timed out */
			unsigned long timestamp = jiffies;
			long timeout;
			int pending = 0;
			int i;

			/* Check to see if we have any outstanding frames */
			if (priv->hw_bufs_used && (!rx || !tx)) {
				wiphy_warn(priv->hw->wiphy,
					   "Missed interrupt? (%d frames outstanding)\n",
					   priv->hw_bufs_used);
				rx = 1;

				/* Get a timestamp of "oldest" frame */
				for (i = 0; i < 4; ++i)
					pending += cw1200_queue_get_xmit_timestamp(
						&priv->tx_queue[i],
						&timestamp,
						priv->pending_frame_id);

				/* Check if frame transmission is timed out.
				 * Add an extra second with respect to possible
				 * interrupt loss.
				 */
				timeout = timestamp +
					WSM_CMD_LAST_CHANCE_TIMEOUT +
					1 * HZ -
					jiffies;

				/* And terminate BH thread if the frame is "stuck" */
				if (pending && timeout < 0) {
					wiphy_warn(priv->hw->wiphy,
						   "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n",
						   priv->hw_bufs_used, pending,
						   timestamp, jiffies);
					break;
				}
			} else if (!priv->device_can_sleep &&
				   !atomic_read(&priv->recent_scan)) {
				pr_debug("[BH] Device wakedown. Timeout.\n");
				cw1200_reg_write_16(priv,
						    ST90TDS_CONTROL_REG_ID, 0);
				priv->device_can_sleep = true;
			}
			goto done;
		} else if (suspend) {
			pr_debug("[BH] Device suspend.\n");
			if (priv->powersave_enabled) {
				pr_debug("[BH] Device wakedown. Suspend.\n");
				cw1200_reg_write_16(priv,
						    ST90TDS_CONTROL_REG_ID, 0);
				priv->device_can_sleep = true;
			}

			/* Acknowledge the suspend, then block until resumed. */
			atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
			wake_up(&priv->bh_evt_wq);
			status = wait_event_interruptible(priv->bh_wq,
							  CW1200_BH_RESUME == atomic_read(&priv->bh_suspend));
			if (status < 0) {
				wiphy_err(priv->hw->wiphy,
					  "Failed to wait for resume: %ld.\n",
					  status);
				break;
			}
			pr_debug("[BH] Device resume.\n");
			atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
			wake_up(&priv->bh_evt_wq);
			/* Force an RX pass after resume. */
			atomic_add(1, &priv->bh_rx);
			goto done;
		}

	rx:
		tx += pending_tx;
		pending_tx = 0;

		if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
			break;

		/* Don't bother trying to rx unless we have data to read */
		if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
			ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
			if (ret < 0)
				break;
			/* Double up here if there's more data.. */
			if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
				ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
				if (ret < 0)
					break;
			}
		}

	tx:
		if (tx) {
			tx = 0;

			BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers);
			tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used;
			tx_allowed = tx_burst > 0;

			if (!tx_allowed) {
				/* Buffers full. Ensure we process tx
				 * after we handle rx..
				 *
				 * NOTE(review): tx was cleared to 0 just
				 * above, so this stores 0 into pending_tx;
				 * looks like it was meant to remember the
				 * deferred TX request -- verify.
				 */
				pending_tx = tx;
				goto done_rx;
			}

			ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst);
			if (ret < 0)
				break;
			if (ret > 0) /* More to transmit */
				tx = ret;

			/* Re-read ctrl reg */
			if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
				break;
		}

	done_rx:
		if (priv->bh_error)
			break;
		if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
			goto rx;
		if (tx)
			goto tx;

	done:
		/* Re-enable device interrupts */
		priv->hwbus_ops->lock(priv->hwbus_priv);
		__cw1200_irq_enable(priv, 1);
		priv->hwbus_ops->unlock(priv->hwbus_priv);
	}

	/* Explicitly disable device interrupts */
	priv->hwbus_ops->lock(priv->hwbus_priv);
	__cw1200_irq_enable(priv, 0);
	priv->hwbus_ops->unlock(priv->hwbus_priv);

	if (!term) {
		pr_err("[BH] Fatal error, exiting.\n");
		priv->bh_error = 1;
		/* TODO: schedule_work(recovery) */
	}
	return 0;
}
| gpl-2.0 |
Dee-UK/D33_KK_Kernel | drivers/staging/comedi/drivers/pcl812.c | 2529 | 47561 | /*
* comedi/drivers/pcl812.c
*
* Author: Michal Dobes <dobes@tesnet.cz>
*
* hardware driver for Advantech cards
* card: PCL-812, PCL-812PG, PCL-813, PCL-813B
* driver: pcl812, pcl812pg, pcl813, pcl813b
* and for ADlink cards
* card: ACL-8112DG, ACL-8112HG, ACL-8112PG, ACL-8113, ACL-8216
* driver: acl8112dg, acl8112hg, acl8112pg, acl8113, acl8216
* and for ICP DAS cards
* card: ISO-813, A-821PGH, A-821PGL, A-821PGL-NDA, A-822PGH, A-822PGL,
* driver: iso813, a821pgh, a-821pgl, a-821pglnda, a822pgh, a822pgl,
* card: A-823PGH, A-823PGL, A-826PG
* driver: a823pgh, a823pgl, a826pg
*/
/*
* Driver: pcl812
* Description: Advantech PCL-812/PG, PCL-813/B,
* ADLink ACL-8112DG/HG/PG, ACL-8113, ACL-8216,
* ICP DAS A-821PGH/PGL/PGL-NDA, A-822PGH/PGL, A-823PGH/PGL, A-826PG,
* ICP DAS ISO-813
* Author: Michal Dobes <dobes@tesnet.cz>
* Devices: [Advantech] PCL-812 (pcl812), PCL-812PG (pcl812pg),
* PCL-813 (pcl813), PCL-813B (pcl813b), [ADLink] ACL-8112DG (acl8112dg),
* ACL-8112HG (acl8112hg), ACL-8113 (acl-8113), ACL-8216 (acl8216),
* [ICP] ISO-813 (iso813), A-821PGH (a821pgh), A-821PGL (a821pgl),
* A-821PGL-NDA (a821pclnda), A-822PGH (a822pgh), A-822PGL (a822pgl),
* A-823PGH (a823pgh), A-823PGL (a823pgl), A-826PG (a826pg)
* Updated: Mon, 06 Aug 2007 12:03:15 +0100
 * Status: works (I hope. My board fired up under my hands
 * and I can't test all features.)
*
* This driver supports insn and cmd interfaces. Some boards support only insn
* because their hardware don't allow more (PCL-813/B, ACL-8113, ISO-813).
 * Data transfer over DMA is supported only when you measure only one
 * channel; this is a hardware limitation of these boards.
*
* Options for PCL-812:
* [0] - IO Base
* [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
* [2] - DMA (0=disable, 1, 3)
* [3] - 0=trigger source is internal 8253 with 2MHz clock
* 1=trigger source is external
* [4] - 0=A/D input range is +/-10V
* 1=A/D input range is +/-5V
* 2=A/D input range is +/-2.5V
* 3=A/D input range is +/-1.25V
* 4=A/D input range is +/-0.625V
* 5=A/D input range is +/-0.3125V
* [5] - 0=D/A outputs 0-5V (internal reference -5V)
* 1=D/A outputs 0-10V (internal reference -10V)
* 2=D/A outputs unknown (external reference)
*
* Options for PCL-812PG, ACL-8112PG:
* [0] - IO Base
* [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
* [2] - DMA (0=disable, 1, 3)
* [3] - 0=trigger source is internal 8253 with 2MHz clock
* 1=trigger source is external
* [4] - 0=A/D have max +/-5V input
* 1=A/D have max +/-10V input
* [5] - 0=D/A outputs 0-5V (internal reference -5V)
* 1=D/A outputs 0-10V (internal reference -10V)
* 2=D/A outputs unknown (external reference)
*
* Options for ACL-8112DG/HG, A-822PGL/PGH, A-823PGL/PGH, ACL-8216, A-826PG:
* [0] - IO Base
* [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
* [2] - DMA (0=disable, 1, 3)
* [3] - 0=trigger source is internal 8253 with 2MHz clock
* 1=trigger source is external
* [4] - 0=A/D channels are S.E.
* 1=A/D channels are DIFF
* [5] - 0=D/A outputs 0-5V (internal reference -5V)
* 1=D/A outputs 0-10V (internal reference -10V)
* 2=D/A outputs unknown (external reference)
*
* Options for A-821PGL/PGH:
* [0] - IO Base
* [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
* [2] - 0=A/D channels are S.E.
* 1=A/D channels are DIFF
* [3] - 0=D/A output 0-5V (internal reference -5V)
* 1=D/A output 0-10V (internal reference -10V)
*
* Options for A-821PGL-NDA:
* [0] - IO Base
* [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
* [2] - 0=A/D channels are S.E.
* 1=A/D channels are DIFF
*
* Options for PCL-813:
* [0] - IO Base
*
* Options for PCL-813B:
* [0] - IO Base
* [1] - 0= bipolar inputs
* 1= unipolar inputs
*
* Options for ACL-8113, ISO-813:
* [0] - IO Base
* [1] - 0= 10V bipolar inputs
* 1= 10V unipolar inputs
* 2= 20V bipolar inputs
* 3= 20V unipolar inputs
*/
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include "../comedidev.h"
#include <linux/delay.h>
#include <linux/ioport.h>
#include <asm/dma.h>
#include "8253.h"
/* if this is defined then a lot of messages is printed */
#undef PCL812_EXTDEBUG
/* hardware types of the cards */
#define boardPCL812PG 0	/* and ACL-8112PG */
#define boardPCL813B 1
#define boardPCL812 2
#define boardPCL813 3
#define boardISO813 5
#define boardACL8113 6
#define boardACL8112 7	/* ACL-8112DG/HG, A-822PGL/PGH, A-823PGL/PGH */
#define boardACL8216 8	/* and ICP DAS A-826PG */
#define boardA821 9	/* PGH, PGL, PGL/NDA versions */

/* size of the I/O region occupied by these boards */
#define PCLx1x_IORANGE 16

/* register offsets, relative to dev->iobase */
#define PCL812_CTR0 0
#define PCL812_CTR1 1
#define PCL812_CTR2 2
#define PCL812_CTRCTL 3
#define PCL812_AD_LO 4
#define PCL812_DA1_LO 4
#define PCL812_AD_HI 5
#define PCL812_DA1_HI 5
#define PCL812_DA2_LO 6
#define PCL812_DI_LO 6
#define PCL812_DA2_HI 7
#define PCL812_DI_HI 7
#define PCL812_CLRINT 8
#define PCL812_GAIN 9
#define PCL812_MUX 10
#define PCL812_MODE 11
#define PCL812_CNTENABLE 10
#define PCL812_SOFTTRIG 12
#define PCL812_DO_LO 13
#define PCL812_DO_HI 14

/* status bits */
#define PCL812_DRDY 0x10	/* =0 data ready */
#define ACL8216_STATUS 8	/* bit 5 signals data ready */
#define ACL8216_DRDY 0x20	/* =0 data ready */

#define MAX_CHANLIST_LEN 256	/* length of scan list */
/*
 * Analog input range tables for the supported board families; the
 * appropriate table is selected per board in boardtypes[] below.
 */
static const struct comedi_lrange range_pcl812pg_ai = { 5, {
	BIP_RANGE(5),
	BIP_RANGE(2.5),
	BIP_RANGE(1.25),
	BIP_RANGE(0.625),
	BIP_RANGE(0.3125),
	}
};

static const struct comedi_lrange range_pcl812pg2_ai = { 5, {
	BIP_RANGE(10),
	BIP_RANGE(5),
	BIP_RANGE(2.5),
	BIP_RANGE(1.25),
	BIP_RANGE(0.625),
	}
};

static const struct comedi_lrange range812_bipolar1_25 = { 1, {
	BIP_RANGE(1.25),
	}
};

static const struct comedi_lrange range812_bipolar0_625 = { 1, {
	BIP_RANGE(0.625),
	}
};

static const struct comedi_lrange range812_bipolar0_3125 = { 1, {
	BIP_RANGE(0.3125),
	}
};

static const struct comedi_lrange range_pcl813b_ai = { 4, {
	BIP_RANGE(5),
	BIP_RANGE(2.5),
	BIP_RANGE(1.25),
	BIP_RANGE(0.625),
	}
};

static const struct comedi_lrange range_pcl813b2_ai = { 4, {
	UNI_RANGE(10),
	UNI_RANGE(5),
	UNI_RANGE(2.5),
	UNI_RANGE(1.25),
	}
};

static const struct comedi_lrange range_iso813_1_ai = { 5, {
	BIP_RANGE(5),
	BIP_RANGE(2.5),
	BIP_RANGE(1.25),
	BIP_RANGE(0.625),
	BIP_RANGE(0.3125),
	}
};

static const struct comedi_lrange range_iso813_1_2_ai = { 5, {
	UNI_RANGE(10),
	UNI_RANGE(5),
	UNI_RANGE(2.5),
	UNI_RANGE(1.25),
	UNI_RANGE(0.625),
	}
};

static const struct comedi_lrange range_iso813_2_ai = { 4, {
	BIP_RANGE(5),
	BIP_RANGE(2.5),
	BIP_RANGE(1.25),
	BIP_RANGE(0.625),
	}
};

static const struct comedi_lrange range_iso813_2_2_ai = { 4, {
	UNI_RANGE(10),
	UNI_RANGE(5),
	UNI_RANGE(2.5),
	UNI_RANGE(1.25),
	}
};

static const struct comedi_lrange range_acl8113_1_ai = { 4, {
	BIP_RANGE(5),
	BIP_RANGE(2.5),
	BIP_RANGE(1.25),
	BIP_RANGE(0.625),
	}
};

static const struct comedi_lrange range_acl8113_1_2_ai = { 4, {
	UNI_RANGE(10),
	UNI_RANGE(5),
	UNI_RANGE(2.5),
	UNI_RANGE(1.25),
	}
};

static const struct comedi_lrange range_acl8113_2_ai = { 3, {
	BIP_RANGE(5),
	BIP_RANGE(2.5),
	BIP_RANGE(1.25),
	}
};

static const struct comedi_lrange range_acl8113_2_2_ai = { 3, {
	UNI_RANGE(10),
	UNI_RANGE(5),
	UNI_RANGE(2.5),
	}
};

static const struct comedi_lrange range_acl8112dg_ai = { 9, {
	BIP_RANGE(5),
	BIP_RANGE(2.5),
	BIP_RANGE(1.25),
	BIP_RANGE(0.625),
	UNI_RANGE(10),
	UNI_RANGE(5),
	UNI_RANGE(2.5),
	UNI_RANGE(1.25),
	BIP_RANGE(10),
	}
};

static const struct comedi_lrange range_acl8112hg_ai = { 12, {
	BIP_RANGE(5),
	BIP_RANGE(0.5),
	BIP_RANGE(0.05),
	BIP_RANGE(0.005),
	UNI_RANGE(10),
	UNI_RANGE(1),
	UNI_RANGE(0.1),
	UNI_RANGE(0.01),
	BIP_RANGE(10),
	BIP_RANGE(1),
	BIP_RANGE(0.1),
	BIP_RANGE(0.01),
	}
};

static const struct comedi_lrange range_a821pgh_ai = { 4, {
	BIP_RANGE(5),
	BIP_RANGE(0.5),
	BIP_RANGE(0.05),
	BIP_RANGE(0.005),
	}
};
/* forward declarations */
static int pcl812_attach(struct comedi_device *dev,
			 struct comedi_devconfig *it);
static int pcl812_detach(struct comedi_device *dev);

/* static description of one supported board model */
struct pcl812_board {
	const char *name;	/* board name */
	int board_type;		/* type of this board (board* constant) */
	int n_aichan;		/* num of AI chans in S.E. */
	int n_aichan_diff;	/* DIFF num of chans */
	int n_aochan;		/* num of DA chans */
	int n_dichan;		/* DI and DO chans */
	int n_dochan;
	int ai_maxdata;		/* AI resolution */
	unsigned int ai_ns_min;	/* max sample speed of card in ns */
	unsigned int i8254_osc_base;	/* clock base */
	const struct comedi_lrange *rangelist_ai;	/* rangelist for A/D */
	const struct comedi_lrange *rangelist_ao;	/* rangelist for D/A */
	unsigned int IRQbits;	/* allowed IRQ */
	unsigned char DMAbits;	/* allowed DMA chans */
	unsigned char io_range;	/* iorange for this board */
	unsigned char haveMPC508;	/* 1=board use MPC508A multiplexor */
};
/*
 * Board table; positional initializers follow struct pcl812_board:
 * name, board_type, n_aichan, n_aichan_diff, n_aochan, n_dichan,
 * n_dochan, ai_maxdata, ai_ns_min, i8254_osc_base, rangelist_ai,
 * rangelist_ao, IRQbits, DMAbits, io_range, haveMPC508.
 */
static const struct pcl812_board boardtypes[] = {
	{"pcl812", boardPCL812, 16, 0, 2, 16, 16, 0x0fff,
	 33000, 500, &range_bipolar10, &range_unipolar5,
	 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
	{"pcl812pg", boardPCL812PG, 16, 0, 2, 16, 16, 0x0fff,
	 33000, 500, &range_pcl812pg_ai, &range_unipolar5,
	 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
	{"acl8112pg", boardPCL812PG, 16, 0, 2, 16, 16, 0x0fff,
	 10000, 500, &range_pcl812pg_ai, &range_unipolar5,
	 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
	{"acl8112dg", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
	 10000, 500, &range_acl8112dg_ai, &range_unipolar5,
	 0xdcfc, 0x0a, PCLx1x_IORANGE, 1},
	{"acl8112hg", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
	 10000, 500, &range_acl8112hg_ai, &range_unipolar5,
	 0xdcfc, 0x0a, PCLx1x_IORANGE, 1},
	{"a821pgl", boardA821, 16, 8, 1, 16, 16, 0x0fff,
	 10000, 500, &range_pcl813b_ai, &range_unipolar5,
	 0x000c, 0x00, PCLx1x_IORANGE, 0},
	{"a821pglnda", boardA821, 16, 8, 0, 0, 0, 0x0fff,
	 10000, 500, &range_pcl813b_ai, NULL,
	 0x000c, 0x00, PCLx1x_IORANGE, 0},
	{"a821pgh", boardA821, 16, 8, 1, 16, 16, 0x0fff,
	 10000, 500, &range_a821pgh_ai, &range_unipolar5,
	 0x000c, 0x00, PCLx1x_IORANGE, 0},
	{"a822pgl", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
	 10000, 500, &range_acl8112dg_ai, &range_unipolar5,
	 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
	{"a822pgh", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
	 10000, 500, &range_acl8112hg_ai, &range_unipolar5,
	 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
	{"a823pgl", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
	 8000, 500, &range_acl8112dg_ai, &range_unipolar5,
	 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
	{"a823pgh", boardACL8112, 16, 8, 2, 16, 16, 0x0fff,
	 8000, 500, &range_acl8112hg_ai, &range_unipolar5,
	 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
	{"pcl813", boardPCL813, 32, 0, 0, 0, 0, 0x0fff,
	 0, 0, &range_pcl813b_ai, NULL,
	 0x0000, 0x00, PCLx1x_IORANGE, 0},
	{"pcl813b", boardPCL813B, 32, 0, 0, 0, 0, 0x0fff,
	 0, 0, &range_pcl813b_ai, NULL,
	 0x0000, 0x00, PCLx1x_IORANGE, 0},
	{"acl8113", boardACL8113, 32, 0, 0, 0, 0, 0x0fff,
	 0, 0, &range_acl8113_1_ai, NULL,
	 0x0000, 0x00, PCLx1x_IORANGE, 0},
	{"iso813", boardISO813, 32, 0, 0, 0, 0, 0x0fff,
	 0, 0, &range_iso813_1_ai, NULL,
	 0x0000, 0x00, PCLx1x_IORANGE, 0},
	{"acl8216", boardACL8216, 16, 8, 2, 16, 16, 0xffff,
	 10000, 500, &range_pcl813b2_ai, &range_unipolar5,
	 0xdcfc, 0x0a, PCLx1x_IORANGE, 1},
	{"a826pg", boardACL8216, 16, 8, 2, 16, 16, 0xffff,
	 10000, 500, &range_pcl813b2_ai, &range_unipolar5,
	 0xdcfc, 0x0a, PCLx1x_IORANGE, 0},
};
/* number of entries in boardtypes[] */
#define n_boardtypes (sizeof(boardtypes)/sizeof(struct pcl812_board))
/* board description attached to this comedi device */
#define this_board ((const struct pcl812_board *)dev->board_ptr)

static struct comedi_driver driver_pcl812 = {
	.driver_name = "pcl812",
	.module = THIS_MODULE,
	.attach = pcl812_attach,
	.detach = pcl812_detach,
	.board_name = &boardtypes[0].name,
	.num_names = n_boardtypes,
	.offset = sizeof(struct pcl812_board),
};
/* register the driver with the comedi core on module load */
static int __init driver_pcl812_init_module(void)
{
	return comedi_driver_register(&driver_pcl812);
}

/* unregister the driver on module unload */
static void __exit driver_pcl812_cleanup_module(void)
{
	comedi_driver_unregister(&driver_pcl812);
}

module_init(driver_pcl812_init_module);
module_exit(driver_pcl812_cleanup_module);
/* per-device runtime state */
struct pcl812_private {
	unsigned char valid;	/* =1 device is OK */
	unsigned char dma;	/* >0 use dma ( usedDMA channel) */
	unsigned char use_diff;	/* =1 diff inputs */
	unsigned char use_MPC;	/* 1=board uses MPC508A multiplexor */
	unsigned char use_ext_trg;	/* 1=board uses external trigger */
	unsigned char range_correction;	/* =1 we must add 1 to range number */
	unsigned char old_chan_reg;	/* lastly used chan/gain pair */
	unsigned char old_gain_reg;
	unsigned char mode_reg_int;	/* there is stored INT number for some card */
	unsigned char ai_neverending;	/* =1 we do unlimited AI */
	unsigned char ai_eos;	/* 1=EOS wake up */
	unsigned char ai_dma;	/* =1 we use DMA */
	unsigned int ai_poll_ptr;	/* how many samples were transferred by poll */
	unsigned int ai_scans;	/* len of scanlist */
	unsigned int ai_act_scan;	/* how many scans we finished */
	unsigned int ai_chanlist[MAX_CHANLIST_LEN];	/* our copy of channel/range list */
	unsigned int ai_n_chan;	/* how many channels is measured */
	unsigned int ai_flags;	/* flaglist */
	unsigned int ai_data_len;	/* len of data buffer */
	short *ai_data;		/* data buffer */
	unsigned int ai_is16b;	/* =1 we have 16 bit card */
	unsigned long dmabuf[2];	/* PTR to DMA buf */
	unsigned int dmapages[2];	/* how many pages we have allocated */
	unsigned int hwdmaptr[2];	/* HW PTR to DMA buf */
	unsigned int hwdmasize[2];	/* DMA buf size in bytes */
	unsigned int dmabytestomove[2];	/* how many bytes DMA transfer */
	int next_dma_buf;	/* which buffer is next to use */
	unsigned int dma_runs_to_end;	/* how many times we must switch DMA buffers */
	unsigned int last_dma_run;	/* how many bytes to transfer on last DMA buffer */
	unsigned int max_812_ai_mode0_rangewait;	/* settling time after a gain change */
	unsigned int ao_readback[2];	/* data for AO readback */
};

/* shortcut to this device's pcl812_private */
#define devpriv ((struct pcl812_private *)dev->private)
/*
==============================================================================
*/
static void start_pacer(struct comedi_device *dev, int mode,
unsigned int divisor1, unsigned int divisor2);
static void setup_range_channel(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int rangechan, char wait);
static int pcl812_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s);
/*
==============================================================================
*/
/*
 * Software-triggered A/D read for 12-bit boards.
 * Performs insn->n conversions on the requested channel/range, busy-
 * waiting (max 50 us each) for the DRDY bit to clear.
 * Returns the number of samples read, or -ETIME on conversion timeout.
 */
static int pcl812_ai_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	int n;
	int timeout, hi;

	/* select software trigger */
	outb(devpriv->mode_reg_int | 1, dev->iobase + PCL812_MODE);
	/* select channel and range */
	setup_range_channel(dev, s, insn->chanspec, 1);
	for (n = 0; n < insn->n; n++) {
		/* start conversion */
		outb(255, dev->iobase + PCL812_SOFTTRIG);
		udelay(5);
		timeout = 50;	/* wait max 50us, it must finish under 33us */
		while (timeout--) {
			hi = inb(dev->iobase + PCL812_AD_HI);
			if (!(hi & PCL812_DRDY))
				goto conv_finish;
			udelay(1);
		}
		printk
		    ("comedi%d: pcl812: (%s at 0x%lx) A/D insn read timeout\n",
		     dev->minor, dev->board_name, dev->iobase);
		/* clear the trigger mode before bailing out */
		outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
		return -ETIME;

conv_finish:
		/* 12-bit result: high nibble from AD_HI, low byte from AD_LO */
		data[n] = ((hi & 0xf) << 8) | inb(dev->iobase + PCL812_AD_LO);
	}
	outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
	return n;
}
/*
==============================================================================
*/
/*
 * Software-triggered A/D read for the 16-bit ACL-8216/A-826PG boards.
 * Same flow as pcl812_ai_insn_read() but polls the separate ACL8216
 * status register and assembles a full 16-bit result.
 * Returns the number of samples read, or -ETIME on conversion timeout.
 */
static int acl8216_ai_insn_read(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	int n;
	int timeout;

	/* select software trigger */
	outb(1, dev->iobase + PCL812_MODE);
	/* select channel and range */
	setup_range_channel(dev, s, insn->chanspec, 1);
	for (n = 0; n < insn->n; n++) {
		/* start conversion */
		outb(255, dev->iobase + PCL812_SOFTTRIG);
		udelay(5);
		timeout = 50;	/* wait max 50us, it must finish under 33us */
		while (timeout--) {
			if (!(inb(dev->iobase + ACL8216_STATUS) & ACL8216_DRDY))
				goto conv_finish;
			udelay(1);
		}
		printk
		    ("comedi%d: pcl812: (%s at 0x%lx) A/D insn read timeout\n",
		     dev->minor, dev->board_name, dev->iobase);
		/* clear the trigger mode before bailing out */
		outb(0, dev->iobase + PCL812_MODE);
		return -ETIME;

conv_finish:
		data[n] =
		    (inb(dev->iobase +
			 PCL812_AD_HI) << 8) | inb(dev->iobase + PCL812_AD_LO);
	}
	outb(0, dev->iobase + PCL812_MODE);
	return n;
}
/*
==============================================================================
*/
/*
 * Write insn->n samples to one of the two 12-bit D/A channels
 * (low byte first, then the high nibble) and remember the last value
 * for readback.  Returns the number of samples written.
 */
static int pcl812_ao_insn_write(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);
	unsigned long lo_port =
	    dev->iobase + (chan ? PCL812_DA2_LO : PCL812_DA1_LO);
	unsigned long hi_port =
	    dev->iobase + (chan ? PCL812_DA2_HI : PCL812_DA1_HI);
	int n;

	for (n = 0; n < insn->n; n++) {
		unsigned int sample = data[n];

		outb(sample & 0xff, lo_port);
		outb((sample >> 8) & 0x0f, hi_port);
		devpriv->ao_readback[chan] = sample;
	}

	return n;
}
/*
==============================================================================
*/
/*
 * Read back the last value written to a D/A channel (the hardware
 * itself is write-only).  Returns the number of samples provided.
 */
static int pcl812_ao_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);
	int n = 0;

	while (n < insn->n) {
		data[n] = devpriv->ao_readback[chan];
		n++;
	}

	return n;
}
/*
==============================================================================
*/
/*
 * Read all 16 digital input lines (low byte then high byte) into data[1].
 * Returns 2 (the comedi insn-bits convention) or -EINVAL.
 */
static int pcl812_di_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	unsigned int bits;

	if (insn->n != 2)
		return -EINVAL;

	bits = inb(dev->iobase + PCL812_DI_LO);
	bits |= inb(dev->iobase + PCL812_DI_HI) << 8;
	data[1] = bits;

	return 2;
}
/*
==============================================================================
*/
/*
 * Update the digital outputs: data[0] is the mask of bits to change,
 * data[1] the new values.  The cached s->state is mirrored to both
 * output ports; data[1] returns the resulting state.
 * Returns 2 (the comedi insn-bits convention) or -EINVAL.
 */
static int pcl812_do_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	unsigned int mask;

	if (insn->n != 2)
		return -EINVAL;

	mask = data[0];
	if (mask) {
		s->state = (s->state & ~mask) | (data[1] & mask);
		outb(s->state & 0xff, dev->iobase + PCL812_DO_LO);
		outb(s->state >> 8, dev->iobase + PCL812_DO_HI);
	}
	data[1] = s->state;

	return 2;
}
#ifdef PCL812_EXTDEBUG
/*
==============================================================================
*/
/*
 * Dump a comedi_cmd for PCL812_EXTDEBUG tracing; `e` tags which
 * cmdtest step produced the output (-1 = on entry).
 */
static void pcl812_cmdtest_out(int e, struct comedi_cmd *cmd)
{
	printk(KERN_INFO "pcl812 e=%d startsrc=%x scansrc=%x convsrc=%x\n", e,
	       cmd->start_src, cmd->scan_begin_src, cmd->convert_src);
	printk(KERN_INFO "pcl812 e=%d startarg=%d scanarg=%d convarg=%d\n", e,
	       cmd->start_arg, cmd->scan_begin_arg, cmd->convert_arg);
	printk(KERN_INFO "pcl812 e=%d stopsrc=%x scanend=%x\n", e,
	       cmd->stop_src, cmd->scan_end_src);
	printk(KERN_INFO "pcl812 e=%d stoparg=%d scanendarg=%d "
	       "chanlistlen=%d\n", e, cmd->stop_arg, cmd->scan_end_arg,
	       cmd->chanlist_len);
}
#endif
/*
==============================================================================
*/
/*
 * Validate and adjust an asynchronous AI command, following the comedi
 * staged cmdtest convention: returns 0 when the command is acceptable,
 * otherwise the step number (1-4) at which problems were found/fixed.
 * The cmd fields may be modified in place so userspace can retry.
 */
static int pcl812_ai_cmdtest(struct comedi_device *dev,
			     struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp, divisor1, divisor2;

#ifdef PCL812_EXTDEBUG
	printk("pcl812 EDBG: BGN: pcl812_ai_cmdtest(...)\n");
	pcl812_cmdtest_out(-1, cmd);
#endif
	/* step 1: make sure trigger sources are trivially valid */

	tmp = cmd->start_src;
	cmd->start_src &= TRIG_NOW;
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;

	tmp = cmd->scan_begin_src;
	cmd->scan_begin_src &= TRIG_FOLLOW;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;

	tmp = cmd->convert_src;
	/* conversion trigger depends on the board's wiring option */
	if (devpriv->use_ext_trg)
		cmd->convert_src &= TRIG_EXT;
	else
		cmd->convert_src &= TRIG_TIMER;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;

	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;

	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_COUNT | TRIG_NONE;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;

	if (err) {
#ifdef PCL812_EXTDEBUG
		pcl812_cmdtest_out(1, cmd);
		printk
		    ("pcl812 EDBG: BGN: pcl812_ai_cmdtest(...) err=%d ret=1\n",
		     err);
#endif
		return 1;
	}

	/*
	 * step 2: make sure trigger sources are
	 * unique and mutually compatible
	 */

	if (cmd->start_src != TRIG_NOW) {
		cmd->start_src = TRIG_NOW;
		err++;
	}
	if (cmd->scan_begin_src != TRIG_FOLLOW) {
		cmd->scan_begin_src = TRIG_FOLLOW;
		err++;
	}
	if (devpriv->use_ext_trg) {
		if (cmd->convert_src != TRIG_EXT) {
			cmd->convert_src = TRIG_EXT;
			err++;
		}
	} else {
		if (cmd->convert_src != TRIG_TIMER) {
			cmd->convert_src = TRIG_TIMER;
			err++;
		}
	}
	if (cmd->scan_end_src != TRIG_COUNT) {
		cmd->scan_end_src = TRIG_COUNT;
		err++;
	}
	if (cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_COUNT)
		err++;

	if (err) {
#ifdef PCL812_EXTDEBUG
		pcl812_cmdtest_out(2, cmd);
		printk
		    ("pcl812 EDBG: BGN: pcl812_ai_cmdtest(...) err=%d ret=2\n",
		     err);
#endif
		return 2;
	}

	/* step 3: make sure arguments are trivially compatible */

	if (cmd->start_arg != 0) {
		cmd->start_arg = 0;
		err++;
	}
	if (cmd->scan_begin_arg != 0) {
		cmd->scan_begin_arg = 0;
		err++;
	}
	if (cmd->convert_src == TRIG_TIMER) {
		/* clamp the conversion period to the board's maximum rate */
		if (cmd->convert_arg < this_board->ai_ns_min) {
			cmd->convert_arg = this_board->ai_ns_min;
			err++;
		}
	} else {		/* TRIG_EXT */
		if (cmd->convert_arg != 0) {
			cmd->convert_arg = 0;
			err++;
		}
	}
	if (!cmd->chanlist_len) {
		cmd->chanlist_len = 1;
		err++;
	}
	if (cmd->chanlist_len > MAX_CHANLIST_LEN) {
		cmd->chanlist_len = this_board->n_aichan;
		err++;
	}
	if (cmd->scan_end_arg != cmd->chanlist_len) {
		cmd->scan_end_arg = cmd->chanlist_len;
		err++;
	}
	if (cmd->stop_src == TRIG_COUNT) {
		if (!cmd->stop_arg) {
			cmd->stop_arg = 1;
			err++;
		}
	} else {		/* TRIG_NONE */
		if (cmd->stop_arg != 0) {
			cmd->stop_arg = 0;
			err++;
		}
	}

	if (err) {
#ifdef PCL812_EXTDEBUG
		pcl812_cmdtest_out(3, cmd);
		printk
		    ("pcl812 EDBG: BGN: pcl812_ai_cmdtest(...) err=%d ret=3\n",
		     err);
#endif
		return 3;
	}

	/* step 4: fix up any arguments */

	if (cmd->convert_src == TRIG_TIMER) {
		/* round the period to what the 8254 cascade can produce */
		tmp = cmd->convert_arg;
		i8253_cascade_ns_to_timer(this_board->i8254_osc_base, &divisor1,
					  &divisor2, &cmd->convert_arg,
					  cmd->flags & TRIG_ROUND_MASK);
		if (cmd->convert_arg < this_board->ai_ns_min)
			cmd->convert_arg = this_board->ai_ns_min;
		if (tmp != cmd->convert_arg)
			err++;
	}

	if (err) {
#ifdef PCL812_EXTDEBUG
		printk
		    ("pcl812 EDBG: BGN: pcl812_ai_cmdtest(...) err=%d ret=4\n",
		     err);
#endif
		return 4;
	}

	return 0;
}
/*
==============================================================================
*/
/*
 * Start an asynchronous AI acquisition previously validated by
 * pcl812_ai_cmdtest().  Chooses between DMA (only possible when every
 * chanlist entry is the same channel -- a hardware limitation), sizes
 * the DMA buffers, programs the 8254 pacer when TRIG_TIMER is used,
 * and finally arms the board's trigger mode register.
 * Returns 0 on success or -EINVAL on an inconsistent command.
 */
static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
	unsigned int divisor1 = 0, divisor2 = 0, i, dma_flags, bytes;
	struct comedi_cmd *cmd = &s->async->cmd;

#ifdef PCL812_EXTDEBUG
	printk(KERN_DEBUG "pcl812 EDBG: BGN: pcl812_ai_cmd(...)\n");
#endif

	if (cmd->start_src != TRIG_NOW)
		return -EINVAL;
	if (cmd->scan_begin_src != TRIG_FOLLOW)
		return -EINVAL;
	if (devpriv->use_ext_trg) {
		if (cmd->convert_src != TRIG_EXT)
			return -EINVAL;
	} else {
		if (cmd->convert_src != TRIG_TIMER)
			return -EINVAL;
	}
	if (cmd->scan_end_src != TRIG_COUNT)
		return -EINVAL;
	if (cmd->scan_end_arg != cmd->chanlist_len)
		return -EINVAL;
	if (cmd->chanlist_len > MAX_CHANLIST_LEN)
		return -EINVAL;

	if (cmd->convert_src == TRIG_TIMER) {
		if (cmd->convert_arg < this_board->ai_ns_min)
			cmd->convert_arg = this_board->ai_ns_min;
		i8253_cascade_ns_to_timer(this_board->i8254_osc_base,
					  &divisor1, &divisor2,
					  &cmd->convert_arg,
					  cmd->flags & TRIG_ROUND_MASK);
	}

	start_pacer(dev, -1, 0, 0);	/* stop pacer */

	devpriv->ai_n_chan = cmd->chanlist_len;
	memcpy(devpriv->ai_chanlist, cmd->chanlist,
	       sizeof(unsigned int) * cmd->scan_end_arg);
	/* select first channel and range */
	setup_range_channel(dev, s, devpriv->ai_chanlist[0], 1);

	if (devpriv->dma) {	/* check if we can use DMA transfer */
		devpriv->ai_dma = 1;
		for (i = 1; i < devpriv->ai_n_chan; i++)
			if (devpriv->ai_chanlist[0] != devpriv->ai_chanlist[i]) {
				/* we can't use DMA :-( */
				devpriv->ai_dma = 0;
				break;
			}
	} else
		devpriv->ai_dma = 0;

	devpriv->ai_flags = cmd->flags;
	devpriv->ai_data_len = s->async->prealloc_bufsz;
	devpriv->ai_data = s->async->prealloc_buf;
	if (cmd->stop_src == TRIG_COUNT) {
		devpriv->ai_scans = cmd->stop_arg;
		devpriv->ai_neverending = 0;
	} else {
		devpriv->ai_scans = 0;
		devpriv->ai_neverending = 1;
	}

	devpriv->ai_act_scan = 0;
	devpriv->ai_poll_ptr = 0;
	s->async->cur_chan = 0;

	/* don't we want wake up every scan? */
	if ((devpriv->ai_flags & TRIG_WAKE_EOS)) {
		devpriv->ai_eos = 1;

		/* DMA is useless for this situation */
		if (devpriv->ai_n_chan == 1)
			devpriv->ai_dma = 0;
	}

	if (devpriv->ai_dma) {
		/* we use EOS, so adapt DMA buffer to one scan */
		if (devpriv->ai_eos) {
			devpriv->dmabytestomove[0] =
			    devpriv->ai_n_chan * sizeof(short);
			devpriv->dmabytestomove[1] =
			    devpriv->ai_n_chan * sizeof(short);
			devpriv->dma_runs_to_end = 1;
		} else {
			devpriv->dmabytestomove[0] = devpriv->hwdmasize[0];
			devpriv->dmabytestomove[1] = devpriv->hwdmasize[1];
			if (devpriv->ai_data_len < devpriv->hwdmasize[0])
				devpriv->dmabytestomove[0] =
				    devpriv->ai_data_len;
			if (devpriv->ai_data_len < devpriv->hwdmasize[1])
				devpriv->dmabytestomove[1] =
				    devpriv->ai_data_len;
			if (devpriv->ai_neverending) {
				devpriv->dma_runs_to_end = 1;
			} else {
				/* how many samples we must transfer? */
				bytes = devpriv->ai_n_chan *
					devpriv->ai_scans * sizeof(short);

				/* how many DMA pages we must fill */
				devpriv->dma_runs_to_end =
					bytes / devpriv->dmabytestomove[0];

				/* on last dma transfer must be moved */
				devpriv->last_dma_run =
					bytes % devpriv->dmabytestomove[0];
				if (devpriv->dma_runs_to_end == 0)
					devpriv->dmabytestomove[0] =
					    devpriv->last_dma_run;
				devpriv->dma_runs_to_end--;
			}
		}
		if (devpriv->dmabytestomove[0] > devpriv->hwdmasize[0]) {
			devpriv->dmabytestomove[0] = devpriv->hwdmasize[0];
			devpriv->ai_eos = 0;
		}
		if (devpriv->dmabytestomove[1] > devpriv->hwdmasize[1]) {
			devpriv->dmabytestomove[1] = devpriv->hwdmasize[1];
			devpriv->ai_eos = 0;
		}
		devpriv->next_dma_buf = 0;
		/* program the ISA DMA controller for the first buffer */
		set_dma_mode(devpriv->dma, DMA_MODE_READ);
		dma_flags = claim_dma_lock();
		clear_dma_ff(devpriv->dma);
		set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
		set_dma_count(devpriv->dma, devpriv->dmabytestomove[0]);
		release_dma_lock(dma_flags);
		enable_dma(devpriv->dma);
#ifdef PCL812_EXTDEBUG
		printk
		    ("pcl812 EDBG: DMA %d PTR 0x%0x/0x%0x LEN %u/%u EOS %d\n",
		     devpriv->dma, devpriv->hwdmaptr[0],
		     devpriv->hwdmaptr[1], devpriv->dmabytestomove[0],
		     devpriv->dmabytestomove[1], devpriv->ai_eos);
#endif
	}

	switch (cmd->convert_src) {
	case TRIG_TIMER:
		start_pacer(dev, 1, divisor1, divisor2);
		break;
	}

	if (devpriv->ai_dma)	/* let's go! */
		outb(devpriv->mode_reg_int | 2, dev->iobase + PCL812_MODE);
	else			/* let's go! */
		outb(devpriv->mode_reg_int | 6, dev->iobase + PCL812_MODE);

#ifdef PCL812_EXTDEBUG
	printk(KERN_DEBUG "pcl812 EDBG: END: pcl812_ai_cmd(...)\n");
#endif

	return 0;
}
/*
==============================================================================
*/
/*
 * Interrupt handler for programmed-I/O (non-DMA) analog input acquisition.
 *
 * On each end-of-conversion interrupt: wait briefly for data-ready, read
 * one sample into the comedi buffer, pre-select the next channel's
 * mux/gain, and terminate the acquisition once the requested number of
 * scans has been collected.  Always returns IRQ_HANDLED.
 */
static irqreturn_t interrupt_pcl812_ai_int(int irq, void *d)
{
char err = 1;
unsigned int mask, timeout;
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->subdevices + 0;
unsigned int next_chan;
s->async->events = 0;
/* Poll the data-ready flag; conversion should complete well within this. */
timeout = 50; /* wait max 50us, it must finish under 33us */
if (devpriv->ai_is16b) {
/* ACL-8216: 16-bit samples, DRDY in its own status register */
mask = 0xffff;
while (timeout--) {
if (!(inb(dev->iobase + ACL8216_STATUS) & ACL8216_DRDY)) {
err = 0;
break;
}
udelay(1);
}
} else {
/* 12-bit boards: DRDY is read from the A/D high-byte register */
mask = 0x0fff;
while (timeout--) {
if (!(inb(dev->iobase + PCL812_AD_HI) & PCL812_DRDY)) {
err = 0;
break;
}
udelay(1);
}
}
if (err) {
/* Spurious interrupt or hardware fault: abort the acquisition. */
printk
("comedi%d: pcl812: (%s at 0x%lx) "
"A/D cmd IRQ without DRDY!\n",
dev->minor, dev->board_name, dev->iobase);
pcl812_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
comedi_event(dev, s);
return IRQ_HANDLED;
}
/* Read the 16-bit sample (high byte first) and mask to board resolution. */
comedi_buf_put(s->async,
((inb(dev->iobase + PCL812_AD_HI) << 8) |
inb(dev->iobase + PCL812_AD_LO)) & mask);
/* Set up next channel. Added by abbotti 2010-01-20, but untested. */
next_chan = s->async->cur_chan + 1;
if (next_chan >= devpriv->ai_n_chan)
next_chan = 0;
/* Only reprogram mux/gain when the chanlist entry actually changes. */
if (devpriv->ai_chanlist[s->async->cur_chan] !=
devpriv->ai_chanlist[next_chan])
setup_range_channel(dev, s, devpriv->ai_chanlist[next_chan], 0);
outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
s->async->cur_chan = next_chan;
if (next_chan == 0) { /* one scan done */
devpriv->ai_act_scan++;
if (!(devpriv->ai_neverending))
/* all data sampled */
if (devpriv->ai_act_scan >= devpriv->ai_scans) {
pcl812_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
}
}
comedi_event(dev, s);
return IRQ_HANDLED;
}
/*
==============================================================================
*/
/*
 * Copy 'len' samples from a DMA buffer into the comedi async buffer.
 *
 * @ptr:    virtual address of the DMA buffer (array of shorts)
 * @bufptr: sample index within the buffer to start copying from
 * @len:    number of samples to transfer
 *
 * Tracks the current channel/scan position across calls and cancels the
 * acquisition (setting COMEDI_CB_EOA) once the requested number of scans
 * has been collected, unless running in never-ending mode.
 */
static void transfer_from_dma_buf(struct comedi_device *dev,
struct comedi_subdevice *s, short *ptr,
unsigned int bufptr, unsigned int len)
{
unsigned int i;
s->async->events = 0;
for (i = len; i; i--) {
/* get one sample */
comedi_buf_put(s->async, ptr[bufptr++]);
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
/* wrapped around the chanlist: one full scan completed */
s->async->cur_chan = 0;
devpriv->ai_act_scan++;
if (!devpriv->ai_neverending)
/* all data sampled */
if (devpriv->ai_act_scan >= devpriv->ai_scans) {
pcl812_ai_cancel(dev, s);
s->async->events |= COMEDI_CB_EOA;
break;
}
}
}
comedi_event(dev, s);
}
/*
==============================================================================
*/
/*
 * Interrupt handler for DMA-driven analog input acquisition.
 *
 * Fires when a DMA buffer fills.  Flips to the other half of the double
 * buffer, re-arms the 8237 DMA controller for the next transfer, then
 * drains the just-completed buffer into the comedi async buffer (minus
 * any samples already consumed by pcl812_ai_poll()).
 */
static irqreturn_t interrupt_pcl812_ai_dma(int irq, void *d)
{
struct comedi_device *dev = d;
struct comedi_subdevice *s = dev->subdevices + 0;
unsigned long dma_flags;
int len, bufptr;
short *ptr;
#ifdef PCL812_EXTDEBUG
printk(KERN_DEBUG "pcl812 EDBG: BGN: interrupt_pcl812_ai_dma(...)\n");
#endif
/* Buffer that just completed; subtract samples poll() already took. */
ptr = (short *)devpriv->dmabuf[devpriv->next_dma_buf];
len = (devpriv->dmabytestomove[devpriv->next_dma_buf] >> 1) -
devpriv->ai_poll_ptr;
/* Switch to the other half of the double buffer. */
devpriv->next_dma_buf = 1 - devpriv->next_dma_buf;
disable_dma(devpriv->dma);
set_dma_mode(devpriv->dma, DMA_MODE_READ);
dma_flags = claim_dma_lock();
set_dma_addr(devpriv->dma, devpriv->hwdmaptr[devpriv->next_dma_buf]);
if (devpriv->ai_eos) {
/* EOS mode: each DMA run is exactly one scan. */
set_dma_count(devpriv->dma,
devpriv->dmabytestomove[devpriv->next_dma_buf]);
} else {
if (devpriv->dma_runs_to_end) {
set_dma_count(devpriv->dma,
devpriv->dmabytestomove[devpriv->
next_dma_buf]);
} else {
/* last run: only the remaining partial buffer */
set_dma_count(devpriv->dma, devpriv->last_dma_run);
}
devpriv->dma_runs_to_end--;
}
release_dma_lock(dma_flags);
enable_dma(devpriv->dma);
outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
/* Drain the completed buffer after the hardware is re-armed. */
bufptr = devpriv->ai_poll_ptr;
devpriv->ai_poll_ptr = 0;
transfer_from_dma_buf(dev, s, ptr, bufptr, len);
#ifdef PCL812_EXTDEBUG
printk(KERN_DEBUG "pcl812 EDBG: END: interrupt_pcl812_ai_dma(...)\n");
#endif
return IRQ_HANDLED;
}
/*
==============================================================================
*/
/*
 * Top-level interrupt entry point registered in pcl812_attach().
 *
 * Ignores interrupts that arrive before the device is fully attached,
 * otherwise dispatches to the DMA or programmed-I/O acquisition handler
 * depending on the mode selected by the current command.
 */
static irqreturn_t interrupt_pcl812(int irq, void *d)
{
	struct comedi_device *dev = d;

	if (!dev->attached) {
		comedi_error(dev, "spurious interrupt");
		return IRQ_HANDLED;
	}

	return devpriv->ai_dma ? interrupt_pcl812_ai_dma(irq, d)
			       : interrupt_pcl812_ai_int(irq, d);
}
/*
==============================================================================
*/
/*
 * Comedi poll handler for the analog input subdevice.
 *
 * Transfers any samples the currently-filling DMA buffer already holds
 * (but which have not yet reached the comedi buffer) without waiting for
 * the buffer-full interrupt.  Only meaningful for DMA acquisitions.
 *
 * Returns the number of unread bytes in the async buffer, or 0 when
 * nothing new could be transferred (non-DMA mode, unstable residue
 * reading, or no new samples).
 *
 * Bug fix: the original code called get_dma_residue(devpriv->ai_dma).
 * ai_dma is only a 0/1 "DMA in use" flag; the ISA DMA channel number is
 * devpriv->dma, so the residue was read from the wrong channel.
 */
static int pcl812_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
{
	unsigned long flags;
	unsigned int top1, top2, i;

	if (!devpriv->ai_dma)
		return 0;	/* poll is valid only for DMA transfer */

	spin_lock_irqsave(&dev->spinlock, flags);

	/*
	 * Read the 8237 residue twice until two reads agree; a single read
	 * can be caught while the counter is mid-update.
	 */
	for (i = 0; i < 10; i++) {
		/* where is now DMA */
		top1 = get_dma_residue(devpriv->dma);
		top2 = get_dma_residue(devpriv->dma);
		if (top1 == top2)
			break;
	}
	if (top1 != top2) {
		spin_unlock_irqrestore(&dev->spinlock, flags);
		return 0;
	}

	/* Convert bytes-remaining to sample position within the buffer. */
	top1 = devpriv->dmabytestomove[1 - devpriv->next_dma_buf] - top1;
	top1 >>= 1;		/* sample position */
	top2 = top1 - devpriv->ai_poll_ptr;
	if (top2 < 1) {		/* no new samples */
		spin_unlock_irqrestore(&dev->spinlock, flags);
		return 0;
	}

	transfer_from_dma_buf(dev, s,
			      (void *)devpriv->dmabuf[1 -
						      devpriv->next_dma_buf],
			      devpriv->ai_poll_ptr, top2);

	devpriv->ai_poll_ptr = top1;	/* new buffer position */

	spin_unlock_irqrestore(&dev->spinlock, flags);

	return s->async->buf_write_count - s->async->buf_read_count;
}
/*
==============================================================================
*/
/*
 * Program the input multiplexer and gain for one chanlist entry.
 *
 * @rangechan: packed comedi chanspec (CR_CHAN / CR_RANGE are extracted)
 * @wait:      non-zero to delay after switching so the input can settle
 *
 * The previous mux/gain settings are cached so redundant register writes
 * (and the settling delay) are skipped when nothing changed.
 */
static void setup_range_channel(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int rangechan, char wait)
{
unsigned char chan_reg = CR_CHAN(rangechan); /* normal board */
/* gain index */
unsigned char gain_reg = CR_RANGE(rangechan) +
devpriv->range_correction;
if ((chan_reg == devpriv->old_chan_reg)
&& (gain_reg == devpriv->old_gain_reg))
return; /* we can return, no change */
devpriv->old_chan_reg = chan_reg;
devpriv->old_gain_reg = gain_reg;
/* Boards with an MPC508 mux need extra mode bits in the channel reg. */
if (devpriv->use_MPC) {
if (devpriv->use_diff) {
chan_reg = chan_reg | 0x30; /* DIFF inputs */
} else {
if (chan_reg & 0x80)
/* SE inputs 8-15 */
chan_reg = chan_reg | 0x20;
else
/* SE inputs 0-7 */
chan_reg = chan_reg | 0x10;
}
}
outb(chan_reg, dev->iobase + PCL812_MUX); /* select channel */
outb(gain_reg, dev->iobase + PCL812_GAIN); /* select gain */
if (wait)
/*
* XXX this depends on selected range and can be very long for
* some high gain ranges!
*/
udelay(devpriv->max_812_ai_mode0_rangewait);
}
/*
==============================================================================
*/
/*
 * Program the on-board 8254 pacer.
 *
 * @mode:     1 = start the cascaded pacer with the given divisors;
 *            any other value (e.g. -1) just reprograms the counters'
 *            mode words, which stops the pacer output.
 * @divisor1: divisor for counter 1 (fed by counter 2's output)
 * @divisor2: divisor for counter 2 (fed by the base clock)
 *
 * 0xb4/0x74 are 8254 control words selecting counter 2 / counter 1
 * respectively, LSB-then-MSB access, mode 2 (rate generator).
 */
static void start_pacer(struct comedi_device *dev, int mode,
unsigned int divisor1, unsigned int divisor2)
{
#ifdef PCL812_EXTDEBUG
printk(KERN_DEBUG "pcl812 EDBG: BGN: start_pacer(%d,%u,%u)\n", mode,
divisor1, divisor2);
#endif
outb(0xb4, dev->iobase + PCL812_CTRCTL);
outb(0x74, dev->iobase + PCL812_CTRCTL);
udelay(1);
if (mode == 1) {
/* load each divisor low byte first, then high byte */
outb(divisor2 & 0xff, dev->iobase + PCL812_CTR2);
outb((divisor2 >> 8) & 0xff, dev->iobase + PCL812_CTR2);
outb(divisor1 & 0xff, dev->iobase + PCL812_CTR1);
outb((divisor1 >> 8) & 0xff, dev->iobase + PCL812_CTR1);
}
#ifdef PCL812_EXTDEBUG
printk(KERN_DEBUG "pcl812 EDBG: END: start_pacer(...)\n");
#endif
}
/*
==============================================================================
*/
/*
 * Release everything pcl812_attach() may have acquired: the two DMA
 * buffer halves, the ISA DMA channel, the IRQ, and the I/O region.
 * Safe to call on a partially-initialized device; each resource is only
 * released if it was actually obtained.
 */
static void free_resources(struct comedi_device *dev)
{
	if (dev->private) {
		int i;

		/* Free both halves of the double-buffered DMA area. */
		for (i = 0; i < 2; i++) {
			if (devpriv->dmabuf[i])
				free_pages(devpriv->dmabuf[i],
					   devpriv->dmapages[i]);
		}
		if (devpriv->dma)
			free_dma(devpriv->dma);
	}
	if (dev->irq)
		free_irq(dev->irq, dev);
	if (dev->iobase)
		release_region(dev->iobase, this_board->io_range);
}
/*
==============================================================================
*/
/*
 * Cancel a running analog input acquisition.
 *
 * Disables DMA (if active), puts the board back into software-trigger
 * mode, stops the 8254 pacer, and clears any pending interrupt request
 * both before and after reprogramming.  Always returns 0.
 */
static int pcl812_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
#ifdef PCL812_EXTDEBUG
printk(KERN_DEBUG "pcl812 EDBG: BGN: pcl812_ai_cancel(...)\n");
#endif
if (devpriv->ai_dma)
disable_dma(devpriv->dma);
outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
/* Stop A/D */
outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
start_pacer(dev, -1, 0, 0); /* stop 8254 */
outb(0, dev->iobase + PCL812_CLRINT); /* clear INT request */
#ifdef PCL812_EXTDEBUG
printk(KERN_DEBUG "pcl812 EDBG: END: pcl812_ai_cancel(...)\n");
#endif
return 0;
}
/*
==============================================================================
*/
/*
 * Bring the board to a known idle state: channel 0 selected, base gain,
 * DACs zeroed (on boards that have them), pacer stopped, digital outputs
 * cleared, and pending interrupts acknowledged.
 */
static void pcl812_reset(struct comedi_device *dev)
{
#ifdef PCL812_EXTDEBUG
printk(KERN_DEBUG "pcl812 EDBG: BGN: pcl812_reset(...)\n");
#endif
outb(0, dev->iobase + PCL812_MUX);
outb(0 + devpriv->range_correction, dev->iobase + PCL812_GAIN);
devpriv->old_chan_reg = -1; /* invalidate chain/gain memory */
devpriv->old_gain_reg = -1;
switch (this_board->board_type) {
case boardPCL812PG:
case boardPCL812:
case boardACL8112:
case boardACL8216:
/* boards with a second DAC zero it first ... */
outb(0, dev->iobase + PCL812_DA2_LO);
outb(0, dev->iobase + PCL812_DA2_HI);
/* fall through: ... then share the DAC1/pacer/DO reset below */
case boardA821:
outb(0, dev->iobase + PCL812_DA1_LO);
outb(0, dev->iobase + PCL812_DA1_HI);
start_pacer(dev, -1, 0, 0); /* stop 8254 */
outb(0, dev->iobase + PCL812_DO_HI);
outb(0, dev->iobase + PCL812_DO_LO);
outb(devpriv->mode_reg_int | 0, dev->iobase + PCL812_MODE);
outb(0, dev->iobase + PCL812_CLRINT);
break;
case boardPCL813B:
case boardPCL813:
case boardISO813:
case boardACL8113:
/* input-only boards: nothing to reset beyond mux/gain */
udelay(5);
break;
}
udelay(5);
#ifdef PCL812_EXTDEBUG
printk(KERN_DEBUG "pcl812 EDBG: END: pcl812_reset(...)\n");
#endif
}
/*
==============================================================================
*/
/*
 * Attach routine: claim the I/O region, optionally an IRQ and an ISA DMA
 * channel, allocate private data and DMA buffers, then register the
 * comedi subdevices appropriate for the detected board variant.
 *
 * it->options[] (meaning derived from the code below; board dependent):
 *   [0] I/O base address
 *   [1] IRQ number (0 = none); range selector on PCL813/ISO813/ACL8113
 *   [2] ISA DMA channel; differential-input selector on A-821
 *   [3] AO range / external-trigger selector (board dependent)
 *   [4] AI range or differential-input selector (board dependent)
 *   [5] AO range selector on PCL812/ACL8112/PCL812PG/ACL8216
 *
 * Returns 0 on success or a negative errno.  On every failure path the
 * resources acquired so far are released via free_resources().
 *
 * Fixes vs. the original:
 *  - the "incorrect range number" warnings in the default: cases were
 *    placed after break; and could never print; they now print first;
 *  - a stray duplicated break; was removed;
 *  - the DMA error paths returned without free_resources(), leaking the
 *    I/O region and IRQ; they now clean up;
 *  - intentional switch fallthroughs are marked.
 */
static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	int ret, subdev;
	unsigned long iobase;
	unsigned int irq;
	unsigned int dma;
	unsigned long pages;
	struct comedi_subdevice *s;
	int n_subdevices;

	iobase = it->options[0];
	printk(KERN_INFO "comedi%d: pcl812: board=%s, ioport=0x%03lx",
	       dev->minor, this_board->name, iobase);
	if (!request_region(iobase, this_board->io_range, "pcl812")) {
		printk("I/O port conflict\n");
		return -EIO;
	}
	dev->iobase = iobase;

	ret = alloc_private(dev, sizeof(struct pcl812_private));
	if (ret < 0) {
		free_resources(dev);
		return ret;	/* Can't alloc mem */
	}
	dev->board_name = this_board->name;

	/* grab our IRQ */
	irq = 0;
	if (this_board->IRQbits != 0) {	/* board support IRQ */
		irq = it->options[1];
		if (irq) {	/* we want to use IRQ */
			if (((1 << irq) & this_board->IRQbits) == 0) {
				printk
				    (", IRQ %u is out of allowed range, "
				     "DISABLING IT", irq);
				irq = 0;	/* Bad IRQ */
			} else {
				if (request_irq
				    (irq, interrupt_pcl812, 0, "pcl812", dev)) {
					printk
					    (", unable to allocate IRQ %u, "
					     "DISABLING IT", irq);
					irq = 0;	/* Can't use IRQ */
				} else {
					printk(KERN_INFO ", irq=%u", irq);
				}
			}
		}
	}
	dev->irq = irq;

	/* grab our DMA */
	dma = 0;
	devpriv->dma = dma;
	if (!dev->irq)
		goto no_dma;	/* if we haven't IRQ, we can't use DMA */
	if (this_board->DMAbits != 0) {	/* board support DMA */
		dma = it->options[2];
		if (((1 << dma) & this_board->DMAbits) == 0) {
			printk(", DMA is out of allowed range, FAIL!\n");
			free_resources(dev);	/* don't leak region/IRQ */
			return -EINVAL;	/* Bad DMA */
		}
		ret = request_dma(dma, "pcl812");
		if (ret) {
			printk(KERN_ERR ", unable to allocate DMA %u, FAIL!\n",
			       dma);
			free_resources(dev);	/* don't leak region/IRQ */
			return -EBUSY;	/* DMA isn't free */
		}
		devpriv->dma = dma;
		printk(KERN_INFO ", dma=%u", dma);
		pages = 1;	/* we want 8KB */
		devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
		if (!devpriv->dmabuf[0]) {
			printk(", unable to allocate DMA buffer, FAIL!\n");
			/*
			 * maybe experiment with try_to_free_pages()
			 * will help ....
			 */
			free_resources(dev);
			return -EBUSY;	/* no buffer :-( */
		}
		devpriv->dmapages[0] = pages;
		devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
		devpriv->hwdmasize[0] = PAGE_SIZE * (1 << pages);
		devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
		if (!devpriv->dmabuf[1]) {
			printk(KERN_ERR ", unable to allocate DMA buffer, FAIL!\n");
			free_resources(dev);
			return -EBUSY;
		}
		devpriv->dmapages[1] = pages;
		devpriv->hwdmaptr[1] = virt_to_bus((void *)devpriv->dmabuf[1]);
		devpriv->hwdmasize[1] = PAGE_SIZE * (1 << pages);
	}
no_dma:

	/* Count the subdevices this board variant provides. */
	n_subdevices = 0;
	if (this_board->n_aichan > 0)
		n_subdevices++;
	if (this_board->n_aochan > 0)
		n_subdevices++;
	if (this_board->n_dichan > 0)
		n_subdevices++;
	if (this_board->n_dochan > 0)
		n_subdevices++;

	ret = alloc_subdevices(dev, n_subdevices);
	if (ret < 0) {
		free_resources(dev);
		return ret;
	}

	subdev = 0;

	/* analog input */
	if (this_board->n_aichan > 0) {
		s = dev->subdevices + subdev;
		s->type = COMEDI_SUBD_AI;
		s->subdev_flags = SDF_READABLE;
		switch (this_board->board_type) {
		case boardA821:
			if (it->options[2] == 1) {
				s->n_chan = this_board->n_aichan_diff;
				s->subdev_flags |= SDF_DIFF;
				devpriv->use_diff = 1;
			} else {
				s->n_chan = this_board->n_aichan;
				s->subdev_flags |= SDF_GROUND;
			}
			break;
		case boardACL8112:
		case boardACL8216:
			if (it->options[4] == 1) {
				s->n_chan = this_board->n_aichan_diff;
				s->subdev_flags |= SDF_DIFF;
				devpriv->use_diff = 1;
			} else {
				s->n_chan = this_board->n_aichan;
				s->subdev_flags |= SDF_GROUND;
			}
			break;
		default:
			s->n_chan = this_board->n_aichan;
			s->subdev_flags |= SDF_GROUND;
			break;
		}
		s->maxdata = this_board->ai_maxdata;
		s->len_chanlist = MAX_CHANLIST_LEN;
		s->range_table = this_board->rangelist_ai;
		if (this_board->board_type == boardACL8216)
			s->insn_read = acl8216_ai_insn_read;
		else
			s->insn_read = pcl812_ai_insn_read;
		devpriv->use_MPC = this_board->haveMPC508;
		s->cancel = pcl812_ai_cancel;
		if (dev->irq) {
			dev->read_subdev = s;
			s->subdev_flags |= SDF_CMD_READ;
			s->do_cmdtest = pcl812_ai_cmdtest;
			s->do_cmd = pcl812_ai_cmd;
			s->poll = pcl812_ai_poll;
		}

		/* Board-specific AI range table selection. */
		switch (this_board->board_type) {
		case boardPCL812PG:
			if (it->options[4] == 1)
				s->range_table = &range_pcl812pg2_ai;
			break;
		case boardPCL812:
			switch (it->options[4]) {
			case 0:
				s->range_table = &range_bipolar10;
				break;
			case 1:
				s->range_table = &range_bipolar5;
				break;
			case 2:
				s->range_table = &range_bipolar2_5;
				break;
			case 3:
				s->range_table = &range812_bipolar1_25;
				break;
			case 4:
				s->range_table = &range812_bipolar0_625;
				break;
			case 5:
				s->range_table = &range812_bipolar0_3125;
				break;
			default:
				printk
				    (", incorrect range number %d, changing "
				     "to 0 (+/-10V)", it->options[4]);
				s->range_table = &range_bipolar10;
				break;
			}
			break;
		case boardPCL813B:
			if (it->options[1] == 1)
				s->range_table = &range_pcl813b2_ai;
			break;
		case boardISO813:
			switch (it->options[1]) {
			case 0:
				s->range_table = &range_iso813_1_ai;
				break;
			case 1:
				s->range_table = &range_iso813_1_2_ai;
				break;
			case 2:
				s->range_table = &range_iso813_2_ai;
				devpriv->range_correction = 1;
				break;
			case 3:
				s->range_table = &range_iso813_2_2_ai;
				devpriv->range_correction = 1;
				break;
			default:
				printk
				    (", incorrect range number %d, "
				     "changing to 0 ", it->options[1]);
				s->range_table = &range_iso813_1_ai;
				break;
			}
			break;
		case boardACL8113:
			switch (it->options[1]) {
			case 0:
				s->range_table = &range_acl8113_1_ai;
				break;
			case 1:
				s->range_table = &range_acl8113_1_2_ai;
				break;
			case 2:
				s->range_table = &range_acl8113_2_ai;
				devpriv->range_correction = 1;
				break;
			case 3:
				s->range_table = &range_acl8113_2_2_ai;
				devpriv->range_correction = 1;
				break;
			default:
				printk
				    (", incorrect range number %d, "
				     "changing to 0 ", it->options[1]);
				s->range_table = &range_acl8113_1_ai;
				break;
			}
			break;
		}
		subdev++;
	}

	/* analog output */
	if (this_board->n_aochan > 0) {
		s = dev->subdevices + subdev;
		s->type = COMEDI_SUBD_AO;
		s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
		s->n_chan = this_board->n_aochan;
		s->maxdata = 0xfff;
		s->len_chanlist = 1;
		s->range_table = this_board->rangelist_ao;
		s->insn_read = pcl812_ao_insn_read;
		s->insn_write = pcl812_ao_insn_write;
		switch (this_board->board_type) {
		case boardA821:
			if (it->options[3] == 1)
				s->range_table = &range_unipolar10;
			break;
		case boardPCL812:
		case boardACL8112:
		case boardPCL812PG:
		case boardACL8216:
			if (it->options[5] == 1)
				s->range_table = &range_unipolar10;
			if (it->options[5] == 2)
				s->range_table = &range_unknown;
			break;
		}
		subdev++;
	}

	/* digital input */
	if (this_board->n_dichan > 0) {
		s = dev->subdevices + subdev;
		s->type = COMEDI_SUBD_DI;
		s->subdev_flags = SDF_READABLE;
		s->n_chan = this_board->n_dichan;
		s->maxdata = 1;
		s->len_chanlist = this_board->n_dichan;
		s->range_table = &range_digital;
		s->insn_bits = pcl812_di_insn_bits;
		subdev++;
	}

	/* digital output */
	if (this_board->n_dochan > 0) {
		s = dev->subdevices + subdev;
		s->type = COMEDI_SUBD_DO;
		s->subdev_flags = SDF_WRITABLE;
		s->n_chan = this_board->n_dochan;
		s->maxdata = 1;
		s->len_chanlist = this_board->n_dochan;
		s->range_table = &range_digital;
		s->insn_bits = pcl812_do_insn_bits;
		subdev++;
	}

	/* Final per-family configuration. */
	switch (this_board->board_type) {
	case boardACL8216:
		devpriv->ai_is16b = 1;
		/* fall through */
	case boardPCL812PG:
	case boardPCL812:
	case boardACL8112:
		devpriv->max_812_ai_mode0_rangewait = 1;
		if (it->options[3] > 0)
			/*  we use external trigger */
			devpriv->use_ext_trg = 1;
		/* fall through */
	case boardA821:
		devpriv->max_812_ai_mode0_rangewait = 1;
		devpriv->mode_reg_int = (irq << 4) & 0xf0;
		break;
	case boardPCL813B:
	case boardPCL813:
	case boardISO813:
	case boardACL8113:
		/* maybe there must by greatest timeout */
		devpriv->max_812_ai_mode0_rangewait = 5;
		break;
	}

	printk(KERN_INFO "\n");

	devpriv->valid = 1;

	pcl812_reset(dev);

	return 0;
}
/*
==============================================================================
*/
/*
 * Detach routine: release every resource acquired in pcl812_attach().
 * Always returns 0.
 */
static int pcl812_detach(struct comedi_device *dev)
{
#ifdef PCL812_EXTDEBUG
printk(KERN_DEBUG "comedi%d: pcl812: remove\n", dev->minor);
#endif
free_resources(dev);
return 0;
}
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
desalesouche/android_kernel_huawei_honor_3.4 | drivers/net/wireless/ray_cs.c | 4833 | 87155 | /*=============================================================================
*
* A PCMCIA client driver for the Raylink wireless LAN card.
* The starting point for this module was the skeleton.c in the
* PCMCIA 2.9.12 package written by David Hinds, dahinds@users.sourceforge.net
*
*
* Copyright (c) 1998 Corey Thomas (corey@world.std.com)
*
* This driver is free software; you can redistribute it and/or modify
* it under the terms of version 2 only of the GNU General Public License as
* published by the Free Software Foundation.
*
* It is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
* Changes:
* Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 08/08/2000
* - reorganize kmallocs in ray_attach, checking all for failure
* and releasing the previous allocations if one fails
*
* Daniele Bellucci <bellucda@tiscali.it> - 07/10/2003
* - Audit copy_to_user in ioctl(SIOCGIWESSID)
*
=============================================================================*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
#include <linux/ieee80211.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
/* Warning : these stuff will slow down the driver... */
#define WIRELESS_SPY /* Enable spying addresses */
/* Definitions we need for spy */
typedef struct iw_statistics iw_stats;
typedef u_char mac_addr[ETH_ALEN]; /* Hardware address */
#include "rayctl.h"
#include "ray_cs.h"
/** Prototypes based on PCMCIA skeleton driver *******************************/
static int ray_config(struct pcmcia_device *link);
static void ray_release(struct pcmcia_device *link);
static void ray_detach(struct pcmcia_device *p_dev);
/***** Prototypes indicated by device structure ******************************/
static int ray_dev_close(struct net_device *dev);
static int ray_dev_config(struct net_device *dev, struct ifmap *map);
static struct net_device_stats *ray_get_stats(struct net_device *dev);
static int ray_dev_init(struct net_device *dev);
static int ray_open(struct net_device *dev);
static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void ray_update_multi_list(struct net_device *dev, int all);
static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
unsigned char *data, int len);
static void ray_build_header(ray_dev_t *local, struct tx_msg __iomem *ptx,
UCHAR msg_type, unsigned char *data);
static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len);
static iw_stats *ray_get_wireless_stats(struct net_device *dev);
static const struct iw_handler_def ray_handler_def;
/***** Prototypes for raylink functions **************************************/
static void authenticate(ray_dev_t *local);
static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type);
static void authenticate_timeout(u_long);
static int get_free_ccs(ray_dev_t *local);
static int get_free_tx_ccs(ray_dev_t *local);
static void init_startup_params(ray_dev_t *local);
static int parse_addr(char *in_str, UCHAR *out);
static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev, UCHAR type);
static int ray_init(struct net_device *dev);
static int interrupt_ecf(ray_dev_t *local, int ccs);
static void ray_reset(struct net_device *dev);
static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value, int len);
static void verify_dl_startup(u_long);
/* Prototypes for interrpt time functions **********************************/
static irqreturn_t ray_interrupt(int reg, void *dev_id);
static void clear_interrupt(ray_dev_t *local);
static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
unsigned int pkt_addr, int rx_len);
static int copy_from_rx_buff(ray_dev_t *local, UCHAR *dest, int pkt_addr, int len);
static void ray_rx(struct net_device *dev, ray_dev_t *local, struct rcs __iomem *prcs);
static void release_frag_chain(ray_dev_t *local, struct rcs __iomem *prcs);
static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
unsigned int pkt_addr, int rx_len);
static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
unsigned int pkt_addr, int rx_len);
static void associate(ray_dev_t *local);
/* Card command functions */
static int dl_startup_params(struct net_device *dev);
static void join_net(u_long local);
static void start_net(u_long local);
/* void start_net(ray_dev_t *local); */
/*===========================================================================*/
/* Parameters that can be set with 'insmod' */
/* ADHOC=0, Infrastructure=1 */
static int net_type = ADHOC;
/* Hop dwell time in Kus (1024 us units defined by 802.11) */
static int hop_dwell = 128;
/* Beacon period in Kus */
static int beacon_period = 256;
/* power save mode (0 = off, 1 = save power) */
static int psm;
/* String for network's Extended Service Set ID. 32 Characters max */
static char *essid;
/* Default to encapsulation unless translation requested */
static int translate = 1;
static int country = USA;
static int sniffer;
static int bc;
/* 48 bit physical card address if overriding card's real physical
* address is required. Since IEEE 802.11 addresses are 48 bits
* like ethernet, an int can't be used, so a string is used. To
* allow use of addresses starting with a decimal digit, the first
* character must be a letter and will be ignored. This letter is
* followed by up to 12 hex digits which are the address. If less
* than 12 digits are used, the address will be left filled with 0's.
* Note that bit 0 of the first byte is the broadcast bit, and evil
* things will happen if it is not 0 in a card address.
*/
static char *phy_addr = NULL;
static unsigned int ray_mem_speed = 500;
/* WARNING: THIS DRIVER IS NOT CAPABLE OF HANDLING MULTIPLE DEVICES! */
static struct pcmcia_device *this_device = NULL;
MODULE_AUTHOR("Corey Thomas <corey@world.std.com>");
MODULE_DESCRIPTION("Raylink/WebGear wireless LAN driver");
MODULE_LICENSE("GPL");
module_param(net_type, int, 0);
module_param(hop_dwell, int, 0);
module_param(beacon_period, int, 0);
module_param(psm, int, 0);
module_param(essid, charp, 0);
module_param(translate, int, 0);
module_param(country, int, 0);
module_param(sniffer, int, 0);
module_param(bc, int, 0);
module_param(phy_addr, charp, 0);
module_param(ray_mem_speed, int, 0);
/*
 * Default startup parameter block for build-5 firmware cards.
 * Byte layout appears to follow the ECF startup-parameters structure
 * (see rayctl.h) -- the per-line comments name the fields.  Entries from
 * the "b4 - b5 differences" marker onward differ from the build-4 table.
 */
static const UCHAR b5_default_startup_parms[] = {
0, 0, /* Adhoc station */
'L', 'I', 'N', 'U', 'X', 0, 0, 0, /* 32 char ESSID */
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1, 0, /* Active scan, CA Mode */
0, 0, 0, 0, 0, 0, /* No default MAC addr */
0x7f, 0xff, /* Frag threshold */
0x00, 0x80, /* Hop time 128 Kus */
0x01, 0x00, /* Beacon period 256 Kus */
0x01, 0x07, 0xa3, /* DTIM, retries, ack timeout */
0x1d, 0x82, 0x4e, /* SIFS, DIFS, PIFS */
0x7f, 0xff, /* RTS threshold */
0x04, 0xe2, 0x38, 0xA4, /* scan_dwell, max_scan_dwell */
0x05, /* assoc resp timeout thresh */
0x08, 0x02, 0x08, /* adhoc, infra, super cycle max */
0, /* Promiscuous mode */
0x0c, 0x0bd, /* Unique word */
0x32, /* Slot time */
0xff, 0xff, /* roam-low snr, low snr count */
0x05, 0xff, /* Infra, adhoc missed bcn thresh */
0x01, 0x0b, 0x4f, /* USA, hop pattern, hop pat length */
/* b4 - b5 differences start here */
0x00, 0x3f, /* CW max */
0x00, 0x0f, /* CW min */
0x04, 0x08, /* Noise gain, limit offset */
0x28, 0x28, /* det rssi, med busy offsets */
7, /* det sync thresh */
0, 2, 2, /* test mode, min, max */
0, /* allow broadcast SSID probe resp */
0, 0, /* privacy must start, can join */
2, 0, 0, 0, 0, 0, 0, 0 /* basic rate set */
};
/*
 * Default startup parameter block for build-4 firmware cards.  Same
 * leading layout as the build-5 table above; it is shorter because the
 * later b5-only fields do not exist on this firmware.
 */
static const UCHAR b4_default_startup_parms[] = {
0, 0, /* Adhoc station */
'L', 'I', 'N', 'U', 'X', 0, 0, 0, /* 32 char ESSID */
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
1, 0, /* Active scan, CA Mode */
0, 0, 0, 0, 0, 0, /* No default MAC addr */
0x7f, 0xff, /* Frag threshold */
0x02, 0x00, /* Hop time */
0x00, 0x01, /* Beacon period */
0x01, 0x07, 0xa3, /* DTIM, retries, ack timeout */
0x1d, 0x82, 0xce, /* SIFS, DIFS, PIFS */
0x7f, 0xff, /* RTS threshold */
0xfb, 0x1e, 0xc7, 0x5c, /* scan_dwell, max_scan_dwell */
0x05, /* assoc resp timeout thresh */
0x04, 0x02, 0x4, /* adhoc, infra, super cycle max */
0, /* Promiscuous mode */
0x0c, 0x0bd, /* Unique word */
0x4e, /* Slot time (TBD seems wrong) */
0xff, 0xff, /* roam-low snr, low snr count */
0x05, 0xff, /* Infra, adhoc missed bcn thresh */
0x01, 0x0b, 0x4e, /* USA, hop pattern, hop pat length */
/* b4 - b5 differences start here */
0x3f, 0x0f, /* CW max, min */
0x04, 0x08, /* Noise gain, limit offset */
0x28, 0x28, /* det rssi, med busy offsets */
7, /* det sync thresh */
0, 2, 2 /* test mode, min, max */
};
/*===========================================================================*/
/* RFC 1042-style LLC/SNAP header (AA AA 03, OUI 00-00-00) prepended when
 * translating Ethernet-II frames for 802.11 transmission. */
static const u8 eth2_llc[] = { 0xaa, 0xaa, 3, 0, 0, 0 };
/* Hop pattern length per country code; entry 0 looks like a placeholder
 * so the table can be indexed by the 1-based country codes -- verify
 * against the 'country' module parameter's users. */
static const char hop_pattern_length[] = { 1,
USA_HOP_MOD, EUROPE_HOP_MOD,
JAPAN_HOP_MOD, KOREA_HOP_MOD,
SPAIN_HOP_MOD, FRANCE_HOP_MOD,
ISRAEL_HOP_MOD, AUSTRALIA_HOP_MOD,
JAPAN_TEST_HOP_MOD
};
/* Driver identification string. */
static const char rcsid[] =
"Raylink/WebGear wireless LAN - Corey <Thomas corey@world.std.com>";
/* net_device callbacks; generic ethernet helpers are used for MTU,
 * MAC address assignment and address validation. */
static const struct net_device_ops ray_netdev_ops = {
.ndo_init = ray_dev_init,
.ndo_open = ray_open,
.ndo_stop = ray_dev_close,
.ndo_start_xmit = ray_dev_start_xmit,
.ndo_set_config = ray_dev_config,
.ndo_get_stats = ray_get_stats,
.ndo_set_rx_mode = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
/*
 * PCMCIA probe ("attach") callback.
 *
 * Allocates the net_device with its private ray_dev_t, wires up the
 * netdev/wireless handlers, records the device as the single supported
 * instance (this_device), and hands off to ray_config() for resource
 * setup.  Returns 0 on success, -ENOMEM if the netdev allocation fails,
 * or ray_config()'s error code.
 *
 * Fix: the original assigned local->finder = p_dev twice; the redundant
 * second assignment is removed.
 */
static int ray_probe(struct pcmcia_device *p_dev)
{
	ray_dev_t *local;
	struct net_device *dev;

	dev_dbg(&p_dev->dev, "ray_attach()\n");

	/* Allocate space for private device-specific data */
	dev = alloc_etherdev(sizeof(ray_dev_t));
	if (!dev)
		goto fail_alloc_dev;

	local = netdev_priv(dev);
	local->finder = p_dev;

	/* The io structure describes IO port mapping. None used here */
	p_dev->resource[0]->end = 0;
	p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;

	/* General socket configuration */
	p_dev->config_flags |= CONF_ENABLE_IRQ;
	p_dev->config_index = 1;

	p_dev->priv = dev;

	local->card_status = CARD_INSERTED;
	local->authentication_state = UNAUTHENTICATED;
	local->num_multi = 0;
	dev_dbg(&p_dev->dev, "ray_attach p_dev = %p, dev = %p, local = %p, intr = %p\n",
		p_dev, dev, local, &ray_interrupt);

	/* Raylink entries in the device structure */
	dev->netdev_ops = &ray_netdev_ops;
	dev->wireless_handlers = &ray_handler_def;
#ifdef WIRELESS_SPY
	local->wireless_data.spy_data = &local->spy_data;
	dev->wireless_data = &local->wireless_data;
#endif /* WIRELESS_SPY */

	dev_dbg(&p_dev->dev, "ray_cs ray_attach calling ether_setup.)\n");
	netif_stop_queue(dev);

	init_timer(&local->timer);

	this_device = p_dev;
	return ray_config(p_dev);

fail_alloc_dev:
	return -ENOMEM;
} /* ray_attach */
/*
 * PCMCIA detach callback: undo ray_probe()/ray_config().
 *
 * Releases the socket resources, stops the private timer, and
 * unregisters/frees the net_device.
 *
 * Fix: the original called netdev_priv()/del_timer() on link->priv
 * before checking it for NULL; all net_device teardown is now guarded.
 */
static void ray_detach(struct pcmcia_device *link)
{
	struct net_device *dev;
	ray_dev_t *local;

	dev_dbg(&link->dev, "ray_detach\n");

	this_device = NULL;
	dev = link->priv;

	ray_release(link);

	if (dev) {
		local = netdev_priv(dev);
		del_timer(&local->timer);
		unregister_netdev(dev);
		free_netdev(dev);
	}
	dev_dbg(&link->dev, "ray_cs ray_detach ending\n");
} /* ray_detach */
#define MAX_TUPLE_SIZE 128
/*
 * Configure the PCMCIA socket: request the IRQ, enable the device, map
 * the three memory windows (32k shared transmit/control SRAM, 16k
 * receive buffer, 4k attribute memory), initialize the card via
 * ray_init(), and register the net_device.
 *
 * Returns 0 on success or a negative errno; all failure paths release
 * acquired resources through ray_release().
 *
 * Fix: the three ioremap() return values were never checked; a failed
 * mapping would later be dereferenced.  Each is now checked and routed
 * to the common failure path.
 */
static int ray_config(struct pcmcia_device *link)
{
	int ret = 0;
	int i;
	struct net_device *dev = (struct net_device *)link->priv;
	ray_dev_t *local = netdev_priv(dev);

	dev_dbg(&link->dev, "ray_config\n");

	/* Determine card type and firmware version */
	printk(KERN_INFO "ray_cs Detected: %s%s%s%s\n",
	       link->prod_id[0] ? link->prod_id[0] : " ",
	       link->prod_id[1] ? link->prod_id[1] : " ",
	       link->prod_id[2] ? link->prod_id[2] : " ",
	       link->prod_id[3] ? link->prod_id[3] : " ");

	/* Now allocate an interrupt line. Note that this does not
	   actually assign a handler to the interrupt.
	 */
	ret = pcmcia_request_irq(link, ray_interrupt);
	if (ret)
		goto failed;
	dev->irq = link->irq;

	ret = pcmcia_enable_device(link);
	if (ret)
		goto failed;

/*** Set up 32k window for shared memory (transmit and control) ************/
	link->resource[2]->flags |= WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT;
	link->resource[2]->start = 0;
	link->resource[2]->end = 0x8000;
	ret = pcmcia_request_window(link, link->resource[2], ray_mem_speed);
	if (ret)
		goto failed;
	ret = pcmcia_map_mem_page(link, link->resource[2], 0);
	if (ret)
		goto failed;
	local->sram = ioremap(link->resource[2]->start,
			      resource_size(link->resource[2]));
	if (!local->sram)
		goto failed;

/*** Set up 16k window for shared memory (receive buffer) ***************/
	link->resource[3]->flags |=
	    WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT;
	link->resource[3]->start = 0;
	link->resource[3]->end = 0x4000;
	ret = pcmcia_request_window(link, link->resource[3], ray_mem_speed);
	if (ret)
		goto failed;
	ret = pcmcia_map_mem_page(link, link->resource[3], 0x8000);
	if (ret)
		goto failed;
	local->rmem = ioremap(link->resource[3]->start,
			      resource_size(link->resource[3]));
	if (!local->rmem)
		goto failed;

/*** Set up window for attribute memory ***********************************/
	link->resource[4]->flags |=
	    WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | WIN_ENABLE | WIN_USE_WAIT;
	link->resource[4]->start = 0;
	link->resource[4]->end = 0x1000;
	ret = pcmcia_request_window(link, link->resource[4], ray_mem_speed);
	if (ret)
		goto failed;
	ret = pcmcia_map_mem_page(link, link->resource[4], 0);
	if (ret)
		goto failed;
	local->amem = ioremap(link->resource[4]->start,
			      resource_size(link->resource[4]));
	if (!local->amem)
		goto failed;

	dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram);
	dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem);
	dev_dbg(&link->dev, "ray_config amem=%p\n", local->amem);
	if (ray_init(dev) < 0) {
		ray_release(link);
		return -ENODEV;
	}

	SET_NETDEV_DEV(dev, &link->dev);
	i = register_netdev(dev);
	if (i != 0) {
		printk("ray_config register_netdev() failed\n");
		ray_release(link);
		return i;
	}

	printk(KERN_INFO "%s: RayLink, irq %d, hw_addr %pM\n",
	       dev->name, dev->irq, dev->dev_addr);

	return 0;

failed:
	ray_release(link);
	return -ENODEV;
} /* ray_config */
/* Return the base of the Command Control Structure array in card SRAM. */
static inline struct ccs __iomem *ccs_base(ray_dev_t *dev)
{
	void __iomem *sram = dev->sram;

	return sram + CCS_BASE;
}
static inline struct rcs __iomem *rcs_base(ray_dev_t *dev)
{
	/*
	 * Intentionally CCS_BASE, not RCS_BASE: "struct rcs" and
	 * "struct ccs" differ only in the index applied off the common
	 * base, so both accessors share the very same base pointer.
	 */
	void __iomem *sram = dev->sram;

	return sram + CCS_BASE;
}
/*===========================================================================*/
static int ray_init(struct net_device *dev)
{
int i;
UCHAR *p;
struct ccs __iomem *pccs;
ray_dev_t *local = netdev_priv(dev);
struct pcmcia_device *link = local->finder;
dev_dbg(&link->dev, "ray_init(0x%p)\n", dev);
if (!(pcmcia_dev_present(link))) {
dev_dbg(&link->dev, "ray_init - device not present\n");
return -1;
}
local->net_type = net_type;
local->sta_type = TYPE_STA;
/* Copy the startup results to local memory */
memcpy_fromio(&local->startup_res, local->sram + ECF_TO_HOST_BASE,
sizeof(struct startup_res_6));
/* Check Power up test status and get mac address from card */
if (local->startup_res.startup_word != 0x80) {
printk(KERN_INFO "ray_init ERROR card status = %2x\n",
local->startup_res.startup_word);
local->card_status = CARD_INIT_ERROR;
return -1;
}
local->fw_ver = local->startup_res.firmware_version[0];
local->fw_bld = local->startup_res.firmware_version[1];
local->fw_var = local->startup_res.firmware_version[2];
dev_dbg(&link->dev, "ray_init firmware version %d.%d\n", local->fw_ver,
local->fw_bld);
local->tib_length = 0x20;
if ((local->fw_ver == 5) && (local->fw_bld >= 30))
local->tib_length = local->startup_res.tib_length;
dev_dbg(&link->dev, "ray_init tib_length = 0x%02x\n", local->tib_length);
/* Initialize CCS's to buffer free state */
pccs = ccs_base(local);
for (i = 0; i < NUMBER_OF_CCS; i++) {
writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
}
init_startup_params(local);
/* copy mac address to startup parameters */
if (parse_addr(phy_addr, local->sparm.b4.a_mac_addr)) {
p = local->sparm.b4.a_mac_addr;
} else {
memcpy(&local->sparm.b4.a_mac_addr,
&local->startup_res.station_addr, ADDRLEN);
p = local->sparm.b4.a_mac_addr;
}
clear_interrupt(local); /* Clear any interrupt from the card */
local->card_status = CARD_AWAITING_PARAM;
dev_dbg(&link->dev, "ray_init ending\n");
return 0;
} /* ray_init */
/*===========================================================================*/
/* Download startup parameters to the card and command it to read them */
/*
 * Copy the startup parameter image into card shared RAM, queue a
 * CCS_DOWNLOAD_STARTUP_PARAMS command, interrupt the firmware, and arm
 * a timer that later checks completion (verify_dl_startup).
 *
 * Returns 0 on success, -1 if the card is gone or no CCS is free,
 * -2 if the firmware would not accept the interrupt.
 */
static int dl_startup_params(struct net_device *dev)
{
int ccsindex;
ray_dev_t *local = netdev_priv(dev);
struct ccs __iomem *pccs;
struct pcmcia_device *link = local->finder;
dev_dbg(&link->dev, "dl_startup_params entered\n");
if (!(pcmcia_dev_present(link))) {
dev_dbg(&link->dev, "ray_cs dl_startup_params - device not present\n");
return -1;
}
/* Copy parameters to host to ECF area */
/* Firmware 0x55 takes the old build-4 parameter layout; anything else
 * takes build-5 (same split as in init_startup_params). */
if (local->fw_ver == 0x55)
memcpy_toio(local->sram + HOST_TO_ECF_BASE, &local->sparm.b4,
sizeof(struct b4_startup_params));
else
memcpy_toio(local->sram + HOST_TO_ECF_BASE, &local->sparm.b5,
sizeof(struct b5_startup_params));
/* Fill in the CCS fields for the ECF */
if ((ccsindex = get_free_ccs(local)) < 0)
return -1;
/* Remember which CCS carries the command so verify_dl_startup can
 * poll its buffer_status later. */
local->dl_param_ccs = ccsindex;
pccs = ccs_base(local) + ccsindex;
writeb(CCS_DOWNLOAD_STARTUP_PARAMS, &pccs->cmd);
dev_dbg(&link->dev, "dl_startup_params start ccsindex = %d\n",
local->dl_param_ccs);
/* Interrupt the firmware to process the command */
if (interrupt_ecf(local, ccsindex)) {
printk(KERN_INFO "ray dl_startup_params failed - "
"ECF not ready for intr\n");
local->card_status = CARD_DL_PARAM_ERROR;
/* Release the CCS we claimed (the post-increment of pccs is
 * vestigial; its value is never used again). */
writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
return -2;
}
local->card_status = CARD_DL_PARAM;
/* Start kernel timer to wait for dl startup to complete. */
local->timer.expires = jiffies + HZ / 2;
local->timer.data = (long)local;
local->timer.function = verify_dl_startup;
add_timer(&local->timer);
dev_dbg(&link->dev,
"ray_cs dl_startup_params started timer for verify_dl_startup\n");
return 0;
} /* dl_startup_params */
/*===========================================================================*/
/*
 * Fill local->sparm with the default startup parameters, adjusted by
 * the module parameters (country, hop_dwell, beacon_period, psm, bc,
 * net_type, essid).  Firmware 0x55 uses the old build-4 layout;
 * everything else uses build-5.
 */
static void init_startup_params(ray_dev_t *local)
{
int i;
/* Clamp the "country" module parameter into the supported range. */
if (country > JAPAN_TEST)
country = USA;
else if (country < USA)
country = USA;
/* structure for hop time and beacon period is defined here using
 * New 802.11D6.1 format. Card firmware is still using old format
 * until version 6.
 * Before After
 * a_hop_time ms byte a_hop_time ms byte
 * a_hop_time 2s byte a_hop_time ls byte
 * a_hop_time ls byte a_beacon_period ms byte
 * a_beacon_period a_beacon_period ls byte
 *
 * a_hop_time = uS a_hop_time = KuS
 * a_beacon_period = hops a_beacon_period = KuS
 *//* 64ms = 010000 */
if (local->fw_ver == 0x55) {
memcpy((UCHAR *) &local->sparm.b4, b4_default_startup_parms,
sizeof(struct b4_startup_params));
/* Translate sane kus input values to old build 4/5 format */
/* i = hop time in uS truncated to 3 bytes */
i = (hop_dwell * 1024) & 0xffffff;
local->sparm.b4.a_hop_time[0] = (i >> 16) & 0xff;
local->sparm.b4.a_hop_time[1] = (i >> 8) & 0xff;
local->sparm.b4.a_beacon_period[0] = 0;
/* Old format counts beacon period in hops, not kus. */
local->sparm.b4.a_beacon_period[1] =
((beacon_period / hop_dwell) - 1) & 0xff;
local->sparm.b4.a_curr_country_code = country;
local->sparm.b4.a_hop_pattern_length =
hop_pattern_length[(int)country] - 1;
if (bc) {
local->sparm.b4.a_ack_timeout = 0x50;
local->sparm.b4.a_sifs = 0x3f;
}
} else { /* Version 5 uses real kus values */
memcpy((UCHAR *) &local->sparm.b5, b5_default_startup_parms,
sizeof(struct b5_startup_params));
/* Hop time and beacon period are stored big-endian, in kus. */
local->sparm.b5.a_hop_time[0] = (hop_dwell >> 8) & 0xff;
local->sparm.b5.a_hop_time[1] = hop_dwell & 0xff;
local->sparm.b5.a_beacon_period[0] =
(beacon_period >> 8) & 0xff;
local->sparm.b5.a_beacon_period[1] = beacon_period & 0xff;
if (psm)
local->sparm.b5.a_power_mgt_state = 1;
local->sparm.b5.a_curr_country_code = country;
local->sparm.b5.a_hop_pattern_length =
hop_pattern_length[(int)country];
}
/* Written through the b4 view regardless of firmware version.
 * NOTE(review): assumes these fields share offsets in the b4 and b5
 * layouts - confirm against the startup_params definitions. */
local->sparm.b4.a_network_type = net_type & 0x01;
local->sparm.b4.a_acting_as_ap_status = TYPE_STA;
if (essid != NULL)
strncpy(local->sparm.b4.a_current_ess_id, essid, ESSID_SIZE);
} /* init_startup_params */
/*===========================================================================*/
/*
 * Timer callback armed by dl_startup_params: check whether the firmware
 * has released the download CCS; on success start or join a network,
 * on failure record CARD_DL_PARAM_ERROR.
 */
static void verify_dl_startup(u_long data)
{
ray_dev_t *local = (ray_dev_t *) data;
struct ccs __iomem *pccs = ccs_base(local) + local->dl_param_ccs;
UCHAR status;
struct pcmcia_device *link = local->finder;
if (!(pcmcia_dev_present(link))) {
dev_dbg(&link->dev, "ray_cs verify_dl_startup - device not present\n");
return;
}
#if 0
{
int i;
printk(KERN_DEBUG
"verify_dl_startup parameters sent via ccs %d:\n",
local->dl_param_ccs);
for (i = 0; i < sizeof(struct b5_startup_params); i++) {
printk(" %2x",
(unsigned int)readb(local->sram +
HOST_TO_ECF_BASE + i));
}
printk("\n");
}
#endif
/* The firmware marks the CCS buffer free once it has consumed the
 * startup parameters. */
status = readb(&pccs->buffer_status);
if (status != CCS_BUFFER_FREE) {
printk(KERN_INFO
"Download startup params failed. Status = %d\n",
status);
local->card_status = CARD_DL_PARAM_ERROR;
return;
}
if (local->sparm.b4.a_network_type == ADHOC)
start_net((u_long) local);
else
join_net((u_long) local);
} /* end verify_dl_startup */
/*===========================================================================*/
/* Command card to start a network */
/*
 * Queue a CCS_START_NETWORK command to the firmware (ad-hoc mode).
 * On success the card enters the acquisition state.
 */
static void start_net(u_long data)
{
ray_dev_t *local = (ray_dev_t *) data;
struct ccs __iomem *pccs;
int ccsindex;
struct pcmcia_device *link = local->finder;
if (!(pcmcia_dev_present(link))) {
dev_dbg(&link->dev, "ray_cs start_net - device not present\n");
return;
}
/* Fill in the CCS fields for the ECF */
if ((ccsindex = get_free_ccs(local)) < 0)
return;
pccs = ccs_base(local) + ccsindex;
writeb(CCS_START_NETWORK, &pccs->cmd);
/* update_param = 0: presumably start with the parameters already
 * downloaded - TODO confirm against the firmware spec. */
writeb(0, &pccs->var.start_network.update_param);
/* Interrupt the firmware to process the command */
if (interrupt_ecf(local, ccsindex)) {
dev_dbg(&link->dev, "ray start net failed - card not ready for intr\n");
/* Give the claimed CCS back (pccs post-increment is vestigial). */
writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
return;
}
local->card_status = CARD_DOING_ACQ;
} /* end start_net */
/*===========================================================================*/
/* Command card to join a network */
/*
 * Queue a CCS_JOIN_NETWORK command to the firmware (infrastructure or
 * joining an existing ad-hoc net).  On success the card enters the
 * acquisition state.
 */
static void join_net(u_long data)
{
ray_dev_t *local = (ray_dev_t *) data;
struct ccs __iomem *pccs;
int ccsindex;
struct pcmcia_device *link = local->finder;
if (!(pcmcia_dev_present(link))) {
dev_dbg(&link->dev, "ray_cs join_net - device not present\n");
return;
}
/* Fill in the CCS fields for the ECF */
if ((ccsindex = get_free_ccs(local)) < 0)
return;
pccs = ccs_base(local) + ccsindex;
writeb(CCS_JOIN_NETWORK, &pccs->cmd);
/* Both zero: presumably join with current parameters, host initiated
 * - TODO confirm against the firmware spec. */
writeb(0, &pccs->var.join_network.update_param);
writeb(0, &pccs->var.join_network.net_initiated);
/* Interrupt the firmware to process the command */
if (interrupt_ecf(local, ccsindex)) {
dev_dbg(&link->dev, "ray join net failed - card not ready for intr\n");
/* Give the claimed CCS back (pccs post-increment is vestigial). */
writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
return;
}
local->card_status = CARD_DOING_ACQ;
}
/*
 * Undo ray_config: stop the pending parameter timer, unmap the three
 * shared-memory windows (sram, rmem, amem) and disable the socket.
 */
static void ray_release(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
ray_dev_t *local = netdev_priv(dev);
dev_dbg(&link->dev, "ray_release\n");
/* Timer must die before the mappings it may dereference go away. */
del_timer(&local->timer);
iounmap(local->sram);
iounmap(local->rmem);
iounmap(local->amem);
pcmcia_disable_device(link);
dev_dbg(&link->dev, "ray_release ending\n");
}
/* PCMCIA suspend hook: detach an open interface from the stack. */
static int ray_suspend(struct pcmcia_device *link)
{
	struct net_device *netdev = link->priv;

	if (!link->open)
		return 0;

	netif_device_detach(netdev);
	return 0;
}
/* PCMCIA resume hook: reset and re-attach an open interface. */
static int ray_resume(struct pcmcia_device *link)
{
	struct net_device *netdev = link->priv;

	if (!link->open)
		return 0;

	ray_reset(netdev);
	netif_device_attach(netdev);
	return 0;
}
/*===========================================================================*/
/*
 * net_device init hook.  With RAY_IMMEDIATE_INIT the startup parameters
 * are downloaded right away; by default initialization is deferred to
 * ray_open() so the card can still be configured first.  Also copies
 * the MAC and broadcast addresses into the net_device.
 */
static int ray_dev_init(struct net_device *dev)
{
#ifdef RAY_IMMEDIATE_INIT
int i;
#endif /* RAY_IMMEDIATE_INIT */
ray_dev_t *local = netdev_priv(dev);
struct pcmcia_device *link = local->finder;
dev_dbg(&link->dev, "ray_dev_init(dev=%p)\n", dev);
if (!(pcmcia_dev_present(link))) {
dev_dbg(&link->dev, "ray_dev_init - device not present\n");
return -1;
}
#ifdef RAY_IMMEDIATE_INIT
/* Download startup parameters */
if ((i = dl_startup_params(dev)) < 0) {
printk(KERN_INFO "ray_dev_init dl_startup_params failed - "
"returns 0x%x\n", i);
return -1;
}
#else /* RAY_IMMEDIATE_INIT */
/* Postpone the card init so that we can still configure the card,
 * for example using the Wireless Extensions. The init will happen
 * in ray_open() - Jean II */
dev_dbg(&link->dev,
"ray_dev_init: postponing card init to ray_open() ; Status = %d\n",
local->card_status);
#endif /* RAY_IMMEDIATE_INIT */
/* copy mac and broadcast addresses to linux device */
memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN);
memset(dev->broadcast, 0xff, ETH_ALEN);
dev_dbg(&link->dev, "ray_dev_init ending\n");
return 0;
}
/*===========================================================================*/
/*
 * net_device map-configuration hook.  The hardware has nothing to
 * reconfigure here, so this only checks that the card is still present.
 */
static int ray_dev_config(struct net_device *dev, struct ifmap *map)
{
	ray_dev_t *priv = netdev_priv(dev);
	struct pcmcia_device *pdev = priv->finder;

	dev_dbg(&pdev->dev, "ray_dev_config(dev=%p,ifmap=%p)\n", dev, map);
	if (!pcmcia_dev_present(pdev)) {
		dev_dbg(&pdev->dev, "ray_dev_config - device not present\n");
		return -1;
	}
	return 0;
}
/*===========================================================================*/
/*
 * net_device transmit hook.  Pads runt frames to the ethernet minimum,
 * handles a pending authentication exchange first, then hands the frame
 * to ray_hw_xmit.  Returns NETDEV_TX_BUSY (queue stopped) when no CCS
 * is available, NETDEV_TX_OK otherwise.
 */
static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	ray_dev_t *priv = netdev_priv(dev);
	struct pcmcia_device *pdev = priv->finder;
	short len = skb->len;
	int rc;

	if (!pcmcia_dev_present(pdev)) {
		dev_dbg(&pdev->dev, "ray_dev_start_xmit - device not present\n");
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	dev_dbg(&pdev->dev, "ray_dev_start_xmit(skb=%p, dev=%p)\n", skb, dev);

	/* An open-system authentication exchange must complete first. */
	if (priv->authentication_state == NEED_TO_AUTH) {
		dev_dbg(&pdev->dev, "ray_cs Sending authentication request.\n");
		if (!build_auth_frame(priv, priv->auth_id, OPEN_AUTH_REQUEST)) {
			priv->authentication_state = AUTHENTICATED;
			netif_stop_queue(dev);
			return NETDEV_TX_BUSY;
		}
	}

	/* Pad short frames up to the ethernet minimum length. */
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		len = ETH_ZLEN;
	}

	rc = ray_hw_xmit(skb->data, len, dev, DATA_TYPE);
	if (rc == XMIT_NO_CCS || rc == XMIT_NEED_AUTH) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	/* XMIT_OK, XMIT_NO_INTR and XMIT_MSG_BAD all consume the skb. */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
} /* ray_dev_start_xmit */
/*===========================================================================*/
/*
 * Copy one frame into card transmit memory and queue a CCS_TX_REQUEST.
 * Returns one of the XMIT_* codes; on XMIT_NO_CCS the tx queue has
 * already been stopped by this function.
 */
static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev,
UCHAR msg_type)
{
ray_dev_t *local = netdev_priv(dev);
struct ccs __iomem *pccs;
int ccsindex;
int offset;
struct tx_msg __iomem *ptx; /* Address of xmit buffer in PC space */
short int addr; /* Address of xmit buffer in card space */
pr_debug("ray_hw_xmit(data=%p, len=%d, dev=%p)\n", data, len, dev);
if (len + TX_HEADER_LENGTH > TX_BUF_SIZE) {
printk(KERN_INFO "ray_hw_xmit packet too large: %d bytes\n",
len);
return XMIT_MSG_BAD;
}
switch (ccsindex = get_free_tx_ccs(local)) {
case ECCSBUSY:
pr_debug("ray_hw_xmit tx_ccs table busy\n");
/* fall through */
case ECCSFULL:
pr_debug("ray_hw_xmit No free tx ccs\n");
/* fall through */
case ECARDGONE:
netif_stop_queue(dev);
return XMIT_NO_CCS;
default:
break;
}
/* Each tx buffer is 2 KiB (1 << 11) above TX_BUF_BASE. */
addr = TX_BUF_BASE + (ccsindex << 11);
if (msg_type == DATA_TYPE) {
local->stats.tx_bytes += len;
local->stats.tx_packets++;
}
ptx = local->sram + addr;
ray_build_header(local, ptx, msg_type, data);
if (translate) {
offset = translate_frame(local, ptx, data, len);
} else { /* Encapsulate frame */
/* TBD TIB length will move address of ptx->var */
memcpy_toio(&ptx->var, data, len);
offset = 0;
}
/* fill in the CCS */
pccs = ccs_base(local) + ccsindex;
/* offset may be negative (translation strips the ethernet header),
 * so this can shrink len as well as grow it. */
len += TX_HEADER_LENGTH + offset;
writeb(CCS_TX_REQUEST, &pccs->cmd);
/* Buffer address and length are handed over big-endian, one byte
 * at a time. */
writeb(addr >> 8, &pccs->var.tx_request.tx_data_ptr[0]);
writeb(local->tib_length, &pccs->var.tx_request.tx_data_ptr[1]);
writeb(len >> 8, &pccs->var.tx_request.tx_data_length[0]);
writeb(len & 0xff, &pccs->var.tx_request.tx_data_length[1]);
/* TBD still need psm_cam? */
writeb(PSM_CAM, &pccs->var.tx_request.pow_sav_mode);
writeb(local->net_default_tx_rate, &pccs->var.tx_request.tx_rate);
writeb(0, &pccs->var.tx_request.antenna);
pr_debug("ray_hw_xmit default_tx_rate = 0x%x\n",
local->net_default_tx_rate);
/* Interrupt the firmware to process the command */
if (interrupt_ecf(local, ccsindex)) {
pr_debug("ray_hw_xmit failed - ECF not ready for intr\n");
/* TBD very inefficient to copy packet to buffer, and then not
send it, but the alternative is to queue the messages and that
won't be done for a while. Maybe set tbusy until a CCS is free?
*/
writeb(CCS_BUFFER_FREE, &pccs->buffer_status);
return XMIT_NO_INTR;
}
return XMIT_OK;
} /* end ray_hw_xmit */
/*===========================================================================*/
/*
 * Translate an ethernet frame at "data" into an 802 payload in card
 * memory at ptx (DIX II frames get an LLC/SNAP header; 802.x frames
 * are copied without their MAC header).  Returns the change in payload
 * length relative to the raw ethernet frame; this can be negative.
 */
static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
unsigned char *data, int len)
{
__be16 proto = ((struct ethhdr *)data)->h_proto;
/* EtherType >= 1536 distinguishes DIX II from 802.3 length fields. */
if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */
pr_debug("ray_cs translate_frame DIX II\n");
/* Copy LLC header to card buffer */
memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc));
/* Append the original EtherType (still big-endian) to the LLC. */
memcpy_toio(((void __iomem *)&ptx->var) + sizeof(eth2_llc),
(UCHAR *) &proto, 2);
if (proto == htons(ETH_P_AARP) || proto == htons(ETH_P_IPX)) {
/* This is the selective translation table, only 2 entries */
writeb(0xf8,
&((struct snaphdr_t __iomem *)ptx->var)->org[3]);
}
/* Copy body of ethernet packet without ethernet header */
memcpy_toio((void __iomem *)&ptx->var +
sizeof(struct snaphdr_t), data + ETH_HLEN,
len - ETH_HLEN);
return (int)sizeof(struct snaphdr_t) - ETH_HLEN;
} else { /* already 802 type, and proto is length */
pr_debug("ray_cs translate_frame 802\n");
if (proto == htons(0xffff)) { /* evil netware IPX 802.3 without LLC */
pr_debug("ray_cs translate_frame evil IPX\n");
memcpy_toio(&ptx->var, data + ETH_HLEN, len - ETH_HLEN);
return 0 - ETH_HLEN;
}
/* Identical copy to the IPX case above; only the debug output
 * differs between the two branches. */
memcpy_toio(&ptx->var, data + ETH_HLEN, len - ETH_HLEN);
return 0 - ETH_HLEN;
}
/* TBD do other frame types */
} /* end translate_frame */
/*===========================================================================*/
/*
 * Fill in the 802.11 MAC header of the tx message at ptx, choosing the
 * address layout from the operating mode (see the table below).
 */
static void ray_build_header(ray_dev_t *local, struct tx_msg __iomem *ptx,
UCHAR msg_type, unsigned char *data)
{
writeb(PROTOCOL_VER | msg_type, &ptx->mac.frame_ctl_1);
/*** IEEE 802.11 Address field assignments *************
TODS FROMDS addr_1 addr_2 addr_3 addr_4
Adhoc 0 0 dest src (terminal) BSSID N/A
AP to Terminal 0 1 dest AP(BSSID) source N/A
Terminal to AP 1 0 AP(BSSID) src (terminal) dest N/A
AP to AP 1 1 dest AP src AP dest source
*******************************************************/
if (local->net_type == ADHOC) {
writeb(0, &ptx->mac.frame_ctl_2);
/* dest and src are adjacent in the ethernet header, so one copy
 * of 2 * ADDRLEN fills addr_1 and addr_2 together. */
memcpy_toio(ptx->mac.addr_1, ((struct ethhdr *)data)->h_dest,
2 * ADDRLEN);
memcpy_toio(ptx->mac.addr_3, local->bss_id, ADDRLEN);
} else { /* infrastructure */
if (local->sparm.b4.a_acting_as_ap_status) {
writeb(FC2_FROM_DS, &ptx->mac.frame_ctl_2);
memcpy_toio(ptx->mac.addr_1,
((struct ethhdr *)data)->h_dest, ADDRLEN);
memcpy_toio(ptx->mac.addr_2, local->bss_id, 6);
memcpy_toio(ptx->mac.addr_3,
((struct ethhdr *)data)->h_source, ADDRLEN);
} else { /* Terminal */
writeb(FC2_TO_DS, &ptx->mac.frame_ctl_2);
memcpy_toio(ptx->mac.addr_1, local->bss_id, ADDRLEN);
memcpy_toio(ptx->mac.addr_2,
((struct ethhdr *)data)->h_source, ADDRLEN);
memcpy_toio(ptx->mac.addr_3,
((struct ethhdr *)data)->h_dest, ADDRLEN);
}
}
} /* end encapsulate_frame */
/*====================================================================*/
/*------------------------------------------------------------------*/
/*
* Wireless Handler : get protocol name
*/
/* Wireless handler: report the fixed protocol name. */
static int ray_get_name(struct net_device *dev, struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	static const char proto_name[] = "IEEE 802.11-FH";

	strcpy(wrqu->name, proto_name);
	return 0;
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : set frequency
*/
/*
 * Wireless handler: set the hop pattern ("frequency").  Only a bare
 * channel number (no exponent) is accepted, and only while the card is
 * still awaiting its parameters.
 */
static int ray_set_freq(struct net_device *dev, struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *priv = netdev_priv(dev);

	/* Parameters can only change before the card is started. */
	if (priv->card_status != CARD_AWAITING_PARAM)
		return -EBUSY;

	if (wrqu->freq.e > 0 || wrqu->freq.m > USA_HOP_MOD)
		return -EOPNOTSUPP;

	priv->sparm.b5.a_hop_pattern = wrqu->freq.m;
	return -EINPROGRESS;	/* Call commit handler */
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : get frequency
*/
/* Wireless handler: report the hop pattern as a bare channel number. */
static int ray_get_freq(struct net_device *dev, struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *priv = netdev_priv(dev);

	wrqu->freq.e = 0;
	wrqu->freq.m = priv->sparm.b5.a_hop_pattern;
	return 0;
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : set ESSID
*/
/*
 * Wireless handler: set the ESSID.  Rejected once the card has been
 * started; "any" ESSID and oversized strings are not supported.
 */
static int ray_set_essid(struct net_device *dev, struct iw_request_info *info,
			 union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *priv = netdev_priv(dev);

	if (priv->card_status != CARD_AWAITING_PARAM)
		return -EBUSY;

	/* flags == 0 means "any" ESSID, which this driver cannot do. */
	if (wrqu->essid.flags == 0)
		return -EOPNOTSUPP;

	if (wrqu->essid.length > IW_ESSID_MAX_SIZE)
		return -E2BIG;

	/* Zero-fill the fixed field, then copy the requested ESSID in. */
	memset(priv->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
	memcpy(priv->sparm.b5.a_current_ess_id, extra, wrqu->essid.length);
	return -EINPROGRESS;	/* Call commit handler */
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : get ESSID
*/
/*
 * Wireless handler: return the current ESSID.
 *
 * The stored field is a fixed-size, zero-padded buffer that is NOT
 * guaranteed to contain a NUL terminator when the ESSID occupies all
 * IW_ESSID_MAX_SIZE bytes, so the length must be computed with a
 * bounded scan - a plain strlen() could read past the copied bytes.
 */
static int ray_get_essid(struct net_device *dev, struct iw_request_info *info,
			 union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *local = netdev_priv(dev);

	/* Get the essid that was set */
	memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);

	/* Push it out ! */
	wrqu->essid.length = strnlen(extra, IW_ESSID_MAX_SIZE);
	wrqu->essid.flags = 1;	/* active */
	return 0;
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : get AP address
*/
/* Wireless handler: report the current BSSID as the AP address. */
static int ray_get_wap(struct net_device *dev, struct iw_request_info *info,
		       union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *priv = netdev_priv(dev);

	wrqu->ap_addr.sa_family = ARPHRD_ETHER;
	memcpy(wrqu->ap_addr.sa_data, priv->bss_id, ETH_ALEN);
	return 0;
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : set Bit-Rate
*/
/*
 * Wireless handler: set the bit rate.  Only 1 and 2 Mb/s are valid;
 * firmware 0x55 uses rate code 3 for its 2 Mb/s workaround.
 */
static int ray_set_rate(struct net_device *dev, struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *priv = netdev_priv(dev);
	int rate = wrqu->bitrate.value;

	if (priv->card_status != CARD_AWAITING_PARAM)
		return -EBUSY;

	if (rate != 1000000 && rate != 2000000)
		return -EINVAL;

	/* Hack for 1.5 Mb/s instead of 2 Mb/s */
	if (priv->fw_ver == 0x55 && rate == 2000000)	/* Please check */
		priv->net_default_tx_rate = 3;
	else
		priv->net_default_tx_rate = rate / 500000;

	return 0;
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : get Bit-Rate
*/
/* Wireless handler: report the bit rate (code 3 is the 2 Mb/s hack). */
static int ray_get_rate(struct net_device *dev, struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *priv = netdev_priv(dev);

	wrqu->bitrate.value = (priv->net_default_tx_rate == 3)
		? 2000000
		: priv->net_default_tx_rate * 500000;
	wrqu->bitrate.fixed = 0;	/* We are in auto mode */
	return 0;
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : set RTS threshold
*/
/*
 * Wireless handler: set the RTS threshold.  "Disabled" is encoded as
 * the maximum value 32767; the field is stored as two big-endian bytes.
 */
static int ray_set_rts(struct net_device *dev, struct iw_request_info *info,
		       union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *priv = netdev_priv(dev);
	int thresh = wrqu->rts.value;

	if (priv->card_status != CARD_AWAITING_PARAM)
		return -EBUSY;

	/* if(wrq->u.rts.fixed == 0) we should complain */
	if (wrqu->rts.disabled)
		thresh = 32767;
	else if (thresh < 0 || thresh > 2347)	/* max packet size ??? */
		return -EINVAL;

	priv->sparm.b5.a_rts_threshold[0] = (thresh >> 8) & 0xFF;
	priv->sparm.b5.a_rts_threshold[1] = thresh & 0xFF;
	return -EINPROGRESS;	/* Call commit handler */
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : get RTS threshold
*/
/* Wireless handler: report the RTS threshold (32767 means disabled). */
static int ray_get_rts(struct net_device *dev, struct iw_request_info *info,
		       union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *priv = netdev_priv(dev);
	int thresh = (priv->sparm.b5.a_rts_threshold[0] << 8)
		   + priv->sparm.b5.a_rts_threshold[1];

	wrqu->rts.value = thresh;
	wrqu->rts.disabled = (thresh == 32767);
	wrqu->rts.fixed = 1;
	return 0;
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : set Fragmentation threshold
*/
/*
 * Wireless handler: set the fragmentation threshold.  "Disabled" is
 * encoded as 32767; the field is stored as two big-endian bytes.
 */
static int ray_set_frag(struct net_device *dev, struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *priv = netdev_priv(dev);
	int thresh = wrqu->frag.value;

	if (priv->card_status != CARD_AWAITING_PARAM)
		return -EBUSY;

	/* if(wrq->u.frag.fixed == 0) should complain */
	if (wrqu->frag.disabled)
		thresh = 32767;
	else if (thresh < 256 || thresh > 2347)	/* To check out ! */
		return -EINVAL;

	priv->sparm.b5.a_frag_threshold[0] = (thresh >> 8) & 0xFF;
	priv->sparm.b5.a_frag_threshold[1] = thresh & 0xFF;
	return -EINPROGRESS;	/* Call commit handler */
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : get Fragmentation threshold
*/
/* Wireless handler: report the frag threshold (32767 means disabled). */
static int ray_get_frag(struct net_device *dev, struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *priv = netdev_priv(dev);
	int thresh = (priv->sparm.b5.a_frag_threshold[0] << 8)
		   + priv->sparm.b5.a_frag_threshold[1];

	wrqu->frag.value = thresh;
	wrqu->frag.disabled = (thresh == 32767);
	wrqu->frag.fixed = 1;
	return 0;
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : set Mode of Operation
*/
/*
 * Wireless handler: set the operating mode.  The card encodes the
 * network type as 0 = ad-hoc, 1 = infrastructure; other modes are
 * rejected.
 */
static int ray_set_mode(struct net_device *dev, struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *priv = netdev_priv(dev);

	if (priv->card_status != CARD_AWAITING_PARAM)
		return -EBUSY;

	if (wrqu->mode == IW_MODE_ADHOC)
		priv->sparm.b5.a_network_type = 0;
	else if (wrqu->mode == IW_MODE_INFRA)
		priv->sparm.b5.a_network_type = 1;
	else
		return -EINVAL;

	return -EINPROGRESS;	/* Call commit handler */
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : get Mode of Operation
*/
/* Wireless handler: report the operating mode (0 = ad-hoc). */
static int ray_get_mode(struct net_device *dev, struct iw_request_info *info,
			union iwreq_data *wrqu, char *extra)
{
	ray_dev_t *priv = netdev_priv(dev);

	wrqu->mode = priv->sparm.b5.a_network_type ? IW_MODE_INFRA
						   : IW_MODE_ADHOC;
	return 0;
}
/*------------------------------------------------------------------*/
/*
* Wireless Handler : get range info
*/
/* Wireless handler: fill in the capability range structure. */
static int ray_get_range(struct net_device *dev, struct iw_request_info *info,
			 union iwreq_data *wrqu, char *extra)
{
	struct iw_range *range = (struct iw_range *)extra;

	memset(range, 0, sizeof(*range));

	/* Set the length (very important for backward compatibility) */
	wrqu->data.length = sizeof(struct iw_range);

	/* Wireless Extension versions */
	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 9;

	/* Throughput and channel info */
	range->throughput = 1.1 * 1000 * 1000;	/* Put the right number here */
	range->num_channels = hop_pattern_length[(int)country];
	range->num_frequency = 0;

	/* Link quality maxima (correct values uncertain) */
	range->max_qual.qual = 0;
	range->max_qual.level = 255;
	range->max_qual.noise = 255;

	/* Supported bit rates */
	range->num_bitrates = 2;
	range->bitrate[0] = 1000000;	/* 1 Mb/s */
	range->bitrate[1] = 2000000;	/* 2 Mb/s */
	return 0;
}
/*------------------------------------------------------------------*/
/*
* Wireless Private Handler : set framing mode
*/
/*
 * Private wireless handler: set the module-wide framing flag (nonzero
 * selects frame translation in ray_hw_xmit, zero raw encapsulation).
 */
static int ray_set_framing(struct net_device *dev, struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	translate = extra[0];
	return 0;
}
/*------------------------------------------------------------------*/
/*
* Wireless Private Handler : get framing mode
*/
/* Private wireless handler: read back the framing flag. */
static int ray_get_framing(struct net_device *dev, struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	extra[0] = translate;
	return 0;
}
/*------------------------------------------------------------------*/
/*
* Wireless Private Handler : get country
*/
/* Private wireless handler: read back the configured country code. */
static int ray_get_country(struct net_device *dev, struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	extra[0] = country;
	return 0;
}
/*------------------------------------------------------------------*/
/*
* Commit handler : called after a bunch of SET operations
*/
static int ray_commit(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
/* Nothing to flush here: the SET handlers store parameters directly in
 * local->sparm and the card reads them when it is (re)started. */
return 0;
}
/*------------------------------------------------------------------*/
/*
* Stats handler : return Wireless Stats
*/
/*
 * Return the wireless statistics block.  Quality numbers come from the
 * first spy-list entry (ad-hoc mode only, when compiled with
 * WIRELESS_SPY); the noise figure is read live from card RAM whenever
 * the card is still present.
 */
static iw_stats *ray_get_wireless_stats(struct net_device *dev)
{
ray_dev_t *local = netdev_priv(dev);
struct pcmcia_device *link = local->finder;
struct status __iomem *p = local->sram + STATUS_BASE;
local->wstats.status = local->card_status;
#ifdef WIRELESS_SPY
if ((local->spy_data.spy_number > 0)
&& (local->sparm.b5.a_network_type == 0)) {
/* Get it from the first node in spy list */
local->wstats.qual.qual = local->spy_data.spy_stat[0].qual;
local->wstats.qual.level = local->spy_data.spy_stat[0].level;
local->wstats.qual.noise = local->spy_data.spy_stat[0].noise;
local->wstats.qual.updated =
local->spy_data.spy_stat[0].updated;
}
#endif /* WIRELESS_SPY */
if (pcmcia_dev_present(link)) {
local->wstats.qual.noise = readb(&p->rxnoise);
local->wstats.qual.updated |= 4;
}
return &local->wstats;
} /* end ray_get_wireless_stats */
/*------------------------------------------------------------------*/
/*
* Structures to export the Wireless Handlers
*/
/* Standard Wireless Extension handlers, indexed by ioctl number via
 * the IW_HANDLER() macro. */
static const iw_handler ray_handler[] = {
IW_HANDLER(SIOCSIWCOMMIT, ray_commit),
IW_HANDLER(SIOCGIWNAME, ray_get_name),
IW_HANDLER(SIOCSIWFREQ, ray_set_freq),
IW_HANDLER(SIOCGIWFREQ, ray_get_freq),
IW_HANDLER(SIOCSIWMODE, ray_set_mode),
IW_HANDLER(SIOCGIWMODE, ray_get_mode),
IW_HANDLER(SIOCGIWRANGE, ray_get_range),
#ifdef WIRELESS_SPY
IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
#endif /* WIRELESS_SPY */
IW_HANDLER(SIOCGIWAP, ray_get_wap),
IW_HANDLER(SIOCSIWESSID, ray_set_essid),
IW_HANDLER(SIOCGIWESSID, ray_get_essid),
IW_HANDLER(SIOCSIWRATE, ray_set_rate),
IW_HANDLER(SIOCGIWRATE, ray_get_rate),
IW_HANDLER(SIOCSIWRTS, ray_set_rts),
IW_HANDLER(SIOCGIWRTS, ray_get_rts),
IW_HANDLER(SIOCSIWFRAG, ray_set_frag),
IW_HANDLER(SIOCGIWFRAG, ray_get_frag),
};
#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */
#define SIOCGIPFRAMING SIOCIWFIRSTPRIV + 1 /* Get framing mode */
#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */
/* Private handlers, indexed by (ioctl - SIOCIWFIRSTPRIV).  Slot [2]
 * has no handler assigned (SIOCIWFIRSTPRIV + 2 is not defined above). */
static const iw_handler ray_private_handler[] = {
[0] = ray_set_framing,
[1] = ray_get_framing,
[3] = ray_get_country,
};
/* Descriptions of the private ioctls (argument shapes and iwpriv names)
 * matching ray_private_handler above. */
static const struct iw_priv_args ray_private_args[] = {
/* cmd, set_args, get_args, name */
{SIOCSIPFRAMING, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, 0,
"set_framing"},
{SIOCGIPFRAMING, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
"get_framing"},
{SIOCGIPCOUNTRY, 0, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
"get_country"},
};
/* Glue structure registered with the Wireless Extensions core. */
static const struct iw_handler_def ray_handler_def = {
.num_standard = ARRAY_SIZE(ray_handler),
.num_private = ARRAY_SIZE(ray_private_handler),
.num_private_args = ARRAY_SIZE(ray_private_args),
.standard = ray_handler,
.private = ray_private_handler,
.private_args = ray_private_args,
.get_wireless_stats = ray_get_wireless_stats,
};
/*===========================================================================*/
/*
 * net_device open hook.  Counts opens on the pcmcia device, performs
 * the deferred card initialization on first use (parameters may have
 * been tuned via Wireless Extensions since probe), then starts the tx
 * queue unless sniffer mode is active.
 */
static int ray_open(struct net_device *dev)
{
ray_dev_t *local = netdev_priv(dev);
struct pcmcia_device *link;
link = local->finder;
dev_dbg(&link->dev, "ray_open('%s')\n", dev->name);
if (link->open == 0)
local->num_multi = 0;
link->open++;
/* If the card is not started, time to start it ! - Jean II */
if (local->card_status == CARD_AWAITING_PARAM) {
int i;
dev_dbg(&link->dev, "ray_open: doing init now !\n");
/* Download startup parameters */
if ((i = dl_startup_params(dev)) < 0) {
printk(KERN_INFO
"ray_dev_init dl_startup_params failed - "
"returns 0x%x\n", i);
/* NOTE(review): link->open is not decremented on this
 * failure path - verify pairing with ray_dev_close. */
return -1;
}
}
/* In sniffer mode the interface never transmits. */
if (sniffer)
netif_stop_queue(dev);
else
netif_start_queue(dev);
dev_dbg(&link->dev, "ray_open ending\n");
return 0;
} /* end ray_open */
/*===========================================================================*/
/*
 * net_device stop hook: drop the open count and stop the tx queue.
 * The hardware itself is left running (see the note below).
 */
static int ray_dev_close(struct net_device *dev)
{
ray_dev_t *local = netdev_priv(dev);
struct pcmcia_device *link;
link = local->finder;
dev_dbg(&link->dev, "ray_dev_close('%s')\n", dev->name);
link->open--;
netif_stop_queue(dev);
/* In here, we should stop the hardware (stop card from beeing active)
 * and set local->card_status to CARD_AWAITING_PARAM, so that while the
 * card is closed we can chage its configuration.
 * Probably also need a COR reset to get sane state - Jean II */
return 0;
} /* end ray_dev_close */
/*===========================================================================*/
/* Stub: a real hardware reset is not implemented, only a debug trace. */
static void ray_reset(struct net_device *dev)
{
pr_debug("ray_reset entered\n");
}
/*===========================================================================*/
/* Cause a firmware interrupt if it is ready for one */
/* Return nonzero if not ready */
static int interrupt_ecf(ray_dev_t *local, int ccs)
{
/* Bounded busy-wait: give up after 50 polls of the interrupt bit. */
int i = 50;
struct pcmcia_device *link = local->finder;
if (!(pcmcia_dev_present(link))) {
dev_dbg(&link->dev, "ray_cs interrupt_ecf - device not present\n");
return -1;
}
dev_dbg(&link->dev, "interrupt_ecf(local=%p, ccs = 0x%x\n", local, ccs);
/* Wait for the firmware to clear the previous interrupt request. */
while (i &&
(readb(local->amem + CIS_OFFSET + ECF_INTR_OFFSET) &
ECF_INTR_SET))
i--;
if (i == 0) {
dev_dbg(&link->dev, "ray_cs interrupt_ecf card not ready for interrupt\n");
return -1;
}
/* Fill the mailbox, then kick the card */
writeb(ccs, local->sram + SCB_BASE);
writeb(ECF_INTR_SET, local->amem + CIS_OFFSET + ECF_INTR_OFFSET);
return 0;
} /* interrupt_ecf */
/*===========================================================================*/
/* Get next free transmit CCS */
/* Return - index of current tx ccs */
/*
 * Claim the first free transmit CCS slot in [0, NUMBER_OF_TX_CCS):
 * mark it busy and terminate its link field. Returns the slot index,
 * ECARDGONE if the card vanished, ECCSBUSY if another context holds
 * the scan lock, or ECCSFULL when every tx slot is in use.
 */
static int get_free_tx_ccs(ray_dev_t *local)
{
	int i;
	struct ccs __iomem *pccs = ccs_base(local);
	struct pcmcia_device *link = local->finder;

	if (!(pcmcia_dev_present(link))) {
		dev_dbg(&link->dev, "ray_cs get_free_tx_ccs - device not present\n");
		return ECARDGONE;
	}
	/* Bit 0 of tx_ccs_lock serializes concurrent scans (non-blocking) */
	if (test_and_set_bit(0, &local->tx_ccs_lock)) {
		dev_dbg(&link->dev, "ray_cs tx_ccs_lock busy\n");
		return ECCSBUSY;
	}
	for (i = 0; i < NUMBER_OF_TX_CCS; i++) {
		if (readb(&(pccs + i)->buffer_status) == CCS_BUFFER_FREE) {
			/* Claim the slot before releasing the lock */
			writeb(CCS_BUFFER_BUSY, &(pccs + i)->buffer_status);
			writeb(CCS_END_LIST, &(pccs + i)->link);
			local->tx_ccs_lock = 0;
			return i;
		}
	}
	local->tx_ccs_lock = 0;
	dev_dbg(&link->dev, "ray_cs ERROR no free tx CCS for raylink card\n");
	return ECCSFULL;
} /* get_free_tx_ccs */
/*===========================================================================*/
/* Get next free CCS */
/* Return - index of current ccs */
/*
 * Claim the first free command CCS slot in [NUMBER_OF_TX_CCS,
 * NUMBER_OF_CCS): mark it busy and terminate its link field. Returns
 * the slot index, ECARDGONE if the card vanished, ECCSBUSY if another
 * context holds the scan lock, or ECCSFULL when every slot is in use.
 */
static int get_free_ccs(ray_dev_t *local)
{
	int i;
	struct ccs __iomem *pccs = ccs_base(local);
	struct pcmcia_device *link = local->finder;

	if (!(pcmcia_dev_present(link))) {
		dev_dbg(&link->dev, "ray_cs get_free_ccs - device not present\n");
		return ECARDGONE;
	}
	/* Bit 0 of ccs_lock serializes concurrent scans (non-blocking) */
	if (test_and_set_bit(0, &local->ccs_lock)) {
		dev_dbg(&link->dev, "ray_cs ccs_lock busy\n");
		return ECCSBUSY;
	}
	for (i = NUMBER_OF_TX_CCS; i < NUMBER_OF_CCS; i++) {
		if (readb(&(pccs + i)->buffer_status) == CCS_BUFFER_FREE) {
			/* Claim the slot before releasing the lock */
			writeb(CCS_BUFFER_BUSY, &(pccs + i)->buffer_status);
			writeb(CCS_END_LIST, &(pccs + i)->link);
			local->ccs_lock = 0;
			return i;
		}
	}
	local->ccs_lock = 0;
	dev_dbg(&link->dev, "ray_cs ERROR no free CCS for raylink card\n");
	return ECCSFULL;
} /* get_free_ccs */
/*===========================================================================*/
/*
 * Timer callback: the access point did not answer our authentication
 * request in time, so report the failure and fall back to rejoining
 * the network from scratch.
 */
static void authenticate_timeout(u_long data)
{
	ray_dev_t *local = (ray_dev_t *) data;

	del_timer(&local->timer);
	printk(KERN_INFO "ray_cs Authentication with access point failed"
	       " - timeout\n");
	join_net((u_long) local);
}
/*===========================================================================*/
/*
 * Parse a hex-digit MAC address string into @out (ADDRLEN bytes).
 * The string is consumed right to left, two hex digits per byte,
 * filling out[5] down towards out[0]; at most 12 trailing digits are
 * used and any missing leading bytes stay zero.
 * Returns 1 on success, 0 on NULL/short/invalid input.
 * NOTE(review): the loop condition is "j > 0", so in_str[0] is never
 * consumed - for an even-length string the first digit is silently
 * dropped. Confirm whether this is intended.
 */
static int parse_addr(char *in_str, UCHAR *out)
{
	int len;
	int i, j, k;
	int status;

	if (in_str == NULL)
		return 0;
	if ((len = strlen(in_str)) < 2)
		return 0;
	memset(out, 0, ADDRLEN);
	status = 1;
	j = len - 1;	/* index of the last (least significant) digit */
	if (j > 12)
		j = 12;	/* cap at 12 hex digits = 6 bytes */
	i = 5;		/* fill the output from its last byte backwards */
	while (j > 0) {
		/* low nibble of out[i] */
		if ((k = hex_to_bin(in_str[j--])) != -1)
			out[i] = k;
		else
			return 0;
		if (j == 0)
			break;
		/* high nibble of out[i] */
		if ((k = hex_to_bin(in_str[j--])) != -1)
			out[i] += k << 4;
		else
			return 0;
		if (!i--)
			break;
	}
	return status;
}
/*===========================================================================*/
/*
 * ndo_get_stats handler: fold the card's pending error counters into
 * the driver's net_device_stats and clear them on the card so the
 * 16-bit hardware counters cannot overflow. Each counter has a
 * "for_host" flag byte that gates whether a fresh value is pending.
 */
static struct net_device_stats *ray_get_stats(struct net_device *dev)
{
	ray_dev_t *local = netdev_priv(dev);
	struct pcmcia_device *link = local->finder;
	struct status __iomem *p = local->sram + STATUS_BASE;

	if (!(pcmcia_dev_present(link))) {
		dev_dbg(&link->dev, "ray_cs net_device_stats - device not present\n");
		return &local->stats;
	}
	/* Counters are byte-swapped relative to readw, hence swab16 */
	if (readb(&p->mrx_overflow_for_host)) {
		local->stats.rx_over_errors += swab16(readw(&p->mrx_overflow));
		writeb(0, &p->mrx_overflow);
		writeb(0, &p->mrx_overflow_for_host);
	}
	if (readb(&p->mrx_checksum_error_for_host)) {
		local->stats.rx_crc_errors +=
		    swab16(readw(&p->mrx_checksum_error));
		writeb(0, &p->mrx_checksum_error);
		writeb(0, &p->mrx_checksum_error_for_host);
	}
	if (readb(&p->rx_hec_error_for_host)) {
		local->stats.rx_frame_errors += swab16(readw(&p->rx_hec_error));
		writeb(0, &p->rx_hec_error);
		writeb(0, &p->rx_hec_error_for_host);
	}
	return &local->stats;
}
/*===========================================================================*/
/*
 * Send one parameter object to the firmware via an UPDATE_PARAMS CCS.
 * @objid: firmware object id; @value/@len: the parameter bytes.
 * Fails silently (debug trace only) if the card is gone or no CCS is
 * free; completion/failure is reported back through ray_interrupt().
 */
static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value,
			    int len)
{
	ray_dev_t *local = netdev_priv(dev);
	struct pcmcia_device *link = local->finder;
	int ccsindex;
	int i;
	struct ccs __iomem *pccs;

	if (!(pcmcia_dev_present(link))) {
		dev_dbg(&link->dev, "ray_update_parm - device not present\n");
		return;
	}
	if ((ccsindex = get_free_ccs(local)) < 0) {
		dev_dbg(&link->dev, "ray_update_parm - No free ccs\n");
		return;
	}
	pccs = ccs_base(local) + ccsindex;
	writeb(CCS_UPDATE_PARAMS, &pccs->cmd);
	writeb(objid, &pccs->var.update_param.object_id);
	writeb(1, &pccs->var.update_param.number_objects);
	writeb(0, &pccs->var.update_param.failure_cause);
	/* NOTE(review): the destination offset never advances with i, so
	 * every byte lands on the same address and only the last byte of
	 * a multi-byte value survives. Confirm whether the firmware
	 * latches each writeb or this should be "+ i". */
	for (i = 0; i < len; i++) {
		writeb(value[i], local->sram + HOST_TO_ECF_BASE);
	}
	/* Interrupt the firmware to process the command */
	if (interrupt_ecf(local, ccsindex)) {
		dev_dbg(&link->dev, "ray_cs associate failed - ECF not ready for intr\n");
		/* Hand the claimed CCS back since the firmware never saw it */
		writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
	}
}
/*===========================================================================*/
/*
 * Push the multicast filter to the card via an UPDATE_MULTICAST_LIST
 * CCS. With @all != 0 the card is told to accept all multicast (count
 * byte 0xff); otherwise the kernel's current MC address list is copied
 * into card memory followed by its count. local->num_multi caches what
 * was sent so set_multicast_list() can skip redundant updates.
 */
static void ray_update_multi_list(struct net_device *dev, int all)
{
	int ccsindex;
	struct ccs __iomem *pccs;
	ray_dev_t *local = netdev_priv(dev);
	struct pcmcia_device *link = local->finder;
	void __iomem *p = local->sram + HOST_TO_ECF_BASE;

	if (!(pcmcia_dev_present(link))) {
		dev_dbg(&link->dev, "ray_update_multi_list - device not present\n");
		return;
	} else
		dev_dbg(&link->dev, "ray_update_multi_list(%p)\n", dev);
	if ((ccsindex = get_free_ccs(local)) < 0) {
		dev_dbg(&link->dev, "ray_update_multi - No free ccs\n");
		return;
	}
	pccs = ccs_base(local) + ccsindex;
	writeb(CCS_UPDATE_MULTICAST_LIST, &pccs->cmd);
	if (all) {
		/* 0xff is the firmware's "accept all multicast" sentinel */
		writeb(0xff, &pccs->var);
		local->num_multi = 0xff;
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;
		/* Copy the kernel's list of MC addresses to card */
		netdev_for_each_mc_addr(ha, dev) {
			memcpy_toio(p, ha->addr, ETH_ALEN);
			dev_dbg(&link->dev, "ray_update_multi add addr %pm\n",
				ha->addr);
			p += ETH_ALEN;
			i++;
		}
		/* Clamp the reported count to what fits in card memory.
		 * NOTE(review): the copy loop above is not clamped, only
		 * the count - confirm the list cannot exceed 256 bytes. */
		if (i > 256 / ADDRLEN)
			i = 256 / ADDRLEN;
		writeb((UCHAR) i, &pccs->var);
		dev_dbg(&link->dev, "ray_cs update_multi %d addresses in list\n", i);
		/* Interrupt the firmware to process the command */
		local->num_multi = i;
	}
	if (interrupt_ecf(local, ccsindex)) {
		dev_dbg(&link->dev,
			"ray_cs update_multi failed - ECF not ready for intr\n");
		/* Hand the claimed CCS back since the firmware never saw it */
		writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
	}
} /* end ray_update_multi_list */
/*===========================================================================*/
/*
 * ndo_set_rx_mode handler: propagate promiscuous-mode transitions and
 * multicast filter changes to the card. The firmware is only bothered
 * when the requested state differs from what it already has.
 */
static void set_multicast_list(struct net_device *dev)
{
	ray_dev_t *local = netdev_priv(dev);
	UCHAR promisc;

	pr_debug("ray_cs set_multicast_list(%p)\n", dev);

	/* Promiscuous mode: update the card only on an actual transition */
	if (dev->flags & IFF_PROMISC) {
		if (local->sparm.b5.a_promiscuous_mode == 0) {
			pr_debug("ray_cs set_multicast_list promisc on\n");
			local->sparm.b5.a_promiscuous_mode = 1;
			promisc = 1;
			ray_update_parm(dev, OBJID_promiscuous_mode,
					&promisc, sizeof(promisc));
		}
	} else if (local->sparm.b5.a_promiscuous_mode == 1) {
		pr_debug("ray_cs set_multicast_list promisc off\n");
		local->sparm.b5.a_promiscuous_mode = 0;
		promisc = 0;
		ray_update_parm(dev, OBJID_promiscuous_mode,
				&promisc, sizeof(promisc));
	}

	/* Multicast: all-multi sentinel, or resend the list if it changed */
	if (dev->flags & IFF_ALLMULTI)
		ray_update_multi_list(dev, 1);
	else if (local->num_multi != netdev_mc_count(dev))
		ray_update_multi_list(dev, 0);
} /* end set_multicast_list */
/*=============================================================================
* All routines below here are run at interrupt time.
=============================================================================*/
/*
 * Card interrupt handler. The firmware posts an index in the SCB that
 * is either a completed Command Control Structure (index < NUMBER_OF_CCS)
 * or an unsolicited Receive Control Structure; dispatch accordingly,
 * release the buffer back to the card, then acknowledge the interrupt.
 */
static irqreturn_t ray_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct pcmcia_device *link;
	ray_dev_t *local;
	struct ccs __iomem *pccs;
	struct rcs __iomem *prcs;
	UCHAR rcsindex;
	UCHAR tmp;
	UCHAR cmd;
	UCHAR status;

	if (dev == NULL)	/* Note that we want interrupts with dev->start == 0 */
		return IRQ_NONE;

	pr_debug("ray_cs: interrupt for *dev=%p\n", dev);

	local = netdev_priv(dev);
	link = (struct pcmcia_device *)local->finder;
	if (!pcmcia_dev_present(link)) {
		pr_debug(
			"ray_cs interrupt from device not present or suspended.\n");
		return IRQ_NONE;
	}
	rcsindex = readb(&((struct scb __iomem *)(local->sram))->rcs_index);

	/* Out-of-range index: just ack the interrupt and bail */
	if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) {
		dev_dbg(&link->dev, "ray_cs interrupt bad rcsindex = 0x%x\n", rcsindex);
		clear_interrupt(local);
		return IRQ_HANDLED;
	}
	if (rcsindex < NUMBER_OF_CCS) {	/* If it's a returned CCS */
		pccs = ccs_base(local) + rcsindex;
		cmd = readb(&pccs->cmd);
		status = readb(&pccs->buffer_status);
		switch (cmd) {
		case CCS_DOWNLOAD_STARTUP_PARAMS:	/* Happens in firmware someday */
			del_timer(&local->timer);
			if (status == CCS_COMMAND_COMPLETE) {
				dev_dbg(&link->dev,
					"ray_cs interrupt download_startup_parameters OK\n");
			} else {
				dev_dbg(&link->dev,
					"ray_cs interrupt download_startup_parameters fail\n");
			}
			break;
		case CCS_UPDATE_PARAMS:
			dev_dbg(&link->dev, "ray_cs interrupt update params done\n");
			if (status != CCS_COMMAND_COMPLETE) {
				tmp =
				    readb(&pccs->var.update_param.
					  failure_cause);
				dev_dbg(&link->dev,
					"ray_cs interrupt update params failed - reason %d\n",
					tmp);
			}
			break;
		case CCS_REPORT_PARAMS:
			dev_dbg(&link->dev, "ray_cs interrupt report params done\n");
			break;
		case CCS_UPDATE_MULTICAST_LIST:	/* Note that this CCS isn't returned */
			dev_dbg(&link->dev,
				"ray_cs interrupt CCS Update Multicast List done\n");
			break;
		case CCS_UPDATE_POWER_SAVINGS_MODE:
			dev_dbg(&link->dev,
				"ray_cs interrupt update power save mode done\n");
			break;
		case CCS_START_NETWORK:
		case CCS_JOIN_NETWORK:
			if (status == CCS_COMMAND_COMPLETE) {
				/* Acquisition succeeded: record BSSID, rates
				 * and encryption, then authenticate if we are
				 * an infrastructure station (not sniffing). */
				if (readb
				    (&pccs->var.start_network.net_initiated) ==
				    1) {
					dev_dbg(&link->dev,
						"ray_cs interrupt network \"%s\" started\n",
						local->sparm.b4.a_current_ess_id);
				} else {
					dev_dbg(&link->dev,
						"ray_cs interrupt network \"%s\" joined\n",
						local->sparm.b4.a_current_ess_id);
				}
				memcpy_fromio(&local->bss_id,
					      pccs->var.start_network.bssid,
					      ADDRLEN);
				/* build-4 firmware reports version 0x55 and
				 * no usable default tx rate */
				if (local->fw_ver == 0x55)
					local->net_default_tx_rate = 3;
				else
					local->net_default_tx_rate =
					    readb(&pccs->var.start_network.
						  net_default_tx_rate);
				local->encryption =
				    readb(&pccs->var.start_network.encryption);
				if (!sniffer && (local->net_type == INFRA)
				    && !(local->sparm.b4.a_acting_as_ap_status)) {
					authenticate(local);
				}
				local->card_status = CARD_ACQ_COMPLETE;
			} else {
				/* Acquisition failed: retry in 5 seconds.
				 * NOTE(review): "status" (a buffer status) is
				 * compared against CCS_START_NETWORK (a command
				 * opcode); "cmd" looks like the intended
				 * operand - confirm against firmware docs. */
				local->card_status = CARD_ACQ_FAILED;
				del_timer(&local->timer);
				local->timer.expires = jiffies + HZ * 5;
				local->timer.data = (long)local;
				if (status == CCS_START_NETWORK) {
					dev_dbg(&link->dev,
						"ray_cs interrupt network \"%s\" start failed\n",
						local->sparm.b4.a_current_ess_id);
					local->timer.function = start_net;
				} else {
					dev_dbg(&link->dev,
						"ray_cs interrupt network \"%s\" join failed\n",
						local->sparm.b4.a_current_ess_id);
					local->timer.function = join_net;
				}
				add_timer(&local->timer);
			}
			break;
		case CCS_START_ASSOCIATION:
			if (status == CCS_COMMAND_COMPLETE) {
				local->card_status = CARD_ASSOC_COMPLETE;
				dev_dbg(&link->dev, "ray_cs association successful\n");
			} else {
				dev_dbg(&link->dev, "ray_cs association failed,\n");
				local->card_status = CARD_ASSOC_FAILED;
				join_net((u_long) local);
			}
			break;
		case CCS_TX_REQUEST:
			if (status == CCS_COMMAND_COMPLETE) {
				dev_dbg(&link->dev,
					"ray_cs interrupt tx request complete\n");
			} else {
				dev_dbg(&link->dev,
					"ray_cs interrupt tx request failed\n");
			}
			if (!sniffer)
				netif_start_queue(dev);
			netif_wake_queue(dev);
			break;
		case CCS_TEST_MEMORY:
			dev_dbg(&link->dev, "ray_cs interrupt mem test done\n");
			break;
		case CCS_SHUTDOWN:
			dev_dbg(&link->dev,
				"ray_cs interrupt Unexpected CCS returned - Shutdown\n");
			break;
		case CCS_DUMP_MEMORY:
			dev_dbg(&link->dev, "ray_cs interrupt dump memory done\n");
			break;
		case CCS_START_TIMER:
			dev_dbg(&link->dev,
				"ray_cs interrupt DING - raylink timer expired\n");
			break;
		default:
			dev_dbg(&link->dev,
				"ray_cs interrupt Unexpected CCS 0x%x returned 0x%x\n",
				rcsindex, cmd);
		}
		/* Return the CCS buffer to the card */
		writeb(CCS_BUFFER_FREE, &pccs->buffer_status);
	} else {		/* It's an RCS */

		prcs = rcs_base(local) + rcsindex;

		switch (readb(&prcs->interrupt_id)) {
		case PROCESS_RX_PACKET:
			ray_rx(dev, local, prcs);
			break;
		case REJOIN_NET_COMPLETE:
			dev_dbg(&link->dev, "ray_cs interrupt rejoin net complete\n");
			local->card_status = CARD_ACQ_COMPLETE;
			/* do we need to clear tx buffers CCS's? */
			if (local->sparm.b4.a_network_type == ADHOC) {
				if (!sniffer)
					netif_start_queue(dev);
			} else {
				memcpy_fromio(&local->bss_id,
					      prcs->var.rejoin_net_complete.
					      bssid, ADDRLEN);
				dev_dbg(&link->dev, "ray_cs new BSSID = %pm\n",
					local->bss_id);
				if (!sniffer)
					authenticate(local);
			}
			break;
		case ROAMING_INITIATED:
			dev_dbg(&link->dev, "ray_cs interrupt roaming initiated\n");
			netif_stop_queue(dev);
			local->card_status = CARD_DOING_ACQ;
			break;
		case JAPAN_CALL_SIGN_RXD:
			dev_dbg(&link->dev, "ray_cs interrupt japan call sign rx\n");
			break;
		default:
			dev_dbg(&link->dev,
				"ray_cs Unexpected interrupt for RCS 0x%x cmd = 0x%x\n",
				rcsindex,
				(unsigned int)readb(&prcs->interrupt_id));
			break;
		}
		/* Return the RCS buffer to the card */
		writeb(CCS_BUFFER_FREE, &prcs->buffer_status);
	}
	clear_interrupt(local);
	return IRQ_HANDLED;
} /* ray_interrupt */
/*===========================================================================*/
/*
 * Handle a PROCESS_RX_PACKET RCS: locate the frame in the card's
 * circular receive buffer, record its signal level, and dispatch on
 * the 802.11 frame type byte (data / auth / deauth / beacon / null).
 * In sniffer mode management frames are delivered as raw data instead
 * of being interpreted.
 */
static void ray_rx(struct net_device *dev, ray_dev_t *local,
		   struct rcs __iomem *prcs)
{
	int rx_len;
	unsigned int pkt_addr;
	void __iomem *pmsg;
	pr_debug("ray_rx process rx packet\n");

	/* Calculate address of packet within Rx buffer */
	pkt_addr = ((readb(&prcs->var.rx_packet.rx_data_ptr[0]) << 8)
		    + readb(&prcs->var.rx_packet.rx_data_ptr[1])) & RX_BUFF_END;
	/* Length of first packet fragment */
	rx_len = (readb(&prcs->var.rx_packet.rx_data_length[0]) << 8)
	    + readb(&prcs->var.rx_packet.rx_data_length[1]);

	local->last_rsl = readb(&prcs->var.rx_packet.rx_sig_lev);
	pmsg = local->rmem + pkt_addr;
	/* First byte of the frame is its 802.11 type */
	switch (readb(pmsg)) {
	case DATA_TYPE:
		pr_debug("ray_rx data type\n");
		rx_data(dev, prcs, pkt_addr, rx_len);
		break;
	case AUTHENTIC_TYPE:
		pr_debug("ray_rx authentic type\n");
		if (sniffer)
			rx_data(dev, prcs, pkt_addr, rx_len);
		else
			rx_authenticate(local, prcs, pkt_addr, rx_len);
		break;
	case DEAUTHENTIC_TYPE:
		pr_debug("ray_rx deauth type\n");
		if (sniffer)
			rx_data(dev, prcs, pkt_addr, rx_len);
		else
			rx_deauthenticate(local, prcs, pkt_addr, rx_len);
		break;
	case NULL_MSG_TYPE:
		pr_debug("ray_cs rx NULL msg\n");
		break;
	case BEACON_TYPE:
		pr_debug("ray_rx beacon type\n");
		if (sniffer)
			rx_data(dev, prcs, pkt_addr, rx_len);
		/* Cache the (truncated) beacon for /proc reporting */
		copy_from_rx_buff(local, (UCHAR *) &local->last_bcn, pkt_addr,
				  rx_len < sizeof(struct beacon_rx) ?
				  rx_len : sizeof(struct beacon_rx));

		local->beacon_rxed = 1;
		/* Get the statistics so the card counters never overflow */
		ray_get_stats(dev);
		break;
	default:
		pr_debug("ray_cs unknown pkt type %2x\n",
			 (unsigned int)readb(pmsg));
		break;
	}
} /* end ray_rx */
/*===========================================================================*/
/*
 * Deliver a received data frame to the network stack. Validates the
 * length (unless sniffing), verifies that chained fragment lengths add
 * up, copies all fragments out of the card's circular buffer into a
 * fresh skb, strips or translates the 802.11 MAC header, and hands the
 * skb to netif_rx(). Fragment chains are capped at 17 links.
 */
static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
		    unsigned int pkt_addr, int rx_len)
{
	struct sk_buff *skb = NULL;
	struct rcs __iomem *prcslink = prcs;
	ray_dev_t *local = netdev_priv(dev);
	UCHAR *rx_ptr;
	int total_len;
	int tmp;
#ifdef WIRELESS_SPY
	int siglev = local->last_rsl;
	u_char linksrcaddr[ETH_ALEN];	/* Other end of the wireless link */
#endif

	if (!sniffer) {
		/* Sanity-check the frame length against the MTU.
		 * (Both branches are currently identical; the translated
		 * bound is marked TBD.) */
		if (translate) {
			/* TBD length needs fixing for translated header */
			if (rx_len < (ETH_HLEN + RX_MAC_HEADER_LENGTH) ||
			    rx_len >
			    (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
			     FCS_LEN)) {
				pr_debug(
					"ray_cs invalid packet length %d received\n",
					rx_len);
				return;
			}
		} else {	/* encapsulated ethernet */

			if (rx_len < (ETH_HLEN + RX_MAC_HEADER_LENGTH) ||
			    rx_len >
			    (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
			     FCS_LEN)) {
				pr_debug(
					"ray_cs invalid packet length %d received\n",
					rx_len);
				return;
			}
		}
	}
	pr_debug("ray_cs rx_data packet\n");
	/* If fragmented packet, verify sizes of fragments add up */
	if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) {
		pr_debug("ray_cs rx'ed fragment\n");
		tmp = (readb(&prcs->var.rx_packet.totalpacketlength[0]) << 8)
		    + readb(&prcs->var.rx_packet.totalpacketlength[1]);
		total_len = tmp;
		prcslink = prcs;
		/* Walk the chain subtracting each fragment's length */
		do {
			tmp -=
			    (readb(&prcslink->var.rx_packet.rx_data_length[0])
			     << 8)
			    + readb(&prcslink->var.rx_packet.rx_data_length[1]);
			if (readb(&prcslink->var.rx_packet.next_frag_rcs_index)
			    == 0xFF || tmp < 0)
				break;
			prcslink = rcs_base(local)
			    + readb(&prcslink->link_field);
		} while (1);
		if (tmp < 0) {
			pr_debug(
				"ray_cs rx_data fragment lengths don't add up\n");
			local->stats.rx_dropped++;
			release_frag_chain(local, prcs);
			return;
		}
	} else {		/* Single unfragmented packet */
		total_len = rx_len;
	}

	skb = dev_alloc_skb(total_len + 5);
	if (skb == NULL) {
		pr_debug("ray_cs rx_data could not allocate skb\n");
		local->stats.rx_dropped++;
		if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF)
			release_frag_chain(local, prcs);
		return;
	}
	skb_reserve(skb, 2);	/* Align IP on 16 byte (TBD check this) */

	pr_debug("ray_cs rx_data total_len = %x, rx_len = %x\n", total_len,
		 rx_len);

	/************************/
	/* Reserve enough room for the whole damn packet. */
	rx_ptr = skb_put(skb, total_len);
	/* Copy the whole packet to sk_buff */
	rx_ptr +=
	    copy_from_rx_buff(local, rx_ptr, pkt_addr & RX_BUFF_END, rx_len);
	/* Get source address */
#ifdef WIRELESS_SPY
	skb_copy_from_linear_data_offset(skb,
					 offsetof(struct mac_header, addr_2),
					 linksrcaddr, ETH_ALEN);
#endif
	/* Now, deal with encapsulation/translation/sniffer */
	if (!sniffer) {
		if (!translate) {
			/* Encapsulated ethernet, so just lop off 802.11 MAC header */
			/* TBD reserve skb_reserve( skb, RX_MAC_HEADER_LENGTH); */
			skb_pull(skb, RX_MAC_HEADER_LENGTH);
		} else {
			/* Do translation */
			untranslate(local, skb, total_len);
		}
	} else {		/* sniffer mode, so just pass whole packet */
	};

	/************************/
	/* Now pick up the rest of the fragments if any */
	tmp = 17;	/* hard cap on chained fragments */
	if (readb(&prcs->var.rx_packet.next_frag_rcs_index) != 0xFF) {
		prcslink = prcs;
		pr_debug("ray_cs rx_data in fragment loop\n");
		do {
			prcslink = rcs_base(local)
			    +
			    readb(&prcslink->var.rx_packet.next_frag_rcs_index);
			rx_len =
			    ((readb(&prcslink->var.rx_packet.rx_data_length[0])
			      << 8)
			     +
			     readb(&prcslink->var.rx_packet.rx_data_length[1]))
			    & RX_BUFF_END;
			pkt_addr =
			    ((readb(&prcslink->var.rx_packet.rx_data_ptr[0]) <<
			      8)
			     + readb(&prcslink->var.rx_packet.rx_data_ptr[1]))
			    & RX_BUFF_END;
			rx_ptr +=
			    copy_from_rx_buff(local, rx_ptr, pkt_addr, rx_len);

		} while (tmp-- &&
			 readb(&prcslink->var.rx_packet.next_frag_rcs_index) !=
			 0xFF);
		release_frag_chain(local, prcs);
	}

	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	local->stats.rx_packets++;
	local->stats.rx_bytes += total_len;

	/* Gather signal strength per address */
#ifdef WIRELESS_SPY
	/* For the Access Point or the node having started the ad-hoc net
	 * note : ad-hoc work only in some specific configurations, but we
	 * kludge in ray_get_wireless_stats... */
	if (!memcmp(linksrcaddr, local->bss_id, ETH_ALEN)) {
		/* Update statistics */
		/*local->wstats.qual.qual = none ? */
		local->wstats.qual.level = siglev;
		/*local->wstats.qual.noise = none ? */
		local->wstats.qual.updated = 0x2;
	}
	/* Now, update the spy stuff */
	{
		struct iw_quality wstats;
		wstats.level = siglev;
		/* wstats.noise = none ? */
		/* wstats.qual = none ? */
		wstats.updated = 0x2;
		/* Update spy records */
		wireless_spy_update(dev, linksrcaddr, &wstats);
	}
#endif /* WIRELESS_SPY */
} /* end rx_data */
/*===========================================================================*/
/*
 * Rewrite a received 802.11 frame into an ethernet frame in place:
 * examine the (possible) SNAP header after the 802.11 MAC header,
 * choose how much header to strip (delta), build an ethhdr at the new
 * front of the skb and copy in the saved source/destination addresses.
 * Handles: non-SNAP (802.3 length frame), bridge-tunnel SNAP (EtherII),
 * RFC-1042 SNAP (with the IPX/AARP exception), and unknown OUIs.
 */
static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
{
	snaphdr_t *psnap = (snaphdr_t *) (skb->data + RX_MAC_HEADER_LENGTH);
	struct ieee80211_hdr *pmac = (struct ieee80211_hdr *)skb->data;
	__be16 type = *(__be16 *) psnap->ethertype;
	int delta;
	struct ethhdr *peth;
	UCHAR srcaddr[ADDRLEN];
	UCHAR destaddr[ADDRLEN];
	static const UCHAR org_bridge[3] = { 0, 0, 0xf8 };
	static const UCHAR org_1042[3] = { 0, 0, 0 };

	/* Save the addresses before the header area is overwritten */
	memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN);
	memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN);

#if 0
	if {
		print_hex_dump(KERN_DEBUG, "skb->data before untranslate: ",
			       DUMP_PREFIX_NONE, 16, 1,
			       skb->data, 64, true);
		printk(KERN_DEBUG
		       "type = %08x, xsap = %02x%02x%02x, org = %02x02x02x\n",
		       ntohs(type), psnap->dsap, psnap->ssap, psnap->ctrl,
		       psnap->org[0], psnap->org[1], psnap->org[2]);
		printk(KERN_DEBUG "untranslate skb->data = %p\n", skb->data);
	}
#endif

	if (psnap->dsap != 0xaa || psnap->ssap != 0xaa || psnap->ctrl != 3) {
		/* not a snap type so leave it alone */
		pr_debug("ray_cs untranslate NOT SNAP %02x %02x %02x\n",
			 psnap->dsap, psnap->ssap, psnap->ctrl);

		delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
		peth = (struct ethhdr *)(skb->data + delta);
		/* 802.3: proto field carries the payload length */
		peth->h_proto = htons(len - RX_MAC_HEADER_LENGTH);
	} else {		/* Its a SNAP */

		if (memcmp(psnap->org, org_bridge, 3) == 0) {
			/* EtherII and nuke the LLC */
			pr_debug("ray_cs untranslate Bridge encap\n");
			delta = RX_MAC_HEADER_LENGTH
			    + sizeof(struct snaphdr_t) - ETH_HLEN;
			peth = (struct ethhdr *)(skb->data + delta);
			peth->h_proto = type;
		} else if (memcmp(psnap->org, org_1042, 3) == 0) {
			switch (ntohs(type)) {
			case ETH_P_IPX:
			case ETH_P_AARP:
				/* IPX/AARP keep the SNAP header (802.1H) */
				pr_debug("ray_cs untranslate RFC IPX/AARP\n");
				delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
				peth = (struct ethhdr *)(skb->data + delta);
				peth->h_proto =
				    htons(len - RX_MAC_HEADER_LENGTH);
				break;
			default:
				pr_debug("ray_cs untranslate RFC default\n");
				delta = RX_MAC_HEADER_LENGTH +
				    sizeof(struct snaphdr_t) - ETH_HLEN;
				peth = (struct ethhdr *)(skb->data + delta);
				peth->h_proto = type;
				break;
			}
		} else {
			/* Unknown OUI: keep the ethertype as-is */
			printk("ray_cs untranslate very confused by packet\n");
			delta = RX_MAC_HEADER_LENGTH - ETH_HLEN;
			peth = (struct ethhdr *)(skb->data + delta);
			peth->h_proto = type;
		}
	}
	/* TBD reserve skb_reserve(skb, delta); */
	skb_pull(skb, delta);
	pr_debug("untranslate after skb_pull(%d), skb->data = %p\n", delta,
		 skb->data);
	/* Rebuild the ethernet addresses at the new frame front */
	memcpy(peth->h_dest, destaddr, ADDRLEN);
	memcpy(peth->h_source, srcaddr, ADDRLEN);
#if 0
	{
		int i;
		printk(KERN_DEBUG "skb->data after untranslate:");
		for (i = 0; i < 64; i++)
			printk("%02x ", skb->data[i]);
		printk("\n");
	}
#endif
} /* end untranslate */
/*===========================================================================*/
/* Copy data from circular receive buffer to PC memory.
* dest = destination address in PC memory
* pkt_addr = source address in receive buffer
* len = length of packet to copy
*/
/* Copy data from circular receive buffer to PC memory.
 * dest = destination address in PC memory
 * pkt_addr = source offset within the receive buffer
 * length = number of bytes to copy
 * Handles a wrap past RX_BUFF_END; returns the bytes copied (length).
 */
static int copy_from_rx_buff(ray_dev_t *local, UCHAR *dest, int pkt_addr,
			     int length)
{
	int tail = (pkt_addr + length) - (RX_BUFF_END + 1);

	if (tail > 0) {
		/* Packet wraps: copy up to the buffer end, then the
		 * remainder from the start of the buffer. */
		int head = length - tail;

		memcpy_fromio(dest, local->rmem + pkt_addr, head);
		memcpy_fromio(dest + head, local->rmem, tail);
	} else {
		memcpy_fromio(dest, local->rmem + pkt_addr, length);
	}
	return length;
}
/*===========================================================================*/
/*
 * Return every RCS in a fragment chain (starting at @prcs) to the card
 * by marking each buffer free, following next_frag_rcs_index links.
 * Bounded to 17 links; an out-of-range index terminates the walk.
 */
static void release_frag_chain(ray_dev_t *local, struct rcs __iomem *prcs)
{
	struct rcs __iomem *prcslink = prcs;
	int tmp = 17;	/* same fragment cap as rx_data() */
	unsigned rcsindex = readb(&prcs->var.rx_packet.next_frag_rcs_index);

	while (tmp--) {
		writeb(CCS_BUFFER_FREE, &prcslink->buffer_status);
		if (rcsindex >= (NUMBER_OF_CCS + NUMBER_OF_RCS)) {
			pr_debug("ray_cs interrupt bad rcsindex = 0x%x\n",
				 rcsindex);
			break;
		}
		prcslink = rcs_base(local) + rcsindex;
		rcsindex = readb(&prcslink->var.rx_packet.next_frag_rcs_index);
	}
	/* Free the last RCS reached (loop frees each link's predecessor) */
	writeb(CCS_BUFFER_FREE, &prcslink->buffer_status);
}
/*===========================================================================*/
/*
 * Begin open-system authentication with the current BSS: queue an auth
 * request frame and arm a 2 second timer. If queuing the frame fails
 * the timer instead falls back to rejoining the network.
 */
static void authenticate(ray_dev_t *local)
{
	struct pcmcia_device *link = local->finder;

	dev_dbg(&link->dev, "ray_cs Starting authentication.\n");
	if (!(pcmcia_dev_present(link))) {
		dev_dbg(&link->dev, "ray_cs authenticate - device not present\n");
		return;
	}

	del_timer(&local->timer);
	/* Nonzero return = frame could not be queued; retry via join_net */
	local->timer.function =
	    build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)
	    ? join_net : authenticate_timeout;
	local->timer.expires = jiffies + HZ * 2;
	local->timer.data = (long)local;
	add_timer(&local->timer);

	local->authentication_state = AWAITING_RESPONSE;
} /* end authenticate */
/*===========================================================================*/
/*
 * Handle a received authentication management frame. In ad-hoc mode,
 * answer an incoming auth request (sequence byte 1) with a response and
 * remember the peer. In infrastructure mode, while awaiting our own
 * response (sequence byte 2), a zero status means success (proceed to
 * associate) and nonzero means refusal (rejoin the net).
 */
static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
			    unsigned int pkt_addr, int rx_len)
{
	UCHAR buff[256];
	struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;

	del_timer(&local->timer);

	/* Frame is copied out of card memory; length clamped to 255 */
	copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff);
	/* if we are trying to get authenticated */
	if (local->sparm.b4.a_network_type == ADHOC) {
		pr_debug("ray_cs rx_auth var= %02x %02x %02x %02x %02x %02x\n",
			 msg->var[0], msg->var[1], msg->var[2], msg->var[3],
			 msg->var[4], msg->var[5]);
		/* var[2] is the auth sequence number; 1 = peer's request */
		if (msg->var[2] == 1) {
			pr_debug("ray_cs Sending authentication response.\n");
			if (!build_auth_frame
			    (local, msg->mac.addr_2, OPEN_AUTH_RESPONSE)) {
				local->authentication_state = NEED_TO_AUTH;
				memcpy(local->auth_id, msg->mac.addr_2,
				       ADDRLEN);
			}
		}
	} else {		/* Infrastructure network */

		if (local->authentication_state == AWAITING_RESPONSE) {
			/* Verify authentication sequence #2 and success */
			if (msg->var[2] == 2) {
				/* var[3..4] is the status code; 0 = success */
				if ((msg->var[3] | msg->var[4]) == 0) {
					pr_debug("Authentication successful\n");
					local->card_status = CARD_AUTH_COMPLETE;
					associate(local);
					local->authentication_state =
					    AUTHENTICATED;
				} else {
					pr_debug("Authentication refused\n");
					local->card_status = CARD_AUTH_REFUSED;
					join_net((u_long) local);
					local->authentication_state =
					    UNAUTHENTICATED;
				}
			}
		}
	}
} /* end rx_authenticate */
/*===========================================================================*/
/*
 * Issue a START_ASSOCIATION command to the firmware. On failure to
 * interrupt the firmware the CCS is returned, a 2 second join_net
 * retry timer is armed, and the card is marked CARD_ASSOC_FAILED.
 * Success/failure of the association itself arrives via ray_interrupt.
 */
static void associate(ray_dev_t *local)
{
	struct ccs __iomem *pccs;
	struct pcmcia_device *link = local->finder;
	struct net_device *dev = link->priv;
	int ccsindex;

	if (!(pcmcia_dev_present(link))) {
		dev_dbg(&link->dev, "ray_cs associate - device not present\n");
		return;
	}
	/* If no tx buffers available, return */
	if ((ccsindex = get_free_ccs(local)) < 0) {
		/* TBD should never be here but... what if we are? */
		dev_dbg(&link->dev, "ray_cs associate - No free ccs\n");
		return;
	}
	dev_dbg(&link->dev, "ray_cs Starting association with access point\n");
	pccs = ccs_base(local) + ccsindex;
	/* fill in the CCS */
	writeb(CCS_START_ASSOCIATION, &pccs->cmd);
	/* Interrupt the firmware to process the command */
	if (interrupt_ecf(local, ccsindex)) {
		dev_dbg(&link->dev, "ray_cs associate failed - ECF not ready for intr\n");
		/* Return the unused CCS and schedule a rejoin attempt */
		writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);

		del_timer(&local->timer);
		local->timer.expires = jiffies + HZ * 2;
		local->timer.data = (long)local;
		local->timer.function = join_net;
		add_timer(&local->timer);
		local->card_status = CARD_ASSOC_FAILED;
		return;
	}
	if (!sniffer)
		netif_start_queue(dev);
} /* end associate */
/*===========================================================================*/
/*
 * Handle a received deauthentication frame: simply drop back to the
 * unauthenticated state. Examining the frame's reason code (to decide
 * between reauthenticating and rejoining) is still TBD.
 */
static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
			      unsigned int pkt_addr, int rx_len)
{
/*  UCHAR buff[256];
    struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
*/
	pr_debug("Deauthentication frame received\n");
	local->authentication_state = UNAUTHENTICATED;
	/* Need to reauthenticate or rejoin depending on reason code */
/*  copy_from_rx_buff(local, buff, pkt_addr, rx_len & 0xff);
 */
}
/*===========================================================================*/
/* Acknowledge the card's host interrupt by clearing the HCS flag byte
 * in attribute memory. */
static void clear_interrupt(ray_dev_t *local)
{
	writeb(0, local->amem + CIS_OFFSET + HCS_INTR_OFFSET);
}
/*===========================================================================*/
#ifdef CONFIG_PROC_FS
#define MAXDATA (PAGE_SIZE - 80)

/* Human-readable names for local->card_status, indexed by status code
 * (codes 7-10 and 13 are unused by the firmware). */
static const char *card_status[] = {
	"Card inserted - uninitialized",	/* 0 */
	"Card not downloaded",			/* 1 */
	"Waiting for download parameters",	/* 2 */
	"Card doing acquisition",		/* 3 */
	"Acquisition complete",			/* 4 */
	"Authentication complete",		/* 5 */
	"Association complete",			/* 6 */
	"???", "???", "???", "???",		/* 7 8 9 10 undefined */
	"Card init error",			/* 11 */
	"Download parameters error",		/* 12 */
	"???",					/* 13 */
	"Acquisition failed",			/* 14 */
	"Authentication refused",		/* 15 */
	"Association failed"			/* 16 */
};

/* Indexed by sparm.b5.a_network_type / the "translate" module param */
static const char *nettype[] = { "Adhoc", "Infra " };
static const char *framing[] = { "Encapsulation", "Translation" };
/*===========================================================================*/
/*
 * seq_file show handler for /proc/driver/ray_cs/ray_cs: dumps firmware
 * version, ESSID, BSSID, card status and, when one has been cached,
 * the interesting elements of the last received beacon.
 */
static int ray_cs_proc_show(struct seq_file *m, void *v)
{
	/* Print current values which are not available via other means
	 * eg ifconfig
	 */
	int i;
	struct pcmcia_device *link;
	struct net_device *dev;
	ray_dev_t *local;
	UCHAR *p;
	struct freq_hop_element *pfh;
	UCHAR c[33];

	link = this_device;
	if (!link)
		return 0;
	dev = (struct net_device *)link->priv;
	if (!dev)
		return 0;
	local = netdev_priv(dev);
	if (!local)
		return 0;

	seq_puts(m, "Raylink Wireless LAN driver status\n");
	seq_printf(m, "%s\n", rcsid);
	/* build 4 does not report version, and field is 0x55 after memtest */
	seq_puts(m, "Firmware version     = ");
	if (local->fw_ver == 0x55)
		seq_puts(m, "4 - Use dump_cis for more details\n");
	else
		seq_printf(m, "%2d.%02d.%02d\n",
			   local->fw_ver, local->fw_bld, local->fw_var);

	/* ESSID is copied out so it can be NUL-terminated for printing */
	for (i = 0; i < 32; i++)
		c[i] = local->sparm.b5.a_current_ess_id[i];
	c[32] = 0;
	seq_printf(m, "%s network ESSID = \"%s\"\n",
		   nettype[local->sparm.b5.a_network_type], c);

	p = local->bss_id;
	seq_printf(m, "BSSID                = %pM\n", p);

	seq_printf(m, "Country code         = %d\n",
		   local->sparm.b5.a_curr_country_code);

	/* Clamp unknown status codes into the "???" table slot */
	i = local->card_status;
	if (i < 0)
		i = 10;
	if (i > 16)
		i = 10;
	seq_printf(m, "Card status          = %s\n", card_status[i]);

	seq_printf(m, "Framing mode         = %s\n", framing[translate]);

	seq_printf(m, "Last pkt signal lvl  = %d\n", local->last_rsl);

	if (local->beacon_rxed) {
		/* Pull some fields out of last beacon received */
		seq_printf(m, "Beacon Interval      = %d Kus\n",
			   local->last_bcn.beacon_intvl[0]
			   + 256 * local->last_bcn.beacon_intvl[1]);

		/* Walk the tagged elements: ESSID, rates, FH parameters.
		 * Each element is <id, length, data...>; p[1]+2 skips one. */
		p = local->last_bcn.elements;
		if (p[0] == C_ESSID_ELEMENT_ID)
			p += p[1] + 2;
		else {
			seq_printf(m,
				   "Parse beacon failed at essid element id = %d\n",
				   p[0]);
			return 0;
		}

		if (p[0] == C_SUPPORTED_RATES_ELEMENT_ID) {
			seq_puts(m, "Supported rate codes = ");
			for (i = 2; i < p[1] + 2; i++)
				seq_printf(m, "0x%02x ", p[i]);
			seq_putc(m, '\n');
			p += p[1] + 2;
		} else {
			seq_puts(m, "Parse beacon failed at rates element\n");
			return 0;
		}

		if (p[0] == C_FH_PARAM_SET_ELEMENT_ID) {
			pfh = (struct freq_hop_element *)p;
			seq_printf(m, "Hop dwell            = %d Kus\n",
				   pfh->dwell_time[0] +
				   256 * pfh->dwell_time[1]);
			seq_printf(m, "Hop set              = %d\n",
				   pfh->hop_set);
			seq_printf(m, "Hop pattern          = %d\n",
				   pfh->hop_pattern);
			seq_printf(m, "Hop index            = %d\n",
				   pfh->hop_index);
			p += p[1] + 2;
		} else {
			seq_puts(m,
				 "Parse beacon failed at FH param element\n");
			return 0;
		}
	} else {
		seq_puts(m, "No beacons received\n");
	}
	return 0;
}
/* Open handler for /proc/driver/ray_cs/ray_cs: plain single_open seq file. */
static int ray_cs_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, ray_cs_proc_show, NULL);
}
/* File operations for the read-only /proc/driver/ray_cs/ray_cs entry */
static const struct file_operations ray_cs_proc_fops = {
	.owner = THIS_MODULE,
	.open = ray_cs_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif
/*===========================================================================*/
/*
 * Build an open-system authentication frame directly in card transmit
 * memory and queue it via a TX_REQUEST CCS. @dest is the peer MAC,
 * @auth_type the auth frame subtype (request or response).
 * Returns 0 on success, -1 if no tx CCS was free or the firmware could
 * not be interrupted (in which case the CCS is returned).
 */
static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type)
{
	int addr;
	struct ccs __iomem *pccs;
	struct tx_msg __iomem *ptx;
	int ccsindex;

	/* If no tx buffers available, return */
	if ((ccsindex = get_free_tx_ccs(local)) < 0) {
		pr_debug("ray_cs send authenticate - No free tx ccs\n");
		return -1;
	}

	pccs = ccs_base(local) + ccsindex;

	/* Address in card space (each tx buffer is 2 KiB, hence << 11) */
	addr = TX_BUF_BASE + (ccsindex << 11);
	/* fill in the CCS */
	writeb(CCS_TX_REQUEST, &pccs->cmd);
	writeb(addr >> 8, pccs->var.tx_request.tx_data_ptr);
	writeb(0x20, pccs->var.tx_request.tx_data_ptr + 1);
	writeb(TX_AUTHENTICATE_LENGTH_MSB, pccs->var.tx_request.tx_data_length);
	writeb(TX_AUTHENTICATE_LENGTH_LSB,
	       pccs->var.tx_request.tx_data_length + 1);
	writeb(0, &pccs->var.tx_request.pow_sav_mode);

	ptx = local->sram + addr;
	/* fill in the mac header */
	writeb(PROTOCOL_VER | AUTHENTIC_TYPE, &ptx->mac.frame_ctl_1);
	writeb(0, &ptx->mac.frame_ctl_2);

	memcpy_toio(ptx->mac.addr_1, dest, ADDRLEN);
	memcpy_toio(ptx->mac.addr_2, local->sparm.b4.a_mac_addr, ADDRLEN);
	memcpy_toio(ptx->mac.addr_3, local->bss_id, ADDRLEN);

	/* Fill in msg body with protocol 00 00, sequence 01 00 ,status 00 00 */
	memset_io(ptx->var, 0, 6);
	writeb(auth_type & 0xff, ptx->var + 2);

	/* Interrupt the firmware to process the command */
	if (interrupt_ecf(local, ccsindex)) {
		pr_debug(
			"ray_cs send authentication request failed - ECF not ready for intr\n");
		/* Return the claimed CCS: the firmware never saw it */
		writeb(CCS_BUFFER_FREE, &(pccs++)->buffer_status);
		return -1;
	}
	return 0;
} /* End build_auth_frame */
/*===========================================================================*/
#ifdef CONFIG_PROC_FS
/*
 * Write handler for /proc/driver/ray_cs/essid: copies the user-supplied
 * ESSID (truncated to 32 bytes) into a static NUL-padded buffer and
 * points the module-level 'essid' at it.  Always claims to consume the
 * whole write.
 *
 * Fix: 'len' was declared 'unsigned int' while 'count' is size_t, so on
 * a 64-bit kernel a count that is an exact multiple of 4 GiB truncated
 * to len == 0 and silently cleared the ESSID while reporting success.
 * Keep 'len' as size_t so the 32-byte clamp sees the real count.
 */
static ssize_t ray_cs_essid_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	static char proc_essid[33];
	size_t len = count;

	/* ESSIDs are at most 32 bytes; silently truncate longer writes. */
	if (len > 32)
		len = 32;
	memset(proc_essid, 0, 33);
	if (copy_from_user(proc_essid, buffer, len))
		return -EFAULT;
	essid = proc_essid;
	return count;
}
/* Write-only proc entry that sets the ESSID (see ray_cs_essid_proc_write). */
static const struct file_operations ray_cs_essid_proc_fops = {
	.owner = THIS_MODULE,
	.write = ray_cs_essid_proc_write,
	.llseek = noop_llseek,
};
/*
 * Write handler shared by /proc/driver/ray_cs/net_type and .../translate:
 * parses an unsigned decimal number from user space and stores it in the
 * int that was attached to the proc entry via proc_create_data().
 * Rejects empty-after-digits input containing any non-digit (including a
 * trailing newline) with -EINVAL.
 */
static ssize_t int_proc_write(struct file *file, const char __user *buffer,
		size_t count, loff_t *pos)
{
	static char proc_number[10];
	char *p;
	int nr, len;

	if (!count)
		return 0;
	/* at most 9 digits fit in the buffer (and in an int) */
	if (count > 9)
		return -EINVAL;
	if (copy_from_user(proc_number, buffer, count))
		return -EFAULT;
	p = proc_number;
	nr = 0;
	len = count;
	do {
		/* unsigned wrap-around makes any non-digit compare > 9 */
		unsigned int c = *p - '0';
		if (c > 9)
			return -EINVAL;
		nr = nr * 10 + c;
		p++;
	} while (--len);
	/* the target int was registered as the proc entry's data pointer */
	*(int *)PDE(file->f_path.dentry->d_inode)->data = nr;
	return count;
}
/* Write-only proc entry backed by an int (see int_proc_write). */
static const struct file_operations int_proc_fops = {
	.owner = THIS_MODULE,
	.write = int_proc_write,
	.llseek = noop_llseek,
};
#endif
/* PCMCIA manufacturer/card IDs this driver binds to. */
static const struct pcmcia_device_id ray_ids[] = {
	PCMCIA_DEVICE_MANF_CARD(0x01a6, 0x0000),
	PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, ray_ids);
/* PCMCIA driver glue: probe/remove and power-management callbacks. */
static struct pcmcia_driver ray_driver = {
	.owner = THIS_MODULE,
	.name = "ray_cs",
	.probe = ray_probe,
	.remove = ray_detach,
	.id_table = ray_ids,
	.suspend = ray_suspend,
	.resume = ray_resume,
};
/*
 * Module init: register the PCMCIA driver and create the proc interface.
 *
 * Fix: the original logged the return code of pcmcia_register_driver()
 * but unconditionally returned 0, so module load reported success even
 * when driver registration failed.  Propagate the error, and do it
 * before creating any proc entries so exit_ray_cs() (which never runs
 * after a failed init) has nothing left behind to clean up.
 */
static int __init init_ray_cs(void)
{
	int rc;

	pr_debug("%s\n", rcsid);
	rc = pcmcia_register_driver(&ray_driver);
	pr_debug("raylink init_module register_pcmcia_driver returns 0x%x\n",
		 rc);
	if (rc)
		return rc;

#ifdef CONFIG_PROC_FS
	proc_mkdir("driver/ray_cs", NULL);
	proc_create("driver/ray_cs/ray_cs", 0, NULL, &ray_cs_proc_fops);
	proc_create("driver/ray_cs/essid", S_IWUSR, NULL,
		    &ray_cs_essid_proc_fops);
	proc_create_data("driver/ray_cs/net_type", S_IWUSR, NULL,
			 &int_proc_fops, &net_type);
	proc_create_data("driver/ray_cs/translate", S_IWUSR, NULL,
			 &int_proc_fops, &translate);
#endif
	/* normalize the module parameter to a strict 0/1 flag */
	if (translate != 0)
		translate = 1;
	return 0;
} /* init_ray_cs */
/*===========================================================================*/
/*
 * Module exit: tear down the proc interface (children before the
 * directory) and unregister the PCMCIA driver.
 */
static void __exit exit_ray_cs(void)
{
	pr_debug("ray_cs: cleanup_module\n");

#ifdef CONFIG_PROC_FS
	remove_proc_entry("driver/ray_cs/ray_cs", NULL);
	remove_proc_entry("driver/ray_cs/essid", NULL);
	remove_proc_entry("driver/ray_cs/net_type", NULL);
	remove_proc_entry("driver/ray_cs/translate", NULL);
	remove_proc_entry("driver/ray_cs", NULL);
#endif

	pcmcia_unregister_driver(&ray_driver);
} /* exit_ray_cs */

module_init(init_ray_cs);
module_exit(exit_ray_cs);
/*===========================================================================*/
| gpl-2.0 |
jgcaaprom/android_kernel_oneplus_msm8974 | net/ax25/ax25_iface.c | 5089 | 5040 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
/* Registered L3 protocol handlers, keyed by PID; protocol_list_lock guards. */
static struct ax25_protocol *protocol_list;
static DEFINE_RWLOCK(protocol_list_lock);

/* Callbacks invoked on link failure; linkfail_lock guards the hlist. */
static HLIST_HEAD(ax25_linkfail_list);
static DEFINE_SPINLOCK(linkfail_lock);

/* Singly linked list of (callsign, device) pairs we are listening on.
 * A NULL dev wildcards the device (see ax25_listen_mine()). */
static struct listen_struct {
	struct listen_struct *next;
	ax25_address callsign;
	struct net_device *dev;
} *listen_list = NULL;
static DEFINE_SPINLOCK(listen_lock);
/*
* Do not register the internal protocols AX25_P_TEXT, AX25_P_SEGMENT,
* AX25_P_IP or AX25_P_ARP ...
*/
/*
 * Push a caller-owned protocol descriptor onto the head of the global
 * protocol list.  No duplicate-PID check is performed here.
 */
void ax25_register_pid(struct ax25_protocol *ap)
{
	write_lock_bh(&protocol_list_lock);
	ap->next = protocol_list;
	protocol_list = ap;
	write_unlock_bh(&protocol_list_lock);
}
EXPORT_SYMBOL_GPL(ax25_register_pid);
/*
 * Unlink the first protocol entry matching @pid from the global list.
 * The entry itself is caller-owned and is not freed here.  Silently a
 * no-op if no entry matches.
 */
void ax25_protocol_release(unsigned int pid)
{
	struct ax25_protocol *protocol;

	write_lock_bh(&protocol_list_lock);
	protocol = protocol_list;
	if (protocol == NULL)
		goto out;

	/* head-of-list match is a special case */
	if (protocol->pid == pid) {
		protocol_list = protocol->next;
		goto out;
	}

	/* otherwise walk, unlinking via the predecessor's next pointer */
	while (protocol != NULL && protocol->next != NULL) {
		if (protocol->next->pid == pid) {
			protocol->next = protocol->next->next;
			goto out;
		}

		protocol = protocol->next;
	}
out:
	write_unlock_bh(&protocol_list_lock);
}
EXPORT_SYMBOL(ax25_protocol_release);
/* Add @lf to the set of callbacks run by ax25_link_failed(). */
void ax25_linkfail_register(struct ax25_linkfail *lf)
{
	spin_lock_bh(&linkfail_lock);
	hlist_add_head(&lf->lf_node, &ax25_linkfail_list);
	spin_unlock_bh(&linkfail_lock);
}
EXPORT_SYMBOL(ax25_linkfail_register);
/* Remove @lf from the link-failure callback list; @lf is caller-owned. */
void ax25_linkfail_release(struct ax25_linkfail *lf)
{
	spin_lock_bh(&linkfail_lock);
	hlist_del_init(&lf->lf_node);
	spin_unlock_bh(&linkfail_lock);
}
EXPORT_SYMBOL(ax25_linkfail_release);
/*
 * Start listening for @callsign on @dev.  Idempotent: if the pair (or a
 * wildcard that already covers it) is registered, returns 0 without
 * allocating.  Returns -ENOMEM on allocation failure.
 */
int ax25_listen_register(ax25_address *callsign, struct net_device *dev)
{
	struct listen_struct *listen;

	if (ax25_listen_mine(callsign, dev))
		return 0;

	/* GFP_ATOMIC: callers may hold locks / run in softirq context --
	 * presumably; TODO confirm against call sites */
	if ((listen = kmalloc(sizeof(*listen), GFP_ATOMIC)) == NULL)
		return -ENOMEM;

	listen->callsign = *callsign;
	listen->dev      = dev;

	spin_lock_bh(&listen_lock);
	listen->next = listen_list;
	listen_list  = listen;
	spin_unlock_bh(&listen_lock);

	return 0;
}
EXPORT_SYMBOL(ax25_listen_register);
/*
 * Stop listening for (@callsign, @dev): unlink the first exact match
 * from listen_list and free it.  The entry is unlinked under the lock
 * but freed after dropping it.  No-op when no entry matches (including
 * the case where only a NULL-dev wildcard entry exists).
 */
void ax25_listen_release(ax25_address *callsign, struct net_device *dev)
{
	struct listen_struct *s, *listen;

	spin_lock_bh(&listen_lock);
	listen = listen_list;
	if (listen == NULL) {
		spin_unlock_bh(&listen_lock);
		return;
	}

	/* head-of-list match */
	if (ax25cmp(&listen->callsign, callsign) == 0 && listen->dev == dev) {
		listen_list = listen->next;
		spin_unlock_bh(&listen_lock);
		kfree(listen);
		return;
	}

	/* interior match: unlink via the predecessor */
	while (listen != NULL && listen->next != NULL) {
		if (ax25cmp(&listen->next->callsign, callsign) == 0 && listen->next->dev == dev) {
			s = listen->next;
			listen->next = listen->next->next;
			spin_unlock_bh(&listen_lock);
			kfree(s);
			return;
		}

		listen = listen->next;
	}

	spin_unlock_bh(&listen_lock);
}
EXPORT_SYMBOL(ax25_listen_release);
/*
 * Look up the receive handler registered for @pid.
 * Returns the handler function pointer, or NULL if no protocol with
 * that PID is registered.
 */
int (*ax25_protocol_function(unsigned int pid))(struct sk_buff *, ax25_cb *)
{
	struct ax25_protocol *entry;
	int (*handler)(struct sk_buff *, ax25_cb *) = NULL;

	read_lock(&protocol_list_lock);
	for (entry = protocol_list; entry != NULL; entry = entry->next) {
		if (entry->pid == pid) {
			handler = entry->func;
			break;
		}
	}
	read_unlock(&protocol_list_lock);

	return handler;
}
/*
 * Report whether we are listening for @callsign on @dev.
 * An entry with a NULL device acts as a wildcard matching any device.
 * Returns 1 on a match, 0 otherwise.
 */
int ax25_listen_mine(ax25_address *callsign, struct net_device *dev)
{
	struct listen_struct *entry;
	int found = 0;

	spin_lock_bh(&listen_lock);
	for (entry = listen_list; entry != NULL && !found; entry = entry->next) {
		if (ax25cmp(&entry->callsign, callsign) == 0 &&
		    (entry->dev == dev || entry->dev == NULL))
			found = 1;
	}
	spin_unlock_bh(&listen_lock);

	return found;
}
/*
 * Notify every registered link-failure callback that the connection
 * described by @ax25 failed for @reason.  Callbacks run with
 * linkfail_lock held (bottom halves disabled).
 */
void ax25_link_failed(ax25_cb *ax25, int reason)
{
	struct ax25_linkfail *lf;
	struct hlist_node *node;

	spin_lock_bh(&linkfail_lock);
	hlist_for_each_entry(lf, node, &ax25_linkfail_list, lf_node)
		lf->func(ax25, reason);
	spin_unlock_bh(&linkfail_lock);
}
int ax25_protocol_is_registered(unsigned int pid)
{
struct ax25_protocol *protocol;
int res = 0;
read_lock_bh(&protocol_list_lock);
for (protocol = protocol_list; protocol != NULL; protocol = protocol->next)
if (protocol->pid == pid) {
res = 1;
break;
}
read_unlock_bh(&protocol_list_lock);
return res;
}
| gpl-2.0 |
hroark13/zw340 | drivers/char/hw_random/tx4939-rng.c | 5089 | 4607 | /*
* RNG driver for TX4939 Random Number Generators (RNG)
*
* Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/gfp.h>
#define TX4939_RNG_RCSR 0x00000000
#define TX4939_RNG_ROR(n) (0x00000018 + (n) * 8)
#define TX4939_RNG_RCSR_INTE 0x00000008
#define TX4939_RNG_RCSR_RST 0x00000004
#define TX4939_RNG_RCSR_FIN 0x00000002
#define TX4939_RNG_RCSR_ST 0x00000001
/* Per-device state: hwrng handle, MMIO base, and a 3-word (24-byte)
 * buffer of harvested random data consumed 32 bits at a time. */
struct tx4939_rng {
	struct hwrng rng;
	void __iomem *base;
	u64 databuf[3];
	unsigned int data_avail;	/* remaining u32 words in databuf */
};
/* Begin a 64-bit register access section (pairs with rng_io_end()). */
static void rng_io_start(void)
{
#ifndef CONFIG_64BIT
	/*
	 * readq is reading a 64-bit register using a 64-bit load. On
	 * a 32-bit kernel however interrupts or any other processor
	 * exception would clobber the upper 32-bit of the processor
	 * register so interrupts need to be disabled.
	 */
	local_irq_disable();
#endif
}
/* End a 64-bit register access section started by rng_io_start(). */
static void rng_io_end(void)
{
#ifndef CONFIG_64BIT
	local_irq_enable();
#endif
}
/* Raw 64-bit read of an RNG register; call between rng_io_start()/end(). */
static u64 read_rng(void __iomem *base, unsigned int offset)
{
	return ____raw_readq(base + offset);
}
/*
 * Raw 64-bit write of an RNG register; call between rng_io_start()/end().
 *
 * Fix: the original used "return ____raw_writeq(...)", i.e. a return
 * statement with a void-typed expression in a void function.  That is a
 * C++ idiom and a constraint violation in strict ISO C (C11 6.8.6.4)
 * that GCC merely tolerates; call and fall off the end instead.
 */
static void write_rng(u64 val, void __iomem *base, unsigned int offset)
{
	____raw_writeq(val, base + offset);
}
/*
 * hwrng .data_present hook: if the buffer is empty, poll the RNG (up to
 * 20 tries when @wait is set) and, once generation has finished
 * (RCSR.ST clear), harvest all three 64-bit output registers into
 * databuf and restart generation.  Returns the number of u32 words
 * available (0 if none).
 */
static int tx4939_rng_data_present(struct hwrng *rng, int wait)
{
	struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
	int i;

	if (rngdev->data_avail)
		return rngdev->data_avail;
	for (i = 0; i < 20; i++) {
		rng_io_start();
		if (!(read_rng(rngdev->base, TX4939_RNG_RCSR)
		      & TX4939_RNG_RCSR_ST)) {
			rngdev->databuf[0] =
				read_rng(rngdev->base, TX4939_RNG_ROR(0));
			rngdev->databuf[1] =
				read_rng(rngdev->base, TX4939_RNG_ROR(1));
			rngdev->databuf[2] =
				read_rng(rngdev->base, TX4939_RNG_ROR(2));
			/* 24 bytes harvested = 6 u32 words */
			rngdev->data_avail =
				sizeof(rngdev->databuf) / sizeof(u32);
			/* Start RNG */
			write_rng(TX4939_RNG_RCSR_ST,
				  rngdev->base, TX4939_RNG_RCSR);
			wait = 0;
		}
		rng_io_end();
		if (!wait)
			break;
		/* 90 bus clock cycles by default for generation */
		ndelay(90 * 5);
	}
	return rngdev->data_avail;
}
/*
 * hwrng .data_read hook: hand out the next buffered 32-bit word.
 * The hwrng core only calls this after .data_present reported data.
 */
static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer)
{
	struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
	u32 *words = (u32 *)rngdev->databuf;

	rngdev->data_avail--;
	*buffer = words[rngdev->data_avail];
	return sizeof(u32);
}
/*
 * Probe: map the RNG registers (devm-managed, so no explicit unwind is
 * needed on failure), reset and start the generator, discard the first
 * two result sets per the datasheet, then register with the hwrng core.
 */
static int __init tx4939_rng_probe(struct platform_device *dev)
{
	struct tx4939_rng *rngdev;
	struct resource *r;
	int i;

	r = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!r)
		return -EBUSY;
	rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
	if (!rngdev)
		return -ENOMEM;
	rngdev->base = devm_request_and_ioremap(&dev->dev, r);
	if (!rngdev->base)
		return -EBUSY;

	rngdev->rng.name = dev_name(&dev->dev);
	rngdev->rng.data_present = tx4939_rng_data_present;
	rngdev->rng.data_read = tx4939_rng_data_read;

	rng_io_start();
	/* Reset RNG */
	write_rng(TX4939_RNG_RCSR_RST, rngdev->base, TX4939_RNG_RCSR);
	write_rng(0, rngdev->base, TX4939_RNG_RCSR);
	/* Start RNG */
	write_rng(TX4939_RNG_RCSR_ST, rngdev->base, TX4939_RNG_RCSR);
	rng_io_end();
	/*
	 * Drop first two results. From the datasheet:
	 * The quality of the random numbers generated immediately
	 * after reset can be insufficient. Therefore, do not use
	 * random numbers obtained from the first and second
	 * generations; use the ones from the third or subsequent
	 * generation.
	 */
	for (i = 0; i < 2; i++) {
		rngdev->data_avail = 0;
		if (!tx4939_rng_data_present(&rngdev->rng, 1))
			return -EIO;
	}

	platform_set_drvdata(dev, rngdev);
	return hwrng_register(&rngdev->rng);
}
/* Remove: unregister from the hwrng core; devm frees the rest. */
static int __exit tx4939_rng_remove(struct platform_device *dev)
{
	struct tx4939_rng *rngdev = platform_get_drvdata(dev);

	hwrng_unregister(&rngdev->rng);
	platform_set_drvdata(dev, NULL);
	return 0;
}
/* No .probe here: the __init probe is passed to platform_driver_probe()
 * in tx4939rng_init() so its text can be discarded after boot. */
static struct platform_driver tx4939_rng_driver = {
	.driver		= {
		.name = "tx4939-rng",
		.owner = THIS_MODULE,
	},
	.remove = tx4939_rng_remove,
};
/* Module init: one-shot probe registration (device must already exist). */
static int __init tx4939rng_init(void)
{
	return platform_driver_probe(&tx4939_rng_driver, tx4939_rng_probe);
}
/* Module exit: drop the platform driver registration. */
static void __exit tx4939rng_exit(void)
{
	platform_driver_unregister(&tx4939_rng_driver);
}

module_init(tx4939rng_init);
module_exit(tx4939rng_exit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for TX4939");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Arc-Team/android_kernel_htc_a11 | drivers/net/wireless/atmel_cs.c | 5089 | 10247 | /*** -*- linux-c -*- **********************************************************
Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
Copyright 2000-2001 ATMEL Corporation.
Copyright 2003 Simon Kelley.
This code was developed from version 2.1.1 of the Atmel drivers,
released by Atmel corp. under the GPL in December 2002. It also
includes code from the Linux aironet drivers (C) Benjamin Reed,
and the Linux PCMCIA package, (C) David Hinds.
For all queries about this code, please contact the current author,
Simon Kelley <simon@thekelleys.org.uk> and not Atmel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Atmel wireless lan drivers; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
******************************************************************************/
#ifdef __IN_PCMCIA_PACKAGE__
#include <pcmcia/k_compat.h>
#endif
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
#include <pcmcia/ciscode.h>
#include <asm/io.h>
#include <linux/wireless.h>
#include "atmel.h"
/*====================================================================*/
MODULE_AUTHOR("Simon Kelley");
MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards");
/*====================================================================*/
static int atmel_config(struct pcmcia_device *link);
static void atmel_release(struct pcmcia_device *link);
static void atmel_detach(struct pcmcia_device *p_dev);
/* Per-socket private data hung off pcmcia_device->priv. */
typedef struct local_info_t {
	struct net_device *eth_dev;	/* created by init_atmel_card() */
} local_info_t;
/*
 * PCMCIA probe: allocate the per-socket private data and hand off to
 * atmel_config() for resource setup and card initialization.
 */
static int atmel_probe(struct pcmcia_device *p_dev)
{
	local_info_t *local;

	dev_dbg(&p_dev->dev, "atmel_attach()\n");

	/* Allocate space for private device-specific data */
	local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
	if (!local) {
		printk(KERN_ERR "atmel_cs: no memory for new device\n");
		return -ENOMEM;
	}
	p_dev->priv = local;

	return atmel_config(p_dev);
} /* atmel_attach */
/* PCMCIA remove: stop the card / release resources, then free priv. */
static void atmel_detach(struct pcmcia_device *link)
{
	dev_dbg(&link->dev, "atmel_detach\n");

	atmel_release(link);
	kfree(link->priv);
}
/*
 * Callback handed to init_atmel_card(): reports (1/0) whether the
 * PCMCIA device passed as @arg is still physically present.
 */
static int card_present(void *arg)
{
	struct pcmcia_device *link = arg;

	return pcmcia_dev_present(link) ? 1 : 0;
}
/*
 * pcmcia_loop_config() callback: skip configuration index 0, otherwise
 * try to claim the I/O resources for this configuration entry.
 */
static int atmel_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
	return (p_dev->config_index == 0) ? -EINVAL : pcmcia_request_io(p_dev);
}
/*
 * Configure the socket (I/O window, IRQ), enable the device, and bring
 * up the Atmel card.  The firmware type comes from the matched device
 * table entry's driver_info.  On any failure, releases everything and
 * returns -ENODEV.
 */
static int atmel_config(struct pcmcia_device *link)
{
	local_info_t *dev;	/* NOTE(review): assigned but never read below */
	int ret;
	const struct pcmcia_device_id *did;

	dev = link->priv;
	did = dev_get_drvdata(&link->dev);

	dev_dbg(&link->dev, "atmel_config\n");

	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
		CONF_AUTO_AUDIO | CONF_AUTO_SET_IO;

	if (pcmcia_loop_config(link, atmel_config_check, NULL))
		goto failed;

	if (!link->irq) {
		dev_err(&link->dev, "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
		goto failed;
	}

	ret = pcmcia_enable_device(link);
	if (ret)
		goto failed;

	((local_info_t*)link->priv)->eth_dev =
		init_atmel_card(link->irq,
				link->resource[0]->start,
				did ? did->driver_info : ATMEL_FW_TYPE_NONE,
				&link->dev,
				card_present,
				link);
	if (!((local_info_t*)link->priv)->eth_dev)
		goto failed;

	return 0;

 failed:
	atmel_release(link);
	return -ENODEV;
}
/*
 * Undo atmel_config(): stop the card (if it was brought up), clear the
 * eth_dev pointer, and release the socket's resources.
 */
static void atmel_release(struct pcmcia_device *link)
{
	struct net_device *dev = ((local_info_t*)link->priv)->eth_dev;

	dev_dbg(&link->dev, "atmel_release\n");

	if (dev)
		stop_atmel_card(dev);
	((local_info_t*)link->priv)->eth_dev = NULL;

	pcmcia_disable_device(link);
}
/* PM suspend: detach the netdev so the stack stops using it. */
static int atmel_suspend(struct pcmcia_device *link)
{
	local_info_t *local = link->priv;

	netif_device_detach(local->eth_dev);

	return 0;
}
/* PM resume: re-open the card, then re-attach the netdev. */
static int atmel_resume(struct pcmcia_device *link)
{
	local_info_t *local = link->priv;

	atmel_open(local->eth_dev);
	netif_device_attach(local->eth_dev);

	return 0;
}
/*====================================================================*/
/* We use the driver_info field to store the correct firmware type for a card. */
#define PCMCIA_DEVICE_MANF_CARD_INFO(manf, card, info) { \
.match_flags = PCMCIA_DEV_ID_MATCH_MANF_ID| \
PCMCIA_DEV_ID_MATCH_CARD_ID, \
.manf_id = (manf), \
.card_id = (card), \
.driver_info = (kernel_ulong_t)(info), }
#define PCMCIA_DEVICE_PROD_ID12_INFO(v1, v2, vh1, vh2, info) { \
.match_flags = PCMCIA_DEV_ID_MATCH_PROD_ID1| \
PCMCIA_DEV_ID_MATCH_PROD_ID2, \
.prod_id = { (v1), (v2), NULL, NULL }, \
.prod_id_hash = { (vh1), (vh2), 0, 0 }, \
.driver_info = (kernel_ulong_t)(info), }
/* Supported cards; driver_info carries the firmware type to load. */
static const struct pcmcia_device_id atmel_ids[] = {
	PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0620, ATMEL_FW_TYPE_502_3COM),
	PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0696, ATMEL_FW_TYPE_502_3COM),
	PCMCIA_DEVICE_MANF_CARD_INFO(0x01bf, 0x3302, ATMEL_FW_TYPE_502E),
	PCMCIA_DEVICE_MANF_CARD_INFO(0xd601, 0x0007, ATMEL_FW_TYPE_502),
	PCMCIA_DEVICE_PROD_ID12_INFO("11WAVE", "11WP611AL-E", 0x9eb2da1f, 0xc9a0d3f9, ATMEL_FW_TYPE_502E),
	PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR", 0xabda4164, 0x41b37e1f, ATMEL_FW_TYPE_502),
	PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR_D", 0xabda4164, 0x3675d704, ATMEL_FW_TYPE_502D),
	PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR_E", 0xabda4164, 0x4172e792, ATMEL_FW_TYPE_502E),
	PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504_R", 0xabda4164, 0x917f3d72, ATMEL_FW_TYPE_504_2958),
	PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504", 0xabda4164, 0x5040670a, ATMEL_FW_TYPE_504),
	PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504A", 0xabda4164, 0xe15ed87f, ATMEL_FW_TYPE_504A_2958),
	PCMCIA_DEVICE_PROD_ID12_INFO("BT", "Voyager 1020 Laptop Adapter", 0xae49b86a, 0x1e957cd5, ATMEL_FW_TYPE_502),
	PCMCIA_DEVICE_PROD_ID12_INFO("CNet", "CNWLC 11Mbps Wireless PC Card V-5", 0xbc477dde, 0x502fae6b, ATMEL_FW_TYPE_502E),
	PCMCIA_DEVICE_PROD_ID12_INFO("IEEE 802.11b", "Wireless LAN PC Card", 0x5b878724, 0x122f1df6, ATMEL_FW_TYPE_502),
	PCMCIA_DEVICE_PROD_ID12_INFO("IEEE 802.11b", "Wireless LAN Card S", 0x5b878724, 0x5fba533a, ATMEL_FW_TYPE_504_2958),
	PCMCIA_DEVICE_PROD_ID12_INFO("OEM", "11Mbps Wireless LAN PC Card V-3", 0xfea54c90, 0x1c5b0f68, ATMEL_FW_TYPE_502),
	PCMCIA_DEVICE_PROD_ID12_INFO("SMC", "2632W", 0xc4f8b18b, 0x30f38774, ATMEL_FW_TYPE_502D),
	PCMCIA_DEVICE_PROD_ID12_INFO("SMC", "2632W-V2", 0xc4f8b18b, 0x172d1377, ATMEL_FW_TYPE_502),
	PCMCIA_DEVICE_PROD_ID12_INFO("Wireless", "PC_CARD", 0xa407ecdd, 0x119f6314, ATMEL_FW_TYPE_502D),
	PCMCIA_DEVICE_PROD_ID12_INFO("WLAN", "802.11b PC CARD", 0x575c516c, 0xb1f6dbc4, ATMEL_FW_TYPE_502D),
	PCMCIA_DEVICE_PROD_ID12_INFO("LG", "LW2100N", 0xb474d43a, 0x6b1fec94, ATMEL_FW_TYPE_502E),
	PCMCIA_DEVICE_NULL
};

MODULE_DEVICE_TABLE(pcmcia, atmel_ids);
/* PCMCIA driver glue: probe/remove and power-management callbacks. */
static struct pcmcia_driver atmel_driver = {
	.owner		= THIS_MODULE,
	.name		= "atmel_cs",
	.probe          = atmel_probe,
	.remove		= atmel_detach,
	.id_table	= atmel_ids,
	.suspend	= atmel_suspend,
	.resume		= atmel_resume,
};
/* Module init: register with the PCMCIA core. */
static int __init atmel_cs_init(void)
{
	return pcmcia_register_driver(&atmel_driver);
}
/* Module exit: unregister from the PCMCIA core. */
static void __exit atmel_cs_cleanup(void)
{
	pcmcia_unregister_driver(&atmel_driver);
}
/*
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
In addition:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Standard module entry/exit hooks. */
module_init(atmel_cs_init);
module_exit(atmel_cs_cleanup);
| gpl-2.0 |
curbthepain/revkernel_s5 | drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c | 5089 | 24491 | /*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#define bfa_ioc_ct_sync_pos(__ioc) \
((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH 16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
/*
* forward declarations
*/
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);
/* Hardware-interface ops for the CT ASIC (see bfa_nw_ioc_set_ct_hwif()). */
static const struct bfa_ioc_hwif nw_hwif_ct = {
	.ioc_pll_init	     = bfa_ioc_ct_pll_init,
	.ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init	     = bfa_ioc_ct_reg_init,
	.ioc_map_port	     = bfa_ioc_ct_map_port,
	.ioc_isr_mode_set    = bfa_ioc_ct_isr_mode_set,
	.ioc_notify_fail     = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start      = bfa_ioc_ct_sync_start,
	.ioc_sync_join       = bfa_ioc_ct_sync_join,
	.ioc_sync_leave	     = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack	     = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
};
/* Hardware-interface ops for the CT2 ASIC: own pll_init/reg_init/map_port,
 * adds lpu_read_stat, and has no isr_mode_set. */
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
	.ioc_pll_init	     = bfa_ioc_ct2_pll_init,
	.ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init	     = bfa_ioc_ct2_reg_init,
	.ioc_map_port	     = bfa_ioc_ct2_map_port,
	.ioc_lpu_read_stat   = bfa_ioc_ct2_lpu_read_stat,
	.ioc_isr_mode_set    = NULL,
	.ioc_notify_fail     = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start      = bfa_ioc_ct_sync_start,
	.ioc_sync_join       = bfa_ioc_ct_sync_join,
	.ioc_sync_leave	     = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack	     = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete   = bfa_ioc_ct_sync_complete,
};
/**
 * Called from bfa_ioc_attach() to map ASIC-specific calls:
 * select the CT hardware-interface ops for this IOC.
 */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct;
}
/* Select the CT2 hardware-interface ops for this IOC. */
void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct2;
}
/**
 * Return true if the firmware of the current driver matches the running
 * firmware, taking a reference on the shared firmware use count when it
 * does; return false (without a reference) on a version mismatch.
 */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr fwhdr;

	/**
	 * If bios boot (flash based) -- do not increment usage count
	 */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return true;

	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/**
	 * If usage count is 0, always return TRUE.
	 */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		return true;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/*
	 * Use count cannot be non-zero while the chip is uninitialized.
	 * (Was BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT)); the double
	 * negation obscured the assertion without changing it.)
	 */
	BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);

	/**
	 * Check if another driver with a different firmware is active
	 */
	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		return false;
	}

	/**
	 * Same firmware version. Increment the reference count.
	 */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	return true;
}
/*
 * Drop the reference taken by bfa_ioc_ct_firmware_lock() on the shared
 * firmware use count (no-op for flash-based bios boot).
 */
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
	u32 usecnt;

	/**
	 * If bios boot (flash based) -- do not decrement usage count
	 */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/**
	 * decrement usage count
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	/*
	 * Was BUG_ON(!(usecnt > 0)); the double negation obscured the
	 * "must not underflow" assertion without changing it.
	 */
	BUG_ON(usecnt == 0);

	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
/**
* Notify other functions on HB failure.
*/
/*
 * Notify other functions on heartbeat failure by halting both LL
 * firmware instances; the read-backs flush the posted writes so the
 * halt has taken effect before we return.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
	/* Wait for halt to take effect */
	readl(ioc->ioc_regs.ll_halt);
	readl(ioc->ioc_regs.alt_ll_halt);
}
/*
 * Host to LPU mailbox message addresses, indexed by PCI function (CT).
 */
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
} ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/*
 * Host <-> LPU mailbox command/status registers - port 0, by PCI function.
 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/*
 * Host <-> LPU mailbox command/status registers - port 1, by PCI function.
 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

/* CT2 mailbox/command/status/read-stat register offsets, indexed by port. */
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
	u32	hfn;
	u32	lpu;
	u32	lpu_read;
} ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};
/*
 * Populate ioc->ioc_regs with the CT ASIC register addresses: mailbox
 * registers are selected by PCI function, heartbeat/fwstate/halt
 * registers by port (port 0 vs port 1 mirror each other as own/alt).
 */
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int		pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}
/*
 * Populate ioc->ioc_regs with the CT2 ASIC register addresses.  Unlike
 * the CT variant, all mailbox registers here are indexed by port (not
 * PCI function), and a per-port lpu_read_stat register is available.
 */
static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int	port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}
/**
 * Initialize IOC to port mapping.
 */

/* Each PCI function owns an 8-bit field in the personality register. */
#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)

/**
 * bfa_ioc_ct_map_port - derive this IOC's port id on a CT ASIC.
 *
 * Reads the function-personality register, shifts down to this PCI
 * function's 8-bit field and extracts the port-map bits.
 */
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	/**
	 * For catapult, base port id on personality register and IOC type
	 */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}
/**
 * bfa_ioc_ct2_map_port - derive this IOC's port id on a CT2 ASIC.
 *
 * The port id is the FC-LL port-map field of host-function
 * personality register 0.
 */
static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
	u32 personality;

	personality = readl(ioc->pcidev.pci_bar_kva + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = (personality & __FC_LL_PORT_MAP__MK) >>
			__FC_LL_PORT_MAP__SH;
}
/**
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32, mode;

	/* current mode lives in this function's field of the personality reg */
	r32 = readl(rb + FNC_PERS_REG);
	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/**
	 * If already in desired mode, do not change anything
	 */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	/* read-modify-write: replace only this function's status bits */
	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	writel(r32, rb + FNC_PERS_REG);
}
/**
 * bfa_ioc_ct2_lpu_read_stat - check and acknowledge the LPU read status.
 *
 * Returns true if the LPU read-status register was set; the bit is
 * acknowledged by writing 1 back (write-one-to-clear semantics are
 * implied by the code — confirm against the CT2 register spec).
 */
static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_read_stat);
	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return true;
	}
	return false;
}
/**
 * MSI-X resource allocation for 1860 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff

/**
 * bfa_nw_ioc_ct2_poweron - program the MSI-X vector table window for
 * this PCI function on CT2.
 *
 * If the vector count field is already non-zero (e.g. set by firmware
 * or a previous init) only the mailbox-error vector index is written;
 * otherwise a default block of HOSTFN_MSIX_DEFAULT vectors, offset by
 * the PCI function number, is programmed.
 */
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32 r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		/* already programmed: just record the vector offset */
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	/* program NUMVT and per-function vector offset, then the
	 * mailbox-error vector index */
	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
/**
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
	/* zero the firmware usage count under the usage semaphore */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);
}
/**
 * Synchronized IOC failure processing routines
 */

/**
 * bfa_ioc_ct_sync_start - driver-load-time entry into the multi-function
 * failure-sync protocol.
 *
 * Returns true if this function may proceed with initialization.
 */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		/* stale state: reset sync word, usage count and both
		 * port fwstates, then allow init to proceed */
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}
/**
 * Synchronized IOC failure processing routines
 */

/**
 * bfa_ioc_ct_sync_join - mark this function as a sync participant.
 *
 * Read-modify-write of the failure-sync register: sets this PCI
 * function's "sync required" bit.
 */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
	u32 cur;

	cur = readl(ioc->ioc_regs.ioc_fail_sync);
	writel(cur | bfa_ioc_ct_sync_reqd_pos(ioc),
		ioc->ioc_regs.ioc_fail_sync);
}
/**
 * bfa_ioc_ct_sync_leave - withdraw this function from the sync protocol.
 *
 * Clears both this function's "sync required" and "sync ack" bits in
 * the failure-sync register.
 */
static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
	u32 cur;
	u32 clear_mask;

	cur = readl(ioc->ioc_regs.ioc_fail_sync);
	clear_mask = bfa_ioc_ct_sync_reqd_pos(ioc) | bfa_ioc_ct_sync_pos(ioc);
	writel(cur & ~clear_mask, ioc->ioc_regs.ioc_fail_sync);
}
/**
 * bfa_ioc_ct_sync_ack - acknowledge a failure-sync request.
 *
 * Sets this PCI function's ack bit in the failure-sync register.
 */
static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
	u32 cur;

	cur = readl(ioc->ioc_regs.ioc_fail_sync);
	writel(cur | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync);
}
/**
 * bfa_ioc_ct_sync_complete - decide whether all participating functions
 * have acknowledged the failure sync.
 *
 * Returns true when sync is complete (or no acks are pending), in which
 * case the sync-ack bits are cleared and both port fwstates are marked
 * failed. Returns false while acknowledgements are still outstanding.
 */
static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	/* nobody has acked yet: nothing to wait for */
	if (sync_ackd == 0)
		return true;

	/**
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
	    !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		/* everyone acked: clear acks, fail both port fwstates */
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
			ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/**
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC need to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return false;
}
/**
 * bfa_ioc_ct_pll_init - bring up the application PLLs on a CT ASIC.
 * @rb:		BAR0 kernel virtual base
 * @asic_mode:	BFI_ASIC_MODE_FC selects FC mode, anything else FCoE/eth
 *
 * Hardware bring-up sequence: programs the ethernet MAC serdes for the
 * selected mode, resets both IOC fwstates, masks/clears host interrupts,
 * walks the s_clk/l_clk PLLs through soft-reset -> enable -> release,
 * releases local-memory reset and runs the memory BIST. The exact
 * statement order follows the hardware programming sequence and must
 * not be rearranged. Always returns BFA_STATUS_OK.
 */
static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	u32 pll_sclk, pll_fclk, r32;
	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	/* operating mode and ethernet MAC serdes setup differ per mode */
	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
				__APP_EMS_REFCKBUFEN2 |
				__APP_EMS_CHANNEL_SEL,
				(rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
				(rb + ETH_MAC_SER_REG));
	}

	/* reset both IOC firmware states */
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));

	/* mask and clear all host-function interrupts */
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));

	/* PLL sequence: soft reset, then soft reset + enable */
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);

	/* flush posted writes, then wait for the PLLs to settle */
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));

	/* release soft reset, keep PLLs enabled */
	writel(pll_sclk |
		__APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);

	/* non-FC mode: pulse the per-port 1T memory reset */
	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);
	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	/* run the memory BIST; result in MBIST_STAT_REG is read but
	 * (currently) not acted upon */
	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}
/**
 * bfa_ioc_ct2_sclk_init - program the CT2 s_clk PLL.
 *
 * Holds the PLL in reset/bypass, selects the maximum (FC16) clock,
 * enables ethernet subsystem clocks for the duration of PLL init,
 * writes the s_clk divider value and waits 1ms for lock.
 * Statement order mirrors the hardware programming sequence.
 */
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropiately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init dont clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel((r32 | __ETH_CLK_ENABLE_PORT0),
		(rb + CT2_CHIP_MISC_PRG));
	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel((r32 | __ETH_CLK_ENABLE_PORT1),
		(rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	/* 0x1061731b is the vendor-specified s_clk divider/control value */
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);

	/*
	 * Dont do clock gating for ethernet subsystem, firmware/NFC will
	 * do this appropriately
	 */
}
/**
 * bfa_ioc_ct2_lclk_init - program the CT2 l_clk PLL.
 *
 * Mirror of bfa_ioc_ct2_sclk_init() for the LPU clock: hold the PLL in
 * reset/bypass, program the divider value for FC16 and wait 1ms for
 * lock. Statement order mirrors the hardware programming sequence.
 */
static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 */
	/* NOTE(review): this read-then-write stores the value back
	 * unchanged — no speed bits are actually modified here */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	/* NOTE(review): likewise written back unchanged */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	/* 0x20c1731b is the vendor-specified l_clk divider/control value */
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);
}
/**
 * bfa_ioc_ct2_mem_init - release local-memory reset and run the CT2
 * memory BIST.
 *
 * BIST is started and stopped after a 1ms delay; the result register
 * is not checked here.
 */
static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	/* release local-memory reset */
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	/* pulse the memory built-in self test */
	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}
/**
 * bfa_ioc_ct2_mac_reset - reset the CT2 MACs.
 *
 * Re-runs s_clk/l_clk PLL init, releases the PLL soft resets, then
 * puts both ports' MAC and AHB blocks into reset.
 */
static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	/* r32 is volatile so the readbacks are not optimized away */
	volatile u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
			(rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
			(rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
			(rb + CT2_CSI_MAC_CONTROL_REG(0)));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
			(rb + CT2_CSI_MAC_CONTROL_REG(1)));
}
/* max iterations (1ms each) to wait for the NFC controller to halt */
#define CT2_NFC_MAX_DELAY	1000

/**
 * bfa_ioc_ct2_pll_init - bring up the PLLs on a CT2 ASIC.
 * @rb:		BAR0 kernel virtual base
 * @asic_mode:	requested ASIC mode (unused here; NFC/firmware finalizes
 *		the PLL setup)
 *
 * If the NFC has not yet initialized the chip, halts the NFC
 * controller first. Then masks/acknowledges stale LPU mailbox
 * interrupts, resets the MACs and PLLs, optionally signals flash
 * presence via GPIO if the flash was corrupted, initializes memory and
 * resets both IOC fwstates. Always returns BFA_STATUS_OK.
 */
static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	volatile u32 wgn, r32;
	int i;

	/*
	 * Initialize PLL if not already done by NFC
	 */
	wgn = readl(rb + CT2_WGN_STATUS);
	if (!(wgn & __GLBL_PF_VF_CFG_RDY)) {
		/* halt the NFC controller and poll (up to ~1s) until it
		 * reports halted */
		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
			if (r32 & __NFC_CONTROLLER_HALTED)
				break;
			udelay(1000);
		}
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
	if (r32 == 1) {
		writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
		readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
	}
	r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
	if (r32 == 1) {
		writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
		readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
	}

	/* note: mac_reset already runs sclk/lclk init; they are run
	 * again here, matching the original bring-up sequence */
	bfa_ioc_ct2_mac_reset(rb);
	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
		(rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * release soft reset on s_clk & l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		(rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * Announce flash device presence, if flash was corrupted.
	 */
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		/* drive GPIO bit 0 low and enable its output */
		r32 = readl((rb + PSS_GPIO_OUT_REG));
		writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG));
		r32 = readl((rb + PSS_GPIO_OE_REG));
		writel((r32 | 1), (rb + PSS_GPIO_OE_REG));
	}

	bfa_ioc_ct2_mem_init(rb);

	/* reset both IOC firmware states */
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}
| gpl-2.0 |
TeamApexQ/android_kernel_samsung_d2 | drivers/infiniband/hw/qib/qib_mr.c | 8161 | 11915 | /*
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include "qib.h"
/* Fast memory region */
struct qib_fmr {
	struct ib_fmr ibfmr;		/* generic verbs FMR; embedded first */
	struct qib_mregion mr;		/* must be last */
};

/* Convert a generic ib_fmr pointer back to the enclosing qib_fmr. */
static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct qib_fmr, ibfmr);
}
/**
 * qib_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Returns the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see qib_dma.c).
 */
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct qib_ibdev *dev = to_idev(pd->device);
	struct qib_mr *mr;
	struct ib_mr *ret;
	unsigned long flags;

	/* the all-physical-memory MR is reserved for kernel (non-user) PDs */
	if (to_ipd(pd)->user) {
		ret = ERR_PTR(-EPERM);
		goto bail;
	}

	mr = kzalloc(sizeof *mr, GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.access_flags = acc;
	atomic_set(&mr->mr.refcount, 0);

	/* publish as the device-wide DMA MR if none is registered yet;
	 * the lk_table lock serializes against concurrent registrants */
	spin_lock_irqsave(&dev->lk_table.lock, flags);
	if (!dev->dma_mr)
		dev->dma_mr = &mr->mr;
	spin_unlock_irqrestore(&dev->lk_table.lock, flags);

	ret = &mr->ibmr;
bail:
	return ret;
}
/**
 * alloc_mr - allocate a qib_mr able to hold @count segments.
 * @count: number of segments the region must be able to map
 * @lk_table: lkey table to allocate the region's lkey from
 *
 * Allocates the qib_mr plus an array of first-level map pointers
 * (QIB_SEGSZ segments per map), allocates each map, and assigns an
 * lkey. Caller owns the result; returns NULL on any failure (all
 * partial allocations are undone).
 */
static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
{
	struct qib_mr *mr;
	int m, i = 0;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
	mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
	if (!mr)
		goto done;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
		if (!mr->mr.map[i])
			goto bail;
	}
	mr->mr.mapsz = m;
	mr->mr.page_shift = 0;
	mr->mr.max_segs = count;

	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	if (!qib_alloc_lkey(lk_table, &mr->mr))
		goto bail;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;
	atomic_set(&mr->mr.refcount, 0);
	goto done;

bail:
	/* unwind: free any maps allocated so far, then the mr itself */
	while (i)
		kfree(mr->mr.map[--i]);
	kfree(mr);
	mr = NULL;

done:
	return mr;
}
/**
 * qib_reg_phys_mr - register a physical memory region
 * @pd: protection domain for this memory region
 * @buffer_list: pointer to the list of physical buffers to register
 * @num_phys_buf: the number of physical buffers to register
 * @acc: access flags for the region
 * @iova_start: the starting address passed over IB which maps to this MR
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
			      struct ib_phys_buf *buffer_list,
			      int num_phys_buf, int acc, u64 *iova_start)
{
	struct qib_mr *mr;
	int n, m, i;
	struct ib_mr *ret;

	mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
	if (mr == NULL) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = *iova_start;
	mr->mr.iova = *iova_start;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = acc;
	mr->umem = NULL;

	/* fill the segment maps: m indexes the first-level map,
	 * n the segment within it (QIB_SEGSZ segments per map) */
	m = 0;
	n = 0;
	for (i = 0; i < num_phys_buf; i++) {
		mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr;
		mr->mr.map[m]->segs[n].length = buffer_list[i].size;
		mr->mr.length += buffer_list[i].size;
		n++;
		if (n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = &mr->ibmr;
bail:
	return ret;
}
/**
 * qib_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: virtual address to use (from HCA's point of view)
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the QLogic_IB driver
 *
 * Returns the memory region on success, otherwise returns an errno.
 *
 * Fix vs. original: the error path for a page without a kernel
 * virtual address (!vaddr) used to "goto bail" directly, leaking the
 * pinned umem and the whole qib_mr (lkey, map tables, struct). The
 * bail_free path now releases everything, mirroring qib_dereg_mr().
 */
struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct qib_mr *mr;
	struct ib_umem *umem;
	struct ib_umem_chunk *chunk;
	int n, m, i;
	struct ib_mr *ret;

	if (length == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* pin the user pages */
	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *) umem;

	/* count the pinned pages to size the segment maps */
	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list)
		n += chunk->nents;

	mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		ib_umem_release(umem);
		goto bail;
	}

	mr->mr.pd = pd;
	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = umem->offset;
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);

	/* fill the segment maps: m indexes the first-level map,
	 * n the segment within it (QIB_SEGSZ segments per map) */
	m = 0;
	n = 0;
	list_for_each_entry(chunk, &umem->chunk_list, list) {
		for (i = 0; i < chunk->nents; i++) {
			void *vaddr;

			vaddr = page_address(sg_page(&chunk->page_list[i]));
			if (!vaddr) {
				ret = ERR_PTR(-EINVAL);
				goto bail_free;
			}
			mr->mr.map[m]->segs[n].vaddr = vaddr;
			mr->mr.map[m]->segs[n].length = umem->page_size;
			n++;
			if (n == QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}

	ret = &mr->ibmr;
	goto bail;

bail_free:
	/*
	 * Undo alloc_mr() and release the pinned pages. The refcount is
	 * still zero (nothing has used this MR yet), so freeing the lkey
	 * cannot fail.
	 */
	qib_free_lkey(to_idev(pd->device), &mr->mr);
	i = mr->mr.mapsz;
	while (i)
		kfree(mr->mr.map[--i]);
	kfree(mr);
	ib_umem_release(umem);
bail:
	return ret;
}
/**
 * qib_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Returns 0 on success.
 *
 * Note that this is called to free MRs created by qib_get_dma_mr()
 * or qib_reg_user_mr().
 */
int qib_dereg_mr(struct ib_mr *ibmr)
{
	struct qib_mr *mr = to_imr(ibmr);
	struct qib_ibdev *dev = to_idev(ibmr->device);
	int ret;
	int i;

	/* fails (and we bail) if the region is still in use */
	ret = qib_free_lkey(dev, &mr->mr);
	if (ret)
		return ret;

	/* free the first-level segment maps, then the region itself */
	i = mr->mr.mapsz;
	while (i)
		kfree(mr->mr.map[--i]);

	/* unpin user pages if this was a user MR */
	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);
	return 0;
}
/*
 * Allocate a memory region usable with the
 * IB_WR_FAST_REG_MR send work request.
 *
 * Return the memory region on success, otherwise return an errno.
 */
struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct qib_mr *mr;

	mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
	if (mr == NULL)
		return ERR_PTR(-ENOMEM);

	/* no mapping yet: all addressing fields start at zero and are
	 * filled in when the fast-register WR executes */
	mr->mr.pd = pd;
	mr->mr.user_base = 0;
	mr->mr.iova = 0;
	mr->mr.length = 0;
	mr->mr.offset = 0;
	mr->mr.access_flags = 0;
	mr->umem = NULL;

	return &mr->ibmr;
}
/**
 * qib_alloc_fast_reg_page_list - allocate a page list for fast-register
 * work requests.
 * @ibdev: device the list is for (unused)
 * @page_list_len: number of page addresses the list must hold
 *
 * The page array is capped at one page. Returns the list, or an
 * ERR_PTR on size overflow / allocation failure.
 */
struct ib_fast_reg_page_list *
qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
{
	struct ib_fast_reg_page_list *pl;
	unsigned bytes = page_list_len * sizeof(u64);

	if (bytes > PAGE_SIZE)
		return ERR_PTR(-EINVAL);

	pl = kmalloc(sizeof *pl, GFP_KERNEL);
	if (!pl)
		return ERR_PTR(-ENOMEM);

	pl->page_list = kmalloc(bytes, GFP_KERNEL);
	if (pl->page_list)
		return pl;

	kfree(pl);
	return ERR_PTR(-ENOMEM);
}
/* Free a page list allocated by qib_alloc_fast_reg_page_list(). */
void qib_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
{
	kfree(pl->page_list);
	kfree(pl);
}
/**
 * qib_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Returns the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct qib_fmr *fmr;
	int m, i = 0;
	struct ib_fmr *ret;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
	fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
	if (!fmr)
		goto bail;

	/* Allocate first level page tables. */
	for (; i < m; i++) {
		fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
					 GFP_KERNEL);
		if (!fmr->mr.map[i])
			goto bail;
	}
	fmr->mr.mapsz = m;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
		goto bail;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;

	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.pd = pd;
	fmr->mr.user_base = 0;
	fmr->mr.iova = 0;
	fmr->mr.length = 0;
	fmr->mr.offset = 0;
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	atomic_set(&fmr->mr.refcount, 0);
	ret = &fmr->ibfmr;
	goto done;

bail:
	/* unwind: free any maps allocated so far, then the fmr itself */
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	ret = ERR_PTR(-ENOMEM);

done:
	return ret;
}
/**
 * qib_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 */
int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	struct qib_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	int ret;

	/* cannot remap while the region is referenced by active WRs */
	if (atomic_read(&fmr->mr.refcount))
		return -EBUSY;

	if (list_len > fmr->mr.max_segs) {
		ret = -EINVAL;
		goto bail;
	}

	/* update the mapping under the lkey-table lock (irq-safe: may be
	 * called from interrupt context) */
	rkt = &to_idev(ibfmr->device)->lk_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;

	/* fill the segment maps: m indexes the first-level map,
	 * n the segment within it (QIB_SEGSZ segments per map) */
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = 0;

bail:
	return ret;
}
/**
 * qib_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Returns 0 on success.
 */
int qib_unmap_fmr(struct list_head *fmr_list)
{
	struct qib_fmr *fmr;
	struct qib_lkey_table *rkt;
	unsigned long flags;

	/* invalidate each region's mapping under its lkey-table lock */
	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rkt = &to_idev(fmr->ibfmr.device)->lk_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}
/**
 * qib_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Returns 0 on success.
 */
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct qib_fmr *fmr = to_ifmr(ibfmr);
	int ret;
	int i;

	/* fails (and we bail) if the region is still in use */
	ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
	if (ret)
		return ret;

	/* free the first-level segment maps, then the region itself */
	i = fmr->mr.mapsz;
	while (i)
		kfree(fmr->mr.map[--i]);
	kfree(fmr);
	return 0;
}
| gpl-2.0 |
AscendG630-DEV/kernel_huawei_msm8610 | arch/sh/mm/tlb-sh3.c | 8929 | 2267 | /*
* arch/sh/mm/tlb-sh3.c
*
* SH-3 specific TLB operations
*
* Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2002 Paul Mundt
*
* Released under the terms of the GNU GPL v2.0.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
/**
 * __update_tlb - load a PTE into the SH-3 TLB for @address.
 * @vma: the VMA being faulted (may be NULL)
 * @address: faulting virtual address
 * @pte: page table entry to install
 *
 * Writes PTEH (VPN | ASID) and PTEL (hardware PTE bits), then issues
 * the ldtlb instruction, all with interrupts disabled.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags, pteval, vpn;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	/* if the fault is for another mm (ptrace access), the TLB entry
	 * would be tagged with the wrong ASID — skip it */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	/* Set PTEH register */
	vpn = (address & MMU_VPN_MASK) | get_asid();
	__raw_writel(vpn, MMU_PTEH);

	pteval = pte_val(pte);

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
	/* conveniently, we want all the software flags to be 0 anyway */
	__raw_writel(pteval, MMU_PTEL);

	/* Load the TLB */
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	local_irq_restore(flags);
}
/**
 * local_flush_tlb_one - invalidate one TLB entry on this CPU.
 * @asid: address-space id the page is tagged with
 * @page: virtual page address to invalidate
 *
 * Writes the entry (with VALID bit clear) into the TLB address array.
 * With hardware page association a single associative write suffices;
 * otherwise every way must be written.
 */
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long addr, data;
	int i, ways = MMU_NTLB_WAYS;

	/*
	 * NOTE: PTEH.ASID should be set to this MM
	 * _AND_ we need to write ASID to the array.
	 *
	 * It would be simple if we didn't need to set PTEH.ASID...
	 */
	addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
	data = (page & 0xfffe0000) | asid; /* VALID bit is off */

	if ((current_cpu_data.flags & CPU_HAS_MMU_PAGE_ASSOC)) {
		addr |= MMU_PAGE_ASSOC_BIT;
		ways = 1;		/* we already know the way .. */
	}

	for (i = 0; i < ways; i++)
		__raw_writel(data, addr + (i << 8));
}
/**
 * local_flush_tlb_all - invalidate the entire TLB on this CPU.
 *
 * Sets the flush bit in MMUCR with interrupts disabled and issues a
 * control barrier so the flush completes before returning.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags, status;

	/*
	 * Flush all the TLB.
	 *
	 * Write to the MMU control register's bit:
	 *	TF-bit for SH-3, TI-bit for SH-4.
	 *      It's same position, bit #2.
	 */
	local_irq_save(flags);
	status = __raw_readl(MMUCR);
	status |= 0x04;
	__raw_writel(status, MMUCR);
	ctrl_barrier();
	local_irq_restore(flags);
}
| gpl-2.0 |
zombah/android_kernel_nokia_msm8610 | arch/mn10300/kernel/ptrace.c | 8929 | 9827 | /* MN10300 Process tracing
*
* Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Modified by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/asm-offsets.h>
/*
 * translate ptrace register IDs into struct pt_regs offsets
 *
 * Indexed by (PT_xxx << 2), i.e. the byte offset userspace passes to
 * PTRACE_PEEKUSR/POKEUSR; the value is the REG_xxx byte offset within
 * struct pt_regs. Unlisted entries are zero.
 */
static const u8 ptrace_regid_to_frame[] = {
	[PT_A3 << 2] = REG_A3,
	[PT_A2 << 2] = REG_A2,
	[PT_D3 << 2] = REG_D3,
	[PT_D2 << 2] = REG_D2,
	[PT_MCVF << 2] = REG_MCVF,
	[PT_MCRL << 2] = REG_MCRL,
	[PT_MCRH << 2] = REG_MCRH,
	[PT_MDRQ << 2] = REG_MDRQ,
	[PT_E1 << 2] = REG_E1,
	[PT_E0 << 2] = REG_E0,
	[PT_E7 << 2] = REG_E7,
	[PT_E6 << 2] = REG_E6,
	[PT_E5 << 2] = REG_E5,
	[PT_E4 << 2] = REG_E4,
	[PT_E3 << 2] = REG_E3,
	[PT_E2 << 2] = REG_E2,
	[PT_SP << 2] = REG_SP,
	[PT_LAR << 2] = REG_LAR,
	[PT_LIR << 2] = REG_LIR,
	[PT_MDR << 2] = REG_MDR,
	[PT_A1 << 2] = REG_A1,
	[PT_A0 << 2] = REG_A0,
	[PT_D1 << 2] = REG_D1,
	[PT_D0 << 2] = REG_D0,
	[PT_ORIG_D0 << 2] = REG_ORIG_D0,
	[PT_EPSW << 2] = REG_EPSW,
	[PT_PC << 2] = REG_PC,
};
/* Read one word from the tracee's saved register frame at @offset. */
static inline int get_stack_long(struct task_struct *task, int offset)
{
	unsigned long addr;

	addr = (unsigned long) task->thread.uregs + offset;
	return *(unsigned long *) addr;
}
/*
 * Write one word into the tracee's saved register frame at @offset.
 * Always returns 0.
 */
static inline
int put_stack_long(struct task_struct *task, int offset, unsigned long data)
{
	unsigned long *slot;

	slot = (unsigned long *) ((unsigned long) task->thread.uregs + offset);
	*slot = data;
	return 0;
}
/*
 * retrieve the contents of MN10300 userspace general registers
 *
 * Fix vs. original: "&regs->orig_d0" had been mojibake-corrupted to
 * "(registered-sign)s->orig_d0" (HTML-entity damage to "&reg");
 * restored so the code compiles.
 */
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* we need to skip regs->next */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs, 0, PT_ORIG_D0 * sizeof(long));
	if (ret < 0)
		return ret;

	/* copy the remainder of the frame, starting after regs->next */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &regs->orig_d0, PT_ORIG_D0 * sizeof(long),
				  NR_PTREGS * sizeof(long));
	if (ret < 0)
		return ret;

	/* zero-fill anything the caller asked for beyond the frame */
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					NR_PTREGS * sizeof(long), -1);
}
/*
 * update the contents of the MN10300 userspace general registers
 *
 * Fix vs. original: "&regs->orig_d0" and "&regs->pc" had been
 * mojibake-corrupted ("&reg" turned into the registered-trademark
 * sign); restored so the code compiles.
 */
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	unsigned long tmp;
	int ret;

	/* we need to skip regs->next */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs, 0, PT_ORIG_D0 * sizeof(long));
	if (ret < 0)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->orig_d0, PT_ORIG_D0 * sizeof(long),
				 PT_EPSW * sizeof(long));
	if (ret < 0)
		return ret;

	/* we need to mask off changes to EPSW: only the condition flags
	 * (V/C/N/Z) may be altered by the tracer */
	tmp = regs->epsw;
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &tmp, PT_EPSW * sizeof(long),
				 PT_PC * sizeof(long));
	tmp &= EPSW_FLAG_V | EPSW_FLAG_C | EPSW_FLAG_N | EPSW_FLAG_Z;
	tmp |= regs->epsw & ~(EPSW_FLAG_V | EPSW_FLAG_C | EPSW_FLAG_N |
			      EPSW_FLAG_Z);
	regs->epsw = tmp;
	if (ret < 0)
		return ret;

	/* and finally load the PC */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->pc, PT_PC * sizeof(long),
				 NR_PTREGS * sizeof(long));
	if (ret < 0)
		return ret;

	/* ignore anything beyond the frame */
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 NR_PTREGS * sizeof(long), -1);
}
/*
 * retrieve the contents of MN10300 userspace FPU registers
 */
static int fpuregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	const struct fpu_state_struct *fpregs = &target->thread.fpu_state;
	int ret;

	/* flush live FPU state to the thread struct before copying */
	unlazy_fpu(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  fpregs, 0, sizeof(*fpregs));
	if (ret < 0)
		return ret;

	/* zero-fill anything the caller asked for beyond the FPU state */
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					sizeof(*fpregs), -1);
}
/*
 * update the contents of the MN10300 userspace FPU registers
 * - the new state is staged in a local copy so the task's state is only
 *   replaced once the copy-in has fully succeeded
 */
static int fpuregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct fpu_state_struct fpu_state = target->thread.fpu_state;
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fpu_state, 0, sizeof(fpu_state));
if (ret < 0)
return ret;
/* discard any live hardware context, then install the new saved state */
fpu_kill_state(target);
target->thread.fpu_state = fpu_state;
set_using_fpu(target);
/* skip over any trailing bytes the caller supplied */
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(fpu_state), -1);
}
/*
 * determine if the FPU registers have actually been used
 * - report the full register count when the task has touched the FPU,
 *   otherwise report the regset as empty/inactive
 */
static int fpuregs_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	if (!is_using_fpu(target))
		return 0;
	return regset->n;
}
/*
 * Define the register sets available on the MN10300 under Linux
 */
enum mn10300_regset {
REGSET_GENERAL,
REGSET_FPU,
};
/* per-regset descriptors: element size/alignment is one long, and the
 * get/set hooks above do the actual transfer */
static const struct user_regset mn10300_regsets[] = {
/*
* General register format is:
* A3, A2, D3, D2, MCVF, MCRL, MCRH, MDRQ
* E1, E0, E7...E2, SP, LAR, LIR, MDR
* A1, A0, D1, D0, ORIG_D0, EPSW, PC
*/
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(long),
.align = sizeof(long),
.get = genregs_get,
.set = genregs_set,
},
/*
* FPU register format is:
* FS0-31, FPCR
*/
[REGSET_FPU] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct fpu_state_struct) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.get = fpuregs_get,
.set = fpuregs_set,
/* FPU regset only reported in core dumps if actually used */
.active = fpuregs_active,
},
};
/* the single (native) regset view exposed for MN10300 tasks */
static const struct user_regset_view user_mn10300_native_view = {
.name = "mn10300",
.e_machine = EM_MN10300,
.regsets = mn10300_regsets,
.n = ARRAY_SIZE(mn10300_regsets),
};
/* return the regset view for a task; there is only the native view here */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_mn10300_native_view;
}
/*
 * set the single-step bit
 * - sets EPSW_T in the child's saved EPSW; the NULL "dummy" pointer is
 *   only used as an offsetof-style trick to compute the register's
 *   offset within struct user for get/put_stack_long()
 */
void user_enable_single_step(struct task_struct *child)
{
#ifndef CONFIG_MN10300_USING_JTAG
struct user *dummy = NULL;
long tmp;
tmp = get_stack_long(child, (unsigned long) &dummy->regs.epsw);
tmp |= EPSW_T;
put_stack_long(child, (unsigned long) &dummy->regs.epsw, tmp);
#endif
}
/*
 * make sure the single-step bit is not set
 * - clears EPSW_T in the child's saved EPSW (same offsetof-via-NULL
 *   trick as user_enable_single_step)
 */
void user_disable_single_step(struct task_struct *child)
{
#ifndef CONFIG_MN10300_USING_JTAG
struct user *dummy = NULL;
long tmp;
tmp = get_stack_long(child, (unsigned long) &dummy->regs.epsw);
tmp &= ~EPSW_T;
put_stack_long(child, (unsigned long) &dummy->regs.epsw, tmp);
#endif
}
/* called when the tracer detaches: drop any pending single-step request */
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}
/*
 * handle the arch-specific side of process tracing
 * - PEEKUSR/POKEUSR operate on word-aligned offsets into struct user,
 *   translated to pt_regs slots via ptrace_regid_to_frame[]
 * - GETREGS/SETREGS and GETFPREGS/SETFPREGS go through the regset API
 * - everything else falls through to the generic ptrace_request()
 */
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long tmp;
int ret;
unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
ret = -EIO;
/* require 4-byte alignment and a whole word inside struct user */
if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
tmp = 0; /* Default return condition */
if (addr < NR_PTREGS << 2)
tmp = get_stack_long(child,
ptrace_regid_to_frame[addr]);
ret = put_user(tmp, datap);
break;
/* write the word at location addr in the USER area */
case PTRACE_POKEUSR:
ret = -EIO;
if ((addr & 3) || addr > sizeof(struct user) - 3)
break;
/* writes outside the register area are silently accepted */
ret = 0;
if (addr < NR_PTREGS << 2)
ret = put_stack_long(child, ptrace_regid_to_frame[addr],
data);
break;
case PTRACE_GETREGS: /* Get all integer regs from the child. */
return copy_regset_to_user(child, &user_mn10300_native_view,
REGSET_GENERAL,
0, NR_PTREGS * sizeof(long),
datap);
case PTRACE_SETREGS: /* Set all integer regs in the child. */
return copy_regset_from_user(child, &user_mn10300_native_view,
REGSET_GENERAL,
0, NR_PTREGS * sizeof(long),
datap);
case PTRACE_GETFPREGS: /* Get the child FPU state. */
return copy_regset_to_user(child, &user_mn10300_native_view,
REGSET_FPU,
0, sizeof(struct fpu_state_struct),
datap);
case PTRACE_SETFPREGS: /* Set the child FPU state. */
return copy_regset_from_user(child, &user_mn10300_native_view,
REGSET_FPU,
0, sizeof(struct fpu_state_struct),
datap);
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
/*
 * handle tracing of system call entry
 * - return the revised system call number or ULONG_MAX to cause ENOSYS
 */
asmlinkage unsigned long syscall_trace_entry(struct pt_regs *regs)
{
if (tracehook_report_syscall_entry(regs))
/* tracing decided this syscall should not happen, so
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in
* regs->orig_d0
*/
return ULONG_MAX;
return regs->orig_d0;
}
/*
 * handle tracing of system call exit
 * - simply notifies the tracehook layer; no step flag is passed (0)
 */
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
{
tracehook_report_syscall_exit(regs, 0);
}
| gpl-2.0 |
christianjann/L4T_PREEMPT_RT | arch/mn10300/kernel/fpu.c | 8929 | 4214 | /* MN10300 FPU management
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <asm/uaccess.h>
#include <asm/fpu.h>
#include <asm/elf.h>
#include <asm/exceptions.h>
#ifdef CONFIG_LAZY_SAVE_FPU
struct task_struct *fpu_state_owner;
#endif
/*
 * error functions in FPU disabled exception
 * - the kernel itself must never touch the FPU, so this either resolves
 *   through an exception fixup or dies
 */
asmlinkage void fpu_disabled_in_kernel(struct pt_regs *regs)
{
die_if_no_fixup("An FPU Disabled exception happened in kernel space\n",
regs, EXCEP_FPU_DISABLED);
}
/*
 * handle an FPU operational exception
 * - there's a possibility that if the FPU is asynchronous, the signal might
 *   be meant for a process other than the current one
 * - maps the FPCR exception-cause bits onto the most specific FPE si_code
 *   and delivers SIGFPE to the task
 */
asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
{
	struct task_struct *tsk = current;
	siginfo_t info;
	u32 fpcr;

	if (!user_mode(regs))
		die_if_no_fixup("An FPU Operation exception happened in"
				" kernel space\n",
				regs, code);

	if (!is_using_fpu(tsk))
		die_if_no_fixup("An FPU Operation exception happened,"
				" but the FPU is not in use",
				regs, code);

	/* zero the whole siginfo first: it is copied out to userspace by
	 * signal delivery, and any fields/padding left uninitialised here
	 * would leak kernel stack contents */
	memset(&info, 0, sizeof(info));
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void *) tsk->thread.uregs->pc;
	info.si_code = FPE_FLTINV;

	/* flush the live FPU state so fpcr below is current */
	unlazy_fpu(tsk);

	fpcr = tsk->thread.fpu_state.fpcr;

	if (fpcr & FPCR_EC_Z)
		info.si_code = FPE_FLTDIV;
	else if (fpcr & FPCR_EC_O)
		info.si_code = FPE_FLTOVF;
	else if (fpcr & FPCR_EC_U)
		info.si_code = FPE_FLTUND;
	else if (fpcr & FPCR_EC_I)
		info.si_code = FPE_FLTRES;

	force_sig_info(SIGFPE, &info, tsk);
}
/*
 * save the FPU state to a signal context
 * - returns 0 if the task has no FPU state, 1 on success, -1 on fault
 *   while copying to the userspace stack
 */
int fpu_setup_sigcontext(struct fpucontext *fpucontext)
{
struct task_struct *tsk = current;
if (!is_using_fpu(tsk))
return 0;
/* transfer the current FPU state to memory and cause fpu_init() to be
* triggered by the next attempted FPU operation by the current
* process.
*/
preempt_disable();
#ifndef CONFIG_LAZY_SAVE_FPU
if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
fpu_save(&tsk->thread.fpu_state);
tsk->thread.uregs->epsw &= ~EPSW_FE;
tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
}
#else /* !CONFIG_LAZY_SAVE_FPU */
if (fpu_state_owner == tsk) {
fpu_save(&tsk->thread.fpu_state);
fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
fpu_state_owner = NULL;
}
#endif /* !CONFIG_LAZY_SAVE_FPU */
preempt_enable();
/* we no longer have a valid current FPU state */
clear_using_fpu(tsk);
/* transfer the saved FPU state onto the userspace stack */
if (copy_to_user(fpucontext,
&tsk->thread.fpu_state,
min(sizeof(struct fpu_state_struct),
sizeof(struct fpucontext))))
return -1;
return 1;
}
/*
 * kill a process's FPU state during restoration after signal handling
 * - disowns any live hardware context without saving it, then marks the
 *   task as having no valid FPU state
 */
void fpu_kill_state(struct task_struct *tsk)
{
/* disown anything left in the FPU */
preempt_disable();
#ifndef CONFIG_LAZY_SAVE_FPU
if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
tsk->thread.uregs->epsw &= ~EPSW_FE;
tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
}
#else /* !CONFIG_LAZY_SAVE_FPU */
if (fpu_state_owner == tsk) {
fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
fpu_state_owner = NULL;
}
#endif /* !CONFIG_LAZY_SAVE_FPU */
preempt_enable();
/* we no longer have a valid current FPU state */
clear_using_fpu(tsk);
}
/*
 * restore the FPU state from a signal context
 * - returns 0 on success, or the copy_from_user() residue on fault;
 *   the task is only marked as using the FPU when the copy succeeded
 */
int fpu_restore_sigcontext(struct fpucontext *fpucontext)
{
struct task_struct *tsk = current;
int ret;
/* load up the old FPU state */
ret = copy_from_user(&tsk->thread.fpu_state, fpucontext,
min(sizeof(struct fpu_state_struct),
sizeof(struct fpucontext)));
if (!ret)
set_using_fpu(tsk);
return ret;
}
/*
 * fill in the FPU structure for a core dump
 * - returns non-zero (and fills *fpreg) only if the task actually used
 *   the FPU; the live state is flushed to memory first
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg)
{
struct task_struct *tsk = current;
int fpvalid;
fpvalid = is_using_fpu(tsk);
if (fpvalid) {
unlazy_fpu(tsk);
memcpy(fpreg, &tsk->thread.fpu_state, sizeof(*fpreg));
}
return fpvalid;
}
| gpl-2.0 |
HRTKernel/test | drivers/infiniband/hw/mthca/mthca_mr.c | 9953 | 23989 | /*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include <linux/errno.h>
#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
/* a run of 2^order MTT segments allocated from one of the buddy pools */
struct mthca_mtt {
struct mthca_buddy *buddy; /* pool the range was carved from */
int order; /* log2 of the number of segments */
u32 first_seg; /* index of the first segment */
};
/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 * Hardware-visible memory protection table entry; all multi-byte fields
 * are big-endian as required by the HCA.
 */
struct mthca_mpt_entry {
__be32 flags;
__be32 page_size;
__be32 key;
__be32 pd;
__be64 start;
__be64 length;
__be32 lkey;
__be32 window_count;
__be32 window_count_limit;
__be64 mtt_seg;
__be32 mtt_sz; /* Arbel only */
u32 reserved[2];
} __attribute__((packed));
#define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO (1 << 17)
#define MTHCA_MPT_FLAG_BIND_ENABLE (1 << 15)
#define MTHCA_MPT_FLAG_PHYSICAL (1 << 9)
#define MTHCA_MPT_FLAG_REGION (1 << 8)
#define MTHCA_MTT_FLAG_PRESENT 1
#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00
#define SINAI_FMR_KEY_INC 0x1000000
/*
 * Buddy allocator for MTT segments (currently not very efficient
 * since it doesn't keep a free list and just searches linearly
 * through the bitmaps)
 *
 * Returns the first segment index of a free 2^order range, or (u32)-1
 * if nothing is available.
 */
static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
{
int o;
int m;
u32 seg;
spin_lock(&buddy->lock);
/* find the smallest free block of at least the requested order */
for (o = order; o <= buddy->max_order; ++o)
if (buddy->num_free[o]) {
m = 1 << (buddy->max_order - o);
seg = find_first_bit(buddy->bits[o], m);
if (seg < m)
goto found;
}
spin_unlock(&buddy->lock);
return -1;
found:
clear_bit(seg, buddy->bits[o]);
--buddy->num_free[o];
/* split the block down to the requested order, freeing each buddy */
while (o > order) {
--o;
seg <<= 1;
set_bit(seg ^ 1, buddy->bits[o]);
++buddy->num_free[o];
}
spin_unlock(&buddy->lock);
/* convert block index at this order into an absolute segment index */
seg <<= order;
return seg;
}
/* return a 2^order range starting at seg, coalescing with its buddy at
 * each level while the buddy is also free */
static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
{
seg >>= order;
spin_lock(&buddy->lock);
while (test_bit(seg ^ 1, buddy->bits[order])) {
clear_bit(seg ^ 1, buddy->bits[order]);
--buddy->num_free[order];
seg >>= 1;
++order;
}
set_bit(seg, buddy->bits[order]);
++buddy->num_free[order];
spin_unlock(&buddy->lock);
}
/* initialise a buddy pool covering 2^max_order segments: one bitmap per
 * order plus a per-order free count; initially the whole pool is one
 * free block of the maximum order.  Returns 0 or -ENOMEM. */
static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);

	/* kcalloc checks count * size for overflow, unlike the open-coded
	 * kzalloc((n + 1) * sizeof) it replaces, and matches the num_free
	 * allocation style below */
	buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *),
			      GFP_KERNEL);
	buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
		if (!buddy->bits[i])
			goto err_out_free;
		bitmap_zero(buddy->bits[i],
			    1 << (buddy->max_order - i));
	}

	/* the entire pool starts out as a single free top-order block */
	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);

	return -ENOMEM;
}
/* tear down a buddy pool: release every per-order bitmap, then the
 * bitmap pointer array and the free-count array themselves */
static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
{
	int order;

	for (order = 0; order <= buddy->max_order; ++order)
		kfree(buddy->bits[order]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}
/* allocate 2^order MTT segments from the given buddy and, on mem-free
 * HCAs, make sure the backing ICM pages are mapped; returns the first
 * segment index or (u32)-1 on failure (the -1 sentinel survives the
 * implicit int -> u32 conversion) */
static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
struct mthca_buddy *buddy)
{
u32 seg = mthca_buddy_alloc(buddy, order);
if (seg == -1)
return -1;
if (mthca_is_memfree(dev))
if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
seg + (1 << order) - 1)) {
/* ICM mapping failed: give the range back */
mthca_buddy_free(buddy, seg, order);
seg = -1;
}
return seg;
}
/* allocate an MTT descriptor big enough for 'size' pages from the given
 * buddy pool; returns an ERR_PTR on failure */
static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
struct mthca_buddy *buddy)
{
struct mthca_mtt *mtt;
int i;
if (size <= 0)
return ERR_PTR(-EINVAL);
mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
if (!mtt)
return ERR_PTR(-ENOMEM);
mtt->buddy = buddy;
mtt->order = 0;
/* round the entry count up to a power-of-two number of segments
* (each segment holds mtt_seg_size / 8 entries) */
for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
++mtt->order;
mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
if (mtt->first_seg == -1) {
kfree(mtt);
return ERR_PTR(-ENOMEM);
}
return mtt;
}
/* public wrapper: allocate an MTT from the regular (non-FMR) pool */
struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
{
return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
}
/* release an MTT descriptor: return its segment range to the owning
 * buddy, drop the ICM references, and free the descriptor itself.
 * NULL (and ERR_PTR-free callers pass NULL) is a no-op. */
void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
{
if (!mtt)
return;
mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);
mthca_table_put_range(dev, dev->mr_table.mtt_table,
mtt->first_seg,
mtt->first_seg + (1 << mtt->order) - 1);
kfree(mtt);
}
/* write MTT entries through the WRITE_MTT firmware command, one mailbox
 * at a time; mtt_entry[0] carries the target MTT address, mtt_entry[1]
 * is reserved, and entries follow from index 2 */
static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
int start_index, u64 *buffer_list, int list_len)
{
struct mthca_mailbox *mailbox;
__be64 *mtt_entry;
int err = 0;
int i;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
mtt_entry = mailbox->buf;
while (list_len > 0) {
mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
mtt->first_seg * dev->limits.mtt_seg_size +
start_index * 8);
mtt_entry[1] = 0;
for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
MTHCA_MTT_FLAG_PRESENT);
/*
* If we have an odd number of entries to write, add
* one more dummy entry for firmware efficiency.
*/
if (i & 1)
mtt_entry[i + 2] = 0;
err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
if (err) {
mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
goto out;
}
/* advance to the next mailbox-sized chunk */
list_len -= i;
start_index += i;
buffer_list += i;
}
out:
mthca_free_mailbox(dev, mailbox);
return err;
}
/* maximum number of MTT entries mthca_write_mtt() may write per chunk;
 * depends on whether the direct (FMR-capable) write path is in use */
int mthca_write_mtt_size(struct mthca_dev *dev)
{
if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
!(dev->mthca_flags & MTHCA_FLAG_FMR))
/*
* Be friendly to WRITE_MTT command
* and leave two empty slots for the
* index and reserved fields of the
* mailbox.
*/
return PAGE_SIZE / sizeof (u64) - 2;
/* For Arbel, all MTTs must fit in the same page. */
return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
}
/* Tavor: write MTT entries directly through the ioremapped MTT window
 * instead of via the firmware command interface */
static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
struct mthca_mtt *mtt, int start_index,
u64 *buffer_list, int list_len)
{
u64 __iomem *mtts;
int i;
mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
start_index * sizeof (u64);
for (i = 0; i < list_len; ++i)
mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
mtts + i);
}
/* Arbel (mem-free): write MTT entries straight into the ICM-backed MTT
 * table, bracketed by DMA sync calls so the device sees the update */
static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
struct mthca_mtt *mtt, int start_index,
u64 *buffer_list, int list_len)
{
__be64 *mtts;
dma_addr_t dma_handle;
int i;
int s = start_index * sizeof (u64);
/* For Arbel, all MTTs must fit in the same page. */
BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
/* Require full segments */
BUG_ON(s % dev->limits.mtt_seg_size);
mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
s / dev->limits.mtt_seg_size, &dma_handle);
BUG_ON(!mtts);
dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
list_len * sizeof (u64), DMA_TO_DEVICE);
for (i = 0; i < list_len; ++i)
mtts[i] = cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT);
dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
list_len * sizeof (u64), DMA_TO_DEVICE);
}
/* write a buffer list into an MTT, picking the firmware-command path or
 * the direct Tavor/Arbel write path depending on device capabilities */
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
int start_index, u64 *buffer_list, int list_len)
{
int size = mthca_write_mtt_size(dev);
int chunk;
if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
!(dev->mthca_flags & MTHCA_FLAG_FMR))
return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len);
/* direct path: split into chunks the hardware path can take at once */
while (list_len > 0) {
chunk = min(size, list_len);
if (mthca_is_memfree(dev))
mthca_arbel_write_mtt_seg(dev, mtt, start_index,
buffer_list, chunk);
else
mthca_tavor_write_mtt_seg(dev, mtt, start_index,
buffer_list, chunk);
list_len -= chunk;
start_index += chunk;
buffer_list += chunk;
}
return 0;
}
/* Tavor uses MPT indices directly as memory keys */
static inline u32 tavor_hw_index_to_key(u32 ind)
{
return ind;
}
static inline u32 tavor_key_to_hw_index(u32 key)
{
return key;
}
/* Arbel rotates the index by 8 bits so the variant part of the key ends
 * up in the top byte; the two helpers are exact inverses */
static inline u32 arbel_hw_index_to_key(u32 ind)
{
return (ind >> 24) | (ind << 8);
}
static inline u32 arbel_key_to_hw_index(u32 key)
{
return (key << 24) | (key >> 8);
}
/* dispatch to the Tavor or Arbel key<->index mapping for this device */
static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
{
if (mthca_is_memfree(dev))
return arbel_hw_index_to_key(ind);
else
return tavor_hw_index_to_key(ind);
}
static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
{
if (mthca_is_memfree(dev))
return arbel_key_to_hw_index(key);
else
return tavor_key_to_hw_index(key);
}
/* on Sinai with the throughput optimization, fold bit 20 of the key into
 * bit 23 so keys stay within the optimized range */
static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
{
if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
return ((key << 20) & 0x800000) | (key & 0x7fffff);
else
return key;
}
/* allocate and program a memory region: reserve an MPT entry, build the
 * MPT in a mailbox, and hand it to hardware with SW2HW_MPT.  mr->mtt may
 * be NULL for a physical (non-translated) region.  Returns 0 or -errno,
 * unwinding all allocations on failure. */
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
struct mthca_mailbox *mailbox;
struct mthca_mpt_entry *mpt_entry;
u32 key;
int i;
int err;
/* page_size field below is buffer_size_shift - 12; larger shifts
* would not fit */
WARN_ON(buffer_size_shift >= 32);
key = mthca_alloc(&dev->mr_table.mpt_alloc);
if (key == -1)
return -ENOMEM;
key = adjust_key(dev, key);
mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
if (mthca_is_memfree(dev)) {
err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
if (err)
goto err_out_mpt_free;
}
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_out_table;
}
mpt_entry = mailbox->buf;
mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
MTHCA_MPT_FLAG_MIO |
MTHCA_MPT_FLAG_REGION |
access);
if (!mr->mtt)
mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);
mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
mpt_entry->key = cpu_to_be32(key);
mpt_entry->pd = cpu_to_be32(pd);
mpt_entry->start = cpu_to_be64(iova);
mpt_entry->length = cpu_to_be64(total_size);
/* zero everything from lkey to the end of the entry */
memset(&mpt_entry->lkey, 0,
sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
if (mr->mtt)
mpt_entry->mtt_seg =
cpu_to_be64(dev->mr_table.mtt_base +
mr->mtt->first_seg * dev->limits.mtt_seg_size);
/* debugging dump, compiled out */
if (0) {
mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
if (i % 4 == 0)
printk("[%02x] ", i * 4);
printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
if ((i + 1) % 4 == 0)
printk("\n");
}
}
err = mthca_SW2HW_MPT(dev, mailbox,
key & (dev->limits.num_mpts - 1));
if (err) {
mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_out_mailbox;
}
mthca_free_mailbox(dev, mailbox);
return err;
err_out_mailbox:
mthca_free_mailbox(dev, mailbox);
err_out_table:
mthca_table_put(dev, dev->mr_table.mpt_table, key);
err_out_mpt_free:
mthca_free(&dev->mr_table.mpt_alloc, key);
return err;
}
/* allocate an untranslated (physical) region covering all of memory:
 * no MTT, 4K page shift, iova 0, maximal length */
int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
u32 access, struct mthca_mr *mr)
{
mr->mtt = NULL;
return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
}
/* allocate a translated region over an explicit page list: allocate an
 * MTT, write the buffer list into it, then create the MR proper; the
 * MTT is freed again on any failure */
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
u64 *buffer_list, int buffer_size_shift,
int list_len, u64 iova, u64 total_size,
u32 access, struct mthca_mr *mr)
{
int err;
mr->mtt = mthca_alloc_mtt(dev, list_len);
if (IS_ERR(mr->mtt))
return PTR_ERR(mr->mtt);
err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
if (err) {
mthca_free_mtt(dev, mr->mtt);
return err;
}
err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
total_size, access, mr);
if (err)
mthca_free_mtt(dev, mr->mtt);
return err;
}
/* Free mr or fmr: drop the ICM reference on the MPT entry and return
 * the MPT index to the allocator */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
mthca_table_put(dev, dev->mr_table.mpt_table,
key_to_hw_index(dev, lkey));
mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}
/* destroy a memory region: pull the MPT back from hardware ownership,
 * then release the MPT slot and the MTT (if any) */
void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
int err;
err = mthca_HW2SW_MPT(dev, NULL,
key_to_hw_index(dev, mr->ibmr.lkey) &
(dev->limits.num_mpts - 1));
if (err)
mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);
mthca_free_region(dev, mr->ibmr.lkey);
mthca_free_mtt(dev, mr->mtt);
}
/* allocate a fast memory region: like mthca_mr_alloc() but additionally
 * records direct pointers to the MPT entry and MTT entries so the map /
 * unmap fast paths can poke them without firmware commands.  Error
 * unwinding releases everything in reverse order of acquisition. */
int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
u32 access, struct mthca_fmr *mr)
{
struct mthca_mpt_entry *mpt_entry;
struct mthca_mailbox *mailbox;
u64 mtt_seg;
u32 key, idx;
int list_len = mr->attr.max_pages;
int err = -ENOMEM;
int i;
if (mr->attr.page_shift < 12 || mr->attr.page_shift >= 32)
return -EINVAL;
/* For Arbel, all MTTs must fit in the same page. */
if (mthca_is_memfree(dev) &&
mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
return -EINVAL;
mr->maps = 0;
key = mthca_alloc(&dev->mr_table.mpt_alloc);
if (key == -1)
return -ENOMEM;
key = adjust_key(dev, key);
idx = key & (dev->limits.num_mpts - 1);
mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
if (mthca_is_memfree(dev)) {
err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
if (err)
goto err_out_mpt_free;
/* remember where the MPT lives in ICM for the fast path */
mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key, NULL);
BUG_ON(!mr->mem.arbel.mpt);
} else
mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
sizeof *(mr->mem.tavor.mpt) * idx;
mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
if (IS_ERR(mr->mtt)) {
err = PTR_ERR(mr->mtt);
goto err_out_table;
}
mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;
if (mthca_is_memfree(dev)) {
mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
mr->mtt->first_seg,
&mr->mem.arbel.dma_handle);
BUG_ON(!mr->mem.arbel.mtts);
} else
mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
goto err_out_free_mtt;
}
mpt_entry = mailbox->buf;
mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS |
MTHCA_MPT_FLAG_MIO |
MTHCA_MPT_FLAG_REGION |
access);
mpt_entry->page_size = cpu_to_be32(mr->attr.page_shift - 12);
mpt_entry->key = cpu_to_be32(key);
mpt_entry->pd = cpu_to_be32(pd);
/* start/length/lkey etc. are programmed later at map time */
memset(&mpt_entry->start, 0,
sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);
/* debugging dump, compiled out */
if (0) {
mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
if (i % 4 == 0)
printk("[%02x] ", i * 4);
printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
if ((i + 1) % 4 == 0)
printk("\n");
}
}
err = mthca_SW2HW_MPT(dev, mailbox,
key & (dev->limits.num_mpts - 1));
if (err) {
mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_out_mailbox_free;
}
mthca_free_mailbox(dev, mailbox);
return 0;
err_out_mailbox_free:
mthca_free_mailbox(dev, mailbox);
err_out_free_mtt:
mthca_free_mtt(dev, mr->mtt);
err_out_table:
mthca_table_put(dev, dev->mr_table.mpt_table, key);
err_out_mpt_free:
mthca_free(&dev->mr_table.mpt_alloc, key);
return err;
}
/* destroy a fast memory region; fails with -EBUSY while any mapping is
 * still outstanding, otherwise releases the MPT slot and the MTT */
int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	if (fmr->maps != 0)
		return -EBUSY;

	mthca_free_region(dev, fmr->ibmr.lkey);
	mthca_free_mtt(dev, fmr->mtt);

	return 0;
}
/* sanity-check an FMR map request: list length, iova alignment, and the
 * remaining map budget; the per-page alignment check is compiled out */
static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
int list_len, u64 iova)
{
int i, page_mask;
if (list_len > fmr->attr.max_pages)
return -EINVAL;
page_mask = (1 << fmr->attr.page_shift) - 1;
/* We are getting page lists, so va must be page aligned. */
if (iova & page_mask)
return -EINVAL;
/* Trust the user not to pass misaligned data in page_list */
if (0)
for (i = 0; i < list_len; ++i) {
/* NOTE(review): this compiled-out check looks inverted —
* "& ~page_mask" rejects any address above page size;
* presumably "& page_mask" was intended. Dead code. */
if (page_list[i] & ~page_mask)
return -EINVAL;
}
if (fmr->maps >= fmr->attr.max_maps)
return -EINVAL;
return 0;
}
/* Tavor fast-path FMR map: bump the key variant, take the MPT into SW
 * ownership, write the MTT entries and the new start/length/key through
 * the ioremapped windows, then hand the MPT back to hardware */
int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
int list_len, u64 iova)
{
struct mthca_fmr *fmr = to_mfmr(ibfmr);
struct mthca_dev *dev = to_mdev(ibfmr->device);
struct mthca_mpt_entry mpt_entry;
u32 key;
int i, err;
err = mthca_check_fmr(fmr, page_list, list_len, iova);
if (err)
return err;
++fmr->maps;
/* advance the key's variant part so stale keys no longer match */
key = tavor_key_to_hw_index(fmr->ibmr.lkey);
key += dev->limits.num_mpts;
fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);
writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
for (i = 0; i < list_len; ++i) {
__be64 mtt_entry = cpu_to_be64(page_list[i] |
MTHCA_MTT_FLAG_PRESENT);
mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
}
mpt_entry.lkey = cpu_to_be32(key);
mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
mpt_entry.start = cpu_to_be64(iova);
__raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
offsetof(struct mthca_mpt_entry, window_count) -
offsetof(struct mthca_mpt_entry, start));
writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);
return 0;
}
/* Arbel fast-path FMR map: same idea as the Tavor version, but the MPT
 * and MTTs live in ICM, so updates go through normal memory writes with
 * DMA syncs and wmb() ordering around the status byte flips */
int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
int list_len, u64 iova)
{
struct mthca_fmr *fmr = to_mfmr(ibfmr);
struct mthca_dev *dev = to_mdev(ibfmr->device);
u32 key;
int i, err;
err = mthca_check_fmr(fmr, page_list, list_len, iova);
if (err)
return err;
++fmr->maps;
key = arbel_key_to_hw_index(fmr->ibmr.lkey);
if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
key += SINAI_FMR_KEY_INC;
else
key += dev->limits.num_mpts;
fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
/* take the MPT into software ownership before touching it */
*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
wmb();
dma_sync_single_for_cpu(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
list_len * sizeof(u64), DMA_TO_DEVICE);
for (i = 0; i < list_len; ++i)
fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
MTHCA_MTT_FLAG_PRESENT);
dma_sync_single_for_device(&dev->pdev->dev, fmr->mem.arbel.dma_handle,
list_len * sizeof(u64), DMA_TO_DEVICE);
fmr->mem.arbel.mpt->key = cpu_to_be32(key);
fmr->mem.arbel.mpt->lkey = cpu_to_be32(key);
fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_shift));
fmr->mem.arbel.mpt->start = cpu_to_be64(iova);
wmb();
/* publish the fully-written entry back to hardware */
*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;
wmb();
return 0;
}
/* Tavor FMR unmap: drop the map count and flip the MPT back to software
 * ownership so the hardware stops translating through it */
void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	if (fmr->maps == 0)
		return;

	fmr->maps = 0;
	writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
}
/* Arbel FMR unmap: same as the Tavor variant, but the MPT status byte
 * lives in ICM so it is written with a plain store */
void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
	if (fmr->maps == 0)
		return;

	fmr->maps = 0;
	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
}
/* set up the MR machinery at device init: MPT index allocator, the main
 * MTT buddy pool, the optional FMR pools/windows, and the reserved MTT
 * range.  Unwinds partially-built state through the goto chain. */
int mthca_init_mr_table(struct mthca_dev *dev)
{
phys_addr_t addr;
int mpts, mtts, err, i;
err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
dev->limits.num_mpts,
~0, dev->limits.reserved_mrws);
if (err)
return err;
/* FMRs need a visible MTT window; with hidden DDR on Tavor-class
* hardware that is impossible, so disable FMR support */
if (!mthca_is_memfree(dev) &&
(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
dev->limits.fmr_reserved_mtts = 0;
else
dev->mthca_flags |= MTHCA_FLAG_FMR;
if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
mthca_dbg(dev, "Memory key throughput optimization activated.\n");
err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
fls(dev->limits.num_mtt_segs - 1));
if (err)
goto err_mtt_buddy;
dev->mr_table.tavor_fmr.mpt_base = NULL;
dev->mr_table.tavor_fmr.mtt_base = NULL;
if (dev->limits.fmr_reserved_mtts) {
i = fls(dev->limits.fmr_reserved_mtts - 1);
if (i >= 31) {
mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
err = -EINVAL;
goto err_fmr_mpt;
}
mpts = mtts = 1 << i;
} else {
mtts = dev->limits.num_mtt_segs;
mpts = dev->limits.num_mpts;
}
/* Tavor-class FMR: map the MPT and MTT tables through BAR 4 so the
* fast path can write them directly */
if (!mthca_is_memfree(dev) &&
(dev->mthca_flags & MTHCA_FLAG_FMR)) {
addr = pci_resource_start(dev->pdev, 4) +
((pci_resource_len(dev->pdev, 4) - 1) &
dev->mr_table.mpt_base);
dev->mr_table.tavor_fmr.mpt_base =
ioremap(addr, mpts * sizeof(struct mthca_mpt_entry));
if (!dev->mr_table.tavor_fmr.mpt_base) {
mthca_warn(dev, "MPT ioremap for FMR failed.\n");
err = -ENOMEM;
goto err_fmr_mpt;
}
addr = pci_resource_start(dev->pdev, 4) +
((pci_resource_len(dev->pdev, 4) - 1) &
dev->mr_table.mtt_base);
dev->mr_table.tavor_fmr.mtt_base =
ioremap(addr, mtts * dev->limits.mtt_seg_size);
if (!dev->mr_table.tavor_fmr.mtt_base) {
mthca_warn(dev, "MTT ioremap for FMR failed.\n");
err = -ENOMEM;
goto err_fmr_mtt;
}
}
if (dev->limits.fmr_reserved_mtts) {
err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, fls(mtts - 1));
if (err)
goto err_fmr_mtt_buddy;
/* Prevent regular MRs from using FMR keys */
/* NOTE(review): mthca_buddy_alloc() returns a segment index
* ((u32)-1 on failure), not an errno; "if (err)" relies on the
* reserved FMR range being first so a successful allocation
* returns 0 — TODO confirm before changing */
err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, fls(mtts - 1));
if (err)
goto err_reserve_fmr;
dev->mr_table.fmr_mtt_buddy =
&dev->mr_table.tavor_fmr.mtt_buddy;
} else
dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;
/* FMR table is always the first, take reserved MTTs out of there */
if (dev->limits.reserved_mtts) {
i = fls(dev->limits.reserved_mtts - 1);
if (mthca_alloc_mtt_range(dev, i,
dev->mr_table.fmr_mtt_buddy) == -1) {
mthca_warn(dev, "MTT table of order %d is too small.\n",
dev->mr_table.fmr_mtt_buddy->max_order);
err = -ENOMEM;
goto err_reserve_mtts;
}
}
return 0;
err_reserve_mtts:
err_reserve_fmr:
if (dev->limits.fmr_reserved_mtts)
mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);
err_fmr_mtt_buddy:
if (dev->mr_table.tavor_fmr.mtt_base)
iounmap(dev->mr_table.tavor_fmr.mtt_base);
err_fmr_mtt:
if (dev->mr_table.tavor_fmr.mpt_base)
iounmap(dev->mr_table.tavor_fmr.mpt_base);
err_fmr_mpt:
mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);
err_mtt_buddy:
mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
return err;
}
/* tear down everything mthca_init_mr_table() built, in reverse order */
void mthca_cleanup_mr_table(struct mthca_dev *dev)
{
/* XXX check if any MRs are still allocated? */
if (dev->limits.fmr_reserved_mtts)
mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);
mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);
if (dev->mr_table.tavor_fmr.mtt_base)
iounmap(dev->mr_table.tavor_fmr.mtt_base);
if (dev->mr_table.tavor_fmr.mpt_base)
iounmap(dev->mr_table.tavor_fmr.mpt_base);
mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}
| gpl-2.0 |
wenfengliaoshuzhai/linux | tools/perf/util/scripting-engines/trace-event-python.c | 226 | 31205 | /*
* trace-event-python. Feed trace events to an embedded Python interpreter.
*
* Copyright (C) 2010 Tom Zanussi <tzanussi@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <Python.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/bitmap.h>
#include "../../perf.h"
#include "../debug.h"
#include "../callchain.h"
#include "../evsel.h"
#include "../util.h"
#include "../event.h"
#include "../thread.h"
#include "../comm.h"
#include "../machine.h"
#include "../db-export.h"
#include "../thread-stack.h"
#include "../trace-event.h"
#include "../machine.h"
PyMODINIT_FUNC initperf_trace_context(void);
#define FTRACE_MAX_EVENT \
((1 << (sizeof(unsigned short) * 8)) - 1)
static DECLARE_BITMAP(events_defined, FTRACE_MAX_EVENT);
#define MAX_FIELDS 64
#define N_COMMON_FIELDS 7
extern struct scripting_context *scripting_context;
static char *cur_field_name;
static int zero_flag_atom;
static PyObject *main_module, *main_dict;
/*
 * State for database-export mode: the generic db_export engine plus the
 * Python callables (e.g. a script-defined "sample_table" function) that
 * set_table_handlers() looks up in the script's global dictionary.
 */
struct tables {
	struct db_export dbe;
	PyObject *evsel_handler;
	PyObject *machine_handler;
	PyObject *thread_handler;
	PyObject *comm_handler;
	PyObject *comm_thread_handler;
	PyObject *dso_handler;
	PyObject *symbol_handler;
	PyObject *branch_type_handler;
	PyObject *sample_handler;
	PyObject *call_path_handler;
	PyObject *call_return_handler;
	bool db_export_mode;	/* true once perf_db_export_mode is set in the script */
};
static struct tables tables_global;
static void handler_call_die(const char *handler_name) NORETURN;
/*
 * Print the pending Python exception, then terminate via Py_FatalError().
 * Declared NORETURN above; handler_name is currently unused because the
 * fatal message is generic.
 */
static void handler_call_die(const char *handler_name)
{
	PyErr_Print();
	Py_FatalError("problem in Python trace event handler");
	// Py_FatalError does not return
	// but we have to make the compiler happy
	abort();
}
/*
* Insert val into into the dictionary and decrement the reference counter.
* This is necessary for dictionaries since PyDict_SetItemString() does not
* steal a reference, as opposed to PyTuple_SetItem().
*/
static void pydict_set_item_string_decref(PyObject *dict, const char *key, PyObject *val)
{
	PyDict_SetItemString(dict, key, val);
	/* Drop our reference: PyDict_SetItemString() took its own. */
	Py_DECREF(val);
}
/*
 * Look up a callable by name in the script's global dictionary.
 * Returns a borrowed reference, or NULL when absent / not callable.
 */
static PyObject *get_handler(const char *handler_name)
{
	PyObject *fn = PyDict_GetItemString(main_dict, handler_name);

	if (fn && !PyCallable_Check(fn))
		return NULL;
	return fn;
}
/* Invoke handler(args); a NULL result means a Python exception — die. */
static void call_object(PyObject *handler, PyObject *args, const char *die_msg)
{
	PyObject *result = PyObject_CallObject(handler, args);

	if (!result)
		handler_call_die(die_msg);
	Py_DECREF(result);
}
/* Call the named script function if the script defines it; no-op otherwise. */
static void try_call_object(const char *handler_name, PyObject *args)
{
	PyObject *fn = get_handler(handler_name);

	if (fn)
		call_object(fn, args, handler_name);
}
/*
 * Tell the script about one flag/symbol value mapping by calling its
 * define_flag_value() or define_symbolic_value() helper (if defined).
 */
static void define_value(enum print_arg_type field_type,
			 const char *ev_name,
			 const char *field_name,
			 const char *field_value,
			 const char *field_str)
{
	const char *handler_name;
	unsigned long long value;
	PyObject *args;

	handler_name = (field_type == PRINT_SYMBOL) ?
		       "define_symbolic_value" : "define_flag_value";

	args = PyTuple_New(4);
	if (!args)
		Py_FatalError("couldn't create Python tuple");

	value = eval_flag(field_value);

	/* PyTuple_SetItem() steals each reference, so no decrefs needed. */
	PyTuple_SetItem(args, 0, PyString_FromString(ev_name));
	PyTuple_SetItem(args, 1, PyString_FromString(field_name));
	PyTuple_SetItem(args, 2, PyInt_FromLong(value));
	PyTuple_SetItem(args, 3, PyString_FromString(field_str));

	try_call_object(handler_name, args);

	Py_DECREF(args);
}
/*
 * Register every value in a flag/symbol list with the script.  The list
 * is walked iteratively (same front-to-back order as the old recursion).
 */
static void define_values(enum print_arg_type field_type,
			  struct print_flag_sym *field,
			  const char *ev_name,
			  const char *field_name)
{
	struct print_flag_sym *sym;

	for (sym = field; sym; sym = sym->next)
		define_value(field_type, ev_name, field_name,
			     sym->value, sym->str);
}
static void define_field(enum print_arg_type field_type,
const char *ev_name,
const char *field_name,
const char *delim)
{
const char *handler_name = "define_flag_field";
PyObject *t;
unsigned n = 0;
if (field_type == PRINT_SYMBOL)
handler_name = "define_symbolic_field";
if (field_type == PRINT_FLAGS)
t = PyTuple_New(3);
else
t = PyTuple_New(2);
if (!t)
Py_FatalError("couldn't create Python tuple");
PyTuple_SetItem(t, n++, PyString_FromString(ev_name));
PyTuple_SetItem(t, n++, PyString_FromString(field_name));
if (field_type == PRINT_FLAGS)
PyTuple_SetItem(t, n++, PyString_FromString(delim));
try_call_object(handler_name, t);
Py_DECREF(t);
}
/*
 * Recursively walk an event's print-format argument tree and register
 * every flag/symbolic field and value with the script.  Side effects:
 * tracks the field currently being described in the file-scope
 * cur_field_name, and sets zero_flag_atom around ':' operators so that
 * a following atom is treated as a flag value of 0.
 */
static void define_event_symbols(struct event_format *event,
				 const char *ev_name,
				 struct print_arg *args)
{
	switch (args->type) {
	case PRINT_NULL:
		break;
	case PRINT_ATOM:
		/* A bare atom defines one flag value for the current field. */
		define_value(PRINT_FLAGS, ev_name, cur_field_name, "0",
			     args->atom.atom);
		zero_flag_atom = 0;
		break;
	case PRINT_FIELD:
		/* Remember which field subsequent flags/symbols belong to. */
		free(cur_field_name);
		cur_field_name = strdup(args->field.name);
		break;
	case PRINT_FLAGS:
		define_event_symbols(event, ev_name, args->flags.field);
		define_field(PRINT_FLAGS, ev_name, cur_field_name,
			     args->flags.delim);
		define_values(PRINT_FLAGS, args->flags.flags, ev_name,
			      cur_field_name);
		break;
	case PRINT_SYMBOL:
		define_event_symbols(event, ev_name, args->symbol.field);
		define_field(PRINT_SYMBOL, ev_name, cur_field_name, NULL);
		define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name,
			      cur_field_name);
		break;
	case PRINT_HEX:
		define_event_symbols(event, ev_name, args->hex.field);
		define_event_symbols(event, ev_name, args->hex.size);
		break;
	case PRINT_INT_ARRAY:
		define_event_symbols(event, ev_name, args->int_array.field);
		define_event_symbols(event, ev_name, args->int_array.count);
		define_event_symbols(event, ev_name, args->int_array.el_size);
		break;
	case PRINT_STRING:
		break;
	case PRINT_TYPE:
		define_event_symbols(event, ev_name, args->typecast.item);
		break;
	case PRINT_OP:
		if (strcmp(args->op.op, ":") == 0)
			zero_flag_atom = 1;
		define_event_symbols(event, ev_name, args->op.left);
		define_event_symbols(event, ev_name, args->op.right);
		break;
	default:
		/* gcc warns for these? */
	case PRINT_BSTRING:
	case PRINT_DYNAMIC_ARRAY:
	case PRINT_FUNC:
	case PRINT_BITMASK:
		/* we should warn... */
		return;
	}
	/* Continue with the next argument in the chain, if any. */
	if (args->next)
		define_event_symbols(event, ev_name, args->next);
}
/*
 * Convert a numeric trace field to a Python object: a single int/long
 * for scalar fields, or a list of them for array fields.  Values are
 * widened to PyLong only when they do not fit a PyInt (long).
 * Returns a new reference.
 */
static PyObject *get_field_numeric_entry(struct event_format *event,
		struct format_field *field, void *data)
{
	bool is_array = field->flags & FIELD_IS_ARRAY;
	PyObject *obj, *list = NULL;
	unsigned long long val;
	unsigned int item_size, n_items, i;

	if (is_array) {
		list = PyList_New(field->arraylen);
		/* Per-element size; assumes size divides evenly by arraylen. */
		item_size = field->size / field->arraylen;
		n_items = field->arraylen;
	} else {
		item_size = field->size;
		n_items = 1;
	}

	for (i = 0; i < n_items; i++) {
		/* read_size() handles the event's endianness for us. */
		val = read_size(event, data + field->offset + i * item_size,
				item_size);
		if (field->flags & FIELD_IS_SIGNED) {
			if ((long long)val >= LONG_MIN &&
					(long long)val <= LONG_MAX)
				obj = PyInt_FromLong(val);
			else
				obj = PyLong_FromLongLong(val);
		} else {
			if (val <= LONG_MAX)
				obj = PyInt_FromLong(val);
			else
				obj = PyLong_FromUnsignedLongLong(val);
		}
		if (is_array)
			PyList_SET_ITEM(list, i, obj);	/* steals obj */
	}
	if (is_array)
		obj = list;
	return obj;
}
/*
 * Resolve the sample's callchain and build a Python list of per-frame
 * dicts: {"ip": long, "sym": {...}, "dso": str}.  Returns a new
 * reference; the list is empty when callchains are disabled, absent,
 * or cannot be resolved.
 */
static PyObject *python_process_callchain(struct perf_sample *sample,
					 struct perf_evsel *evsel,
					 struct addr_location *al)
{
	PyObject *pylist;

	pylist = PyList_New(0);
	if (!pylist)
		Py_FatalError("couldn't create Python list");

	if (!symbol_conf.use_callchain || !sample->callchain)
		goto exit;

	if (thread__resolve_callchain(al->thread, evsel,
				      sample, NULL, NULL,
				      PERF_MAX_STACK_DEPTH) != 0) {
		pr_err("Failed to resolve callchain. Skipping\n");
		goto exit;
	}
	callchain_cursor_commit(&callchain_cursor);

	/* Walk the resolved frames via the global callchain cursor. */
	while (1) {
		PyObject *pyelem;
		struct callchain_cursor_node *node;
		node = callchain_cursor_current(&callchain_cursor);
		if (!node)
			break;

		pyelem = PyDict_New();
		if (!pyelem)
			Py_FatalError("couldn't create Python dictionary");

		pydict_set_item_string_decref(pyelem, "ip",
				PyLong_FromUnsignedLongLong(node->ip));

		if (node->sym) {
			/* Optional "sym" sub-dict with symbol details. */
			PyObject *pysym = PyDict_New();
			if (!pysym)
				Py_FatalError("couldn't create Python dictionary");
			pydict_set_item_string_decref(pysym, "start",
					PyLong_FromUnsignedLongLong(node->sym->start));
			pydict_set_item_string_decref(pysym, "end",
					PyLong_FromUnsignedLongLong(node->sym->end));
			pydict_set_item_string_decref(pysym, "binding",
					PyInt_FromLong(node->sym->binding));
			pydict_set_item_string_decref(pysym, "name",
					PyString_FromStringAndSize(node->sym->name,
							node->sym->namelen));
			pydict_set_item_string_decref(pyelem, "sym", pysym);
		}

		if (node->map) {
			struct map *map = node->map;
			const char *dsoname = "[unknown]";
			/* Prefer the full path when show_kernel_path is set. */
			if (map && map->dso && (map->dso->name || map->dso->long_name)) {
				if (symbol_conf.show_kernel_path && map->dso->long_name)
					dsoname = map->dso->long_name;
				else if (map->dso->name)
					dsoname = map->dso->name;
			}
			pydict_set_item_string_decref(pyelem, "dso",
					PyString_FromString(dsoname));
		}

		callchain_cursor_advance(&callchain_cursor);
		/* PyList_Append() adds its own reference; drop ours. */
		PyList_Append(pylist, pyelem);
		Py_DECREF(pyelem);
	}

exit:
	return pylist;
}
/*
 * Dispatch one tracepoint sample to the script.  If the script defines
 * "<system>__<event>", it is called with positional arguments; otherwise
 * "trace_unhandled" is called with the fields packed into a dict.
 *
 * Bug fixed: the old code did Py_DECREF(dict) on the trace_unhandled
 * path *after* PyTuple_SetItem() had already stolen the reference, so
 * the dict was over-released and the tuple was left holding a freed
 * object.  The tuple owns the dict; Py_DECREF(t) releases it.
 */
static void python_process_tracepoint(struct perf_sample *sample,
				      struct perf_evsel *evsel,
				      struct addr_location *al)
{
	struct event_format *event = evsel->tp_format;
	PyObject *handler, *context, *t, *obj, *callchain;
	PyObject *dict = NULL;
	static char handler_name[256];
	struct format_field *field;
	unsigned long s, ns;
	unsigned n = 0;
	int pid;
	int cpu = sample->cpu;
	void *data = sample->raw_data;
	unsigned long long nsecs = sample->time;
	const char *comm = thread__comm_str(al->thread);

	t = PyTuple_New(MAX_FIELDS);
	if (!t)
		Py_FatalError("couldn't create Python tuple");

	if (!event)
		die("ug! no event found for type %d", (int)evsel->attr.config);

	pid = raw_field_value(event, "common_pid", data);

	sprintf(handler_name, "%s__%s", event->system, event->name);

	/* Register this event's flag/symbol tables only once. */
	if (!test_and_set_bit(event->id, events_defined))
		define_event_symbols(event, handler_name, event->print_fmt.args);

	handler = get_handler(handler_name);
	if (!handler) {
		/* No dedicated handler: deliver fields via a dict. */
		dict = PyDict_New();
		if (!dict)
			Py_FatalError("couldn't create Python dict");
	}
	s = nsecs / NSECS_PER_SEC;
	ns = nsecs - s * NSECS_PER_SEC;

	scripting_context->event_data = data;
	scripting_context->pevent = evsel->tp_format->pevent;

	context = PyCObject_FromVoidPtr(scripting_context, NULL);

	PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
	PyTuple_SetItem(t, n++, context);

	/* ip unwinding */
	callchain = python_process_callchain(sample, evsel, al);

	if (handler) {
		PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
		PyTuple_SetItem(t, n++, PyInt_FromLong(s));
		PyTuple_SetItem(t, n++, PyInt_FromLong(ns));
		PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
		PyTuple_SetItem(t, n++, PyString_FromString(comm));
		PyTuple_SetItem(t, n++, callchain);
	} else {
		pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(cpu));
		pydict_set_item_string_decref(dict, "common_s", PyInt_FromLong(s));
		pydict_set_item_string_decref(dict, "common_ns", PyInt_FromLong(ns));
		pydict_set_item_string_decref(dict, "common_pid", PyInt_FromLong(pid));
		pydict_set_item_string_decref(dict, "common_comm", PyString_FromString(comm));
		pydict_set_item_string_decref(dict, "common_callchain", callchain);
	}
	for (field = event->format.fields; field; field = field->next) {
		if (field->flags & FIELD_IS_STRING) {
			int offset;
			if (field->flags & FIELD_IS_DYNAMIC) {
				/* Dynamic strings: low 16 bits hold the offset. */
				offset = *(int *)(data + field->offset);
				offset &= 0xffff;
			} else
				offset = field->offset;
			obj = PyString_FromString((char *)data + offset);
		} else { /* FIELD_IS_NUMERIC */
			obj = get_field_numeric_entry(event, field, data);
		}
		if (handler)
			PyTuple_SetItem(t, n++, obj);
		else
			pydict_set_item_string_decref(dict, field->name, obj);
	}

	if (!handler)
		PyTuple_SetItem(t, n++, dict);	/* steals our dict reference */

	if (_PyTuple_Resize(&t, n) == -1)
		Py_FatalError("error resizing Python tuple");

	if (handler) {
		call_object(handler, t, handler_name);
	} else {
		try_call_object("trace_unhandled", t);
		/*
		 * No Py_DECREF(dict) here: the tuple owns the dict (the
		 * reference was stolen above) and Py_DECREF(t) below
		 * releases it.
		 */
	}
	Py_DECREF(t);
}
/* Allocate a tuple of the given size, dying on allocation failure. */
static PyObject *tuple_new(unsigned int sz)
{
	PyObject *t = PyTuple_New(sz);

	if (!t)
		Py_FatalError("couldn't create Python tuple");
	return t;
}
/*
 * Store a u64 at the given tuple position.
 *
 * Bug fixed: the old BITS_PER_LONG == 64 path used PyInt_FromLong(),
 * which presents values above LONG_MAX (database ids, addresses,
 * timestamps) to the script as negative numbers.  Always hand the
 * value over as an unsigned 64-bit Python long instead.
 */
static int tuple_set_u64(PyObject *t, unsigned int pos, u64 val)
{
	return PyTuple_SetItem(t, pos, PyLong_FromUnsignedLongLong(val));
}
/* Store a signed 32-bit value at the given tuple position. */
static int tuple_set_s32(PyObject *t, unsigned int pos, s32 val)
{
	PyObject *num = PyInt_FromLong(val);

	/* PyTuple_SetItem() steals the reference to num. */
	return PyTuple_SetItem(t, pos, num);
}
/* Store a C string at the given tuple position. */
static int tuple_set_string(PyObject *t, unsigned int pos, const char *s)
{
	PyObject *str = PyString_FromString(s);

	/* PyTuple_SetItem() steals the reference to str. */
	return PyTuple_SetItem(t, pos, str);
}
/* Export one evsel row (db_id, name) to the script's evsel_table(). */
static int python_export_evsel(struct db_export *dbe, struct perf_evsel *evsel)
{
	struct tables *tables = container_of(dbe, struct tables, dbe);
	PyObject *tup = tuple_new(2);

	tuple_set_u64(tup, 0, evsel->db_id);
	tuple_set_string(tup, 1, perf_evsel__name(evsel));

	call_object(tables->evsel_handler, tup, "evsel_table");

	Py_DECREF(tup);
	return 0;
}
/* Export one machine row (db_id, pid, root_dir) to machine_table(). */
static int python_export_machine(struct db_export *dbe,
				 struct machine *machine)
{
	struct tables *tables = container_of(dbe, struct tables, dbe);
	PyObject *tup = tuple_new(3);

	tuple_set_u64(tup, 0, machine->db_id);
	tuple_set_s32(tup, 1, machine->pid);
	tuple_set_string(tup, 2, machine->root_dir ? machine->root_dir : "");

	call_object(tables->machine_handler, tup, "machine_table");

	Py_DECREF(tup);
	return 0;
}
/* Export one thread row, linking it to its machine and main thread. */
static int python_export_thread(struct db_export *dbe, struct thread *thread,
				u64 main_thread_db_id, struct machine *machine)
{
	struct tables *tables = container_of(dbe, struct tables, dbe);
	PyObject *tup = tuple_new(5);

	tuple_set_u64(tup, 0, thread->db_id);
	tuple_set_u64(tup, 1, machine->db_id);
	tuple_set_u64(tup, 2, main_thread_db_id);
	tuple_set_s32(tup, 3, thread->pid_);
	tuple_set_s32(tup, 4, thread->tid);

	call_object(tables->thread_handler, tup, "thread_table");

	Py_DECREF(tup);
	return 0;
}
/* Export one comm row (db_id, command string) to comm_table(). */
static int python_export_comm(struct db_export *dbe, struct comm *comm)
{
	struct tables *tables = container_of(dbe, struct tables, dbe);
	PyObject *tup = tuple_new(2);

	tuple_set_u64(tup, 0, comm->db_id);
	tuple_set_string(tup, 1, comm__str(comm));

	call_object(tables->comm_handler, tup, "comm_table");

	Py_DECREF(tup);
	return 0;
}
/* Export one comm<->thread association row to comm_thread_table(). */
static int python_export_comm_thread(struct db_export *dbe, u64 db_id,
				     struct comm *comm, struct thread *thread)
{
	struct tables *tables = container_of(dbe, struct tables, dbe);
	PyObject *tup = tuple_new(3);

	tuple_set_u64(tup, 0, db_id);
	tuple_set_u64(tup, 1, comm->db_id);
	tuple_set_u64(tup, 2, thread->db_id);

	call_object(tables->comm_thread_handler, tup, "comm_thread_table");

	Py_DECREF(tup);
	return 0;
}
/* Export one dso row (ids, names, hex build id) to dso_table(). */
static int python_export_dso(struct db_export *dbe, struct dso *dso,
			     struct machine *machine)
{
	struct tables *tables = container_of(dbe, struct tables, dbe);
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];
	PyObject *tup;

	/* Render the binary build id as a hex string. */
	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);

	tup = tuple_new(5);
	tuple_set_u64(tup, 0, dso->db_id);
	tuple_set_u64(tup, 1, machine->db_id);
	tuple_set_string(tup, 2, dso->short_name);
	tuple_set_string(tup, 3, dso->long_name);
	tuple_set_string(tup, 4, sbuild_id);

	call_object(tables->dso_handler, tup, "dso_table");

	Py_DECREF(tup);
	return 0;
}
/* Export one symbol row; the symbol's db id lives in its priv area. */
static int python_export_symbol(struct db_export *dbe, struct symbol *sym,
				struct dso *dso)
{
	struct tables *tables = container_of(dbe, struct tables, dbe);
	u64 *sym_db_id = symbol__priv(sym);
	PyObject *tup = tuple_new(6);

	tuple_set_u64(tup, 0, *sym_db_id);
	tuple_set_u64(tup, 1, dso->db_id);
	tuple_set_u64(tup, 2, sym->start);
	tuple_set_u64(tup, 3, sym->end);
	tuple_set_s32(tup, 4, sym->binding);
	tuple_set_string(tup, 5, sym->name);

	call_object(tables->symbol_handler, tup, "symbol_table");

	Py_DECREF(tup);
	return 0;
}
/* Export one branch-type row (numeric id, name) to branch_type_table(). */
static int python_export_branch_type(struct db_export *dbe, u32 branch_type,
				     const char *name)
{
	struct tables *tables = container_of(dbe, struct tables, dbe);
	PyObject *tup = tuple_new(2);

	tuple_set_s32(tup, 0, branch_type);
	tuple_set_string(tup, 1, name);

	call_object(tables->branch_type_handler, tup, "branch_type_table");

	Py_DECREF(tup);
	return 0;
}
/* Export one sample row (21 columns) to the script's sample_table(). */
static int python_export_sample(struct db_export *dbe,
				struct export_sample *es)
{
	struct tables *tables = container_of(dbe, struct tables, dbe);
	PyObject *tup = tuple_new(21);

	/* Identity and foreign keys. */
	tuple_set_u64(tup, 0, es->db_id);
	tuple_set_u64(tup, 1, es->evsel->db_id);
	tuple_set_u64(tup, 2, es->al->machine->db_id);
	tuple_set_u64(tup, 3, es->al->thread->db_id);
	tuple_set_u64(tup, 4, es->comm_db_id);
	tuple_set_u64(tup, 5, es->dso_db_id);
	tuple_set_u64(tup, 6, es->sym_db_id);
	tuple_set_u64(tup, 7, es->offset);
	/* Sample location and time. */
	tuple_set_u64(tup, 8, es->sample->ip);
	tuple_set_u64(tup, 9, es->sample->time);
	tuple_set_s32(tup, 10, es->sample->cpu);
	/* Resolved target address info. */
	tuple_set_u64(tup, 11, es->addr_dso_db_id);
	tuple_set_u64(tup, 12, es->addr_sym_db_id);
	tuple_set_u64(tup, 13, es->addr_offset);
	tuple_set_u64(tup, 14, es->sample->addr);
	/* Counters and flags. */
	tuple_set_u64(tup, 15, es->sample->period);
	tuple_set_u64(tup, 16, es->sample->weight);
	tuple_set_u64(tup, 17, es->sample->transaction);
	tuple_set_u64(tup, 18, es->sample->data_src);
	tuple_set_s32(tup, 19, es->sample->flags & PERF_BRANCH_MASK);
	tuple_set_s32(tup, 20, !!(es->sample->flags & PERF_IP_FLAG_IN_TX));

	call_object(tables->sample_handler, tup, "sample_table");

	Py_DECREF(tup);
	return 0;
}
/* Export one call-path row; roots use 0 for missing parent/symbol ids. */
static int python_export_call_path(struct db_export *dbe, struct call_path *cp)
{
	struct tables *tables = container_of(dbe, struct tables, dbe);
	u64 parent_id = cp->parent ? cp->parent->db_id : 0;
	u64 symbol_id = cp->sym ? *(u64 *)symbol__priv(cp->sym) : 0;
	PyObject *tup = tuple_new(4);

	tuple_set_u64(tup, 0, cp->db_id);
	tuple_set_u64(tup, 1, parent_id);
	tuple_set_u64(tup, 2, symbol_id);
	tuple_set_u64(tup, 3, cp->ip);

	call_object(tables->call_path_handler, tup, "call_path_table");

	Py_DECREF(tup);
	return 0;
}
/*
 * Export one call/return pair to the script's call_return_table().
 * NOTE(review): cr->cp->parent is dereferenced unconditionally for the
 * parent id — presumably callers never pass the root call path here;
 * confirm against the call-return processor.
 */
static int python_export_call_return(struct db_export *dbe,
				     struct call_return *cr)
{
	struct tables *tables = container_of(dbe, struct tables, dbe);
	u64 comm_id = cr->comm ? cr->comm->db_id : 0;
	PyObject *tup = tuple_new(11);

	tuple_set_u64(tup, 0, cr->db_id);
	tuple_set_u64(tup, 1, cr->thread->db_id);
	tuple_set_u64(tup, 2, comm_id);
	tuple_set_u64(tup, 3, cr->cp->db_id);
	tuple_set_u64(tup, 4, cr->call_time);
	tuple_set_u64(tup, 5, cr->return_time);
	tuple_set_u64(tup, 6, cr->branch_count);
	tuple_set_u64(tup, 7, cr->call_ref);
	tuple_set_u64(tup, 8, cr->return_ref);
	tuple_set_u64(tup, 9, cr->cp->parent->db_id);
	tuple_set_s32(tup, 10, cr->flags);

	call_object(tables->call_return_handler, tup, "call_return_table");

	Py_DECREF(tup);
	return 0;
}
/* Adapter for the call-return processor: 'data' is our struct db_export. */
static int python_process_call_return(struct call_return *cr, void *data)
{
	return db_export__call_return((struct db_export *)data, cr);
}
static void python_process_general_event(struct perf_sample *sample,
struct perf_evsel *evsel,
struct addr_location *al)
{
PyObject *handler, *t, *dict, *callchain, *dict_sample;
static char handler_name[64];
unsigned n = 0;
/*
* Use the MAX_FIELDS to make the function expandable, though
* currently there is only one item for the tuple.
*/
t = PyTuple_New(MAX_FIELDS);
if (!t)
Py_FatalError("couldn't create Python tuple");
dict = PyDict_New();
if (!dict)
Py_FatalError("couldn't create Python dictionary");
dict_sample = PyDict_New();
if (!dict_sample)
Py_FatalError("couldn't create Python dictionary");
snprintf(handler_name, sizeof(handler_name), "%s", "process_event");
handler = get_handler(handler_name);
if (!handler)
goto exit;
pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
pydict_set_item_string_decref(dict, "attr", PyString_FromStringAndSize(
(const char *)&evsel->attr, sizeof(evsel->attr)));
pydict_set_item_string_decref(dict_sample, "pid",
PyInt_FromLong(sample->pid));
pydict_set_item_string_decref(dict_sample, "tid",
PyInt_FromLong(sample->tid));
pydict_set_item_string_decref(dict_sample, "cpu",
PyInt_FromLong(sample->cpu));
pydict_set_item_string_decref(dict_sample, "ip",
PyLong_FromUnsignedLongLong(sample->ip));
pydict_set_item_string_decref(dict_sample, "time",
PyLong_FromUnsignedLongLong(sample->time));
pydict_set_item_string_decref(dict_sample, "period",
PyLong_FromUnsignedLongLong(sample->period));
pydict_set_item_string_decref(dict, "sample", dict_sample);
pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize(
(const char *)sample->raw_data, sample->raw_size));
pydict_set_item_string_decref(dict, "comm",
PyString_FromString(thread__comm_str(al->thread)));
if (al->map) {
pydict_set_item_string_decref(dict, "dso",
PyString_FromString(al->map->dso->name));
}
if (al->sym) {
pydict_set_item_string_decref(dict, "symbol",
PyString_FromString(al->sym->name));
}
/* ip unwinding */
callchain = python_process_callchain(sample, evsel, al);
pydict_set_item_string_decref(dict, "callchain", callchain);
PyTuple_SetItem(t, n++, dict);
if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple");
call_object(handler, t, handler_name);
exit:
Py_DECREF(dict);
Py_DECREF(t);
}
/*
 * Top-level per-event dispatch: tracepoints go to the dedicated
 * handler; everything else is either exported to the database or
 * passed to the script's generic process_event() handler.
 */
static void python_process_event(union perf_event *event,
				 struct perf_sample *sample,
				 struct perf_evsel *evsel,
				 struct addr_location *al)
{
	struct tables *tables = &tables_global;

	if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
		python_process_tracepoint(sample, evsel, al);
		return;
	}

	/* Reserve for future process_hw/sw/raw APIs */
	if (tables->db_export_mode)
		db_export__sample(&tables->dbe, event, sample, evsel, al);
	else
		python_process_general_event(sample, evsel, al);
}
/*
 * Cache references to the script's __main__ module and its global
 * dictionary (used by get_handler()), then invoke the script's optional
 * trace_begin() hook.  Returns 0 on success, -1 on failure.
 */
static int run_start_sub(void)
{
	main_module = PyImport_AddModule("__main__");
	if (main_module == NULL)
		return -1;
	/* PyImport_AddModule() returns a borrowed reference; own it. */
	Py_INCREF(main_module);

	main_dict = PyModule_GetDict(main_module);
	if (main_dict == NULL)
		goto error;
	/* Same for PyModule_GetDict(). */
	Py_INCREF(main_dict);

	try_call_object("trace_begin", NULL);

	return 0;

error:
	Py_XDECREF(main_dict);
	Py_XDECREF(main_module);
	return -1;
}
/*
 * If the script defines a callable named <name>_table, remember it and
 * install the matching python_export_<name>() callback in the db_export
 * ops so that table gets exported.
 */
#define SET_TABLE_HANDLER_(name, handler_name, table_name) do { \
	tables->handler_name = get_handler(#table_name); \
	if (tables->handler_name) \
		tables->dbe.export_ ## name = python_export_ ## name; \
} while (0)

#define SET_TABLE_HANDLER(name) \
	SET_TABLE_HANDLER_(name, name ## _handler, name ## _table)
/*
 * Inspect the script's globals for database-export configuration.
 * If perf_db_export_mode is set truthy, enable export mode, optionally
 * create a call-return processor (perf_db_export_calls), and hook up
 * every *_table handler the script defines.
 */
static void set_table_handlers(struct tables *tables)
{
	const char *perf_db_export_mode = "perf_db_export_mode";
	const char *perf_db_export_calls = "perf_db_export_calls";
	PyObject *db_export_mode, *db_export_calls;
	bool export_calls = false;
	int ret;

	memset(tables, 0, sizeof(struct tables));
	if (db_export__init(&tables->dbe))
		Py_FatalError("failed to initialize export");

	/* Export mode is opt-in: absent or falsy means plain scripting. */
	db_export_mode = PyDict_GetItemString(main_dict, perf_db_export_mode);
	if (!db_export_mode)
		return;

	ret = PyObject_IsTrue(db_export_mode);
	if (ret == -1)
		handler_call_die(perf_db_export_mode);
	if (!ret)
		return;

	tables->dbe.crp = NULL;
	db_export_calls = PyDict_GetItemString(main_dict, perf_db_export_calls);
	if (db_export_calls) {
		ret = PyObject_IsTrue(db_export_calls);
		if (ret == -1)
			handler_call_die(perf_db_export_calls);
		export_calls = !!ret;
	}

	if (export_calls) {
		tables->dbe.crp =
			call_return_processor__new(python_process_call_return,
						   &tables->dbe);
		if (!tables->dbe.crp)
			Py_FatalError("failed to create calls processor");
	}

	tables->db_export_mode = true;
	/*
	 * Reserve per symbol space for symbol->db_id via symbol__priv()
	 */
	symbol_conf.priv_size = sizeof(u64);

	SET_TABLE_HANDLER(evsel);
	SET_TABLE_HANDLER(machine);
	SET_TABLE_HANDLER(thread);
	SET_TABLE_HANDLER(comm);
	SET_TABLE_HANDLER(comm_thread);
	SET_TABLE_HANDLER(dso);
	SET_TABLE_HANDLER(symbol);
	SET_TABLE_HANDLER(branch_type);
	SET_TABLE_HANDLER(sample);
	SET_TABLE_HANDLER(call_path);
	SET_TABLE_HANDLER(call_return);
}
/*
* Start trace script
*/
/*
 * Start trace script: initialize the interpreter, run the script, call
 * its trace_begin() hook and configure database-export mode.
 *
 * Bugs fixed:
 *  - malloc() of command_line was unchecked; a failure dereferenced NULL.
 *  - command_line was freed on the success path and then freed again via
 *    the error path if db_export__branch_types() failed (double free).
 *    The success-path free now happens after that call.
 *  - The script FILE* was never closed (PyRun_SimpleFile() does not
 *    close its argument).
 */
static int python_start_script(const char *script, int argc, const char **argv)
{
	struct tables *tables = &tables_global;
	const char **command_line;
	char buf[PATH_MAX];
	int i, err = 0;
	FILE *fp;

	/* argv[0] is the script path; the script's own args follow. */
	command_line = malloc((argc + 1) * sizeof(const char *));
	if (!command_line)
		return -1;
	command_line[0] = script;
	for (i = 1; i < argc + 1; i++)
		command_line[i] = argv[i - 1];

	Py_Initialize();

	initperf_trace_context();

	PySys_SetArgv(argc + 1, (char **)command_line);

	fp = fopen(script, "r");
	if (!fp) {
		sprintf(buf, "Can't open python script \"%s\"", script);
		perror(buf);
		err = -1;
		goto error;
	}

	err = PyRun_SimpleFile(fp, script);
	fclose(fp);
	if (err) {
		fprintf(stderr, "Error running python script %s\n", script);
		goto error;
	}

	err = run_start_sub();
	if (err) {
		fprintf(stderr, "Error starting python script %s\n", script);
		goto error;
	}

	set_table_handlers(tables);

	if (tables->db_export_mode) {
		err = db_export__branch_types(&tables->dbe);
		if (err)
			goto error;
	}

	free(command_line);
	return err;
error:
	Py_Finalize();
	free(command_line);
	return err;
}
/* Flush any pending database-export state. */
static int python_flush_script(void)
{
	return db_export__flush(&tables_global.dbe);
}
/*
* Stop trace script
*/
/*
 * Stop trace script: run the script's optional trace_end() hook first,
 * then tear down export state and the interpreter.
 */
static int python_stop_script(void)
{
	try_call_object("trace_end", NULL);

	db_export__exit(&tables_global.dbe);

	/* Release the references taken in run_start_sub(). */
	Py_XDECREF(main_dict);
	Py_XDECREF(main_module);
	Py_Finalize();

	return 0;
}
/*
 * Generate a starter Python script ("<outfile>.py") containing a
 * handler stub for every event in the trace, plus trace_begin/
 * trace_end/trace_unhandled and a print_header helper.  Returns 0 on
 * success, -1 if the output file cannot be opened.
 */
static int python_generate_script(struct pevent *pevent, const char *outfile)
{
	struct event_format *event = NULL;
	struct format_field *f;
	char fname[PATH_MAX];
	int not_first, count;
	FILE *ofp;

	sprintf(fname, "%s.py", outfile);
	ofp = fopen(fname, "w");
	if (ofp == NULL) {
		fprintf(stderr, "couldn't open %s\n", fname);
		return -1;
	}
	/* Boilerplate header and imports. */
	fprintf(ofp, "# perf script event handlers, "
		"generated by perf script -g python\n");
	fprintf(ofp, "# Licensed under the terms of the GNU GPL"
		" License version 2\n\n");
	fprintf(ofp, "# The common_* event handler fields are the most useful "
		"fields common to\n");
	fprintf(ofp, "# all events. They don't necessarily correspond to "
		"the 'common_*' fields\n");
	fprintf(ofp, "# in the format files. Those fields not available as "
		"handler params can\n");
	fprintf(ofp, "# be retrieved using Python functions of the form "
		"common_*(context).\n");
	fprintf(ofp, "# See the perf-trace-python Documentation for the list "
		"of available functions.\n\n");
	fprintf(ofp, "import os\n");
	fprintf(ofp, "import sys\n\n");
	fprintf(ofp, "sys.path.append(os.environ['PERF_EXEC_PATH'] + \\\n");
	fprintf(ofp, "\t'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')\n");
	fprintf(ofp, "\nfrom perf_trace_context import *\n");
	fprintf(ofp, "from Core import *\n\n\n");
	fprintf(ofp, "def trace_begin():\n");
	fprintf(ofp, "\tprint \"in trace_begin\"\n\n");
	fprintf(ofp, "def trace_end():\n");
	fprintf(ofp, "\tprint \"in trace_end\"\n\n");
	/* One handler stub per event found in the trace data. */
	while ((event = trace_find_next_event(pevent, event))) {
		/* Handler signature: common fields, then the event's fields. */
		fprintf(ofp, "def %s__%s(", event->system, event->name);
		fprintf(ofp, "event_name, ");
		fprintf(ofp, "context, ");
		fprintf(ofp, "common_cpu,\n");
		fprintf(ofp, "\tcommon_secs, ");
		fprintf(ofp, "common_nsecs, ");
		fprintf(ofp, "common_pid, ");
		fprintf(ofp, "common_comm,\n\t");
		fprintf(ofp, "common_callchain, ");

		not_first = 0;
		count = 0;

		for (f = event->format.fields; f; f = f->next) {
			if (not_first++)
				fprintf(ofp, ", ");
			/* Wrap the parameter list every five names. */
			if (++count % 5 == 0)
				fprintf(ofp, "\n\t");

			fprintf(ofp, "%s", f->name);
		}
		fprintf(ofp, "):\n");

		fprintf(ofp, "\t\tprint_header(event_name, common_cpu, "
			"common_secs, common_nsecs,\n\t\t\t"
			"common_pid, common_comm)\n\n");

		/* Format string for the field dump, %s/%d/%u per field type. */
		fprintf(ofp, "\t\tprint \"");

		not_first = 0;
		count = 0;

		for (f = event->format.fields; f; f = f->next) {
			if (not_first++)
				fprintf(ofp, ", ");
			if (count && count % 3 == 0) {
				fprintf(ofp, "\" \\\n\t\t\"");
			}
			count++;

			fprintf(ofp, "%s=", f->name);
			if (f->flags & FIELD_IS_STRING ||
			    f->flags & FIELD_IS_FLAG ||
			    f->flags & FIELD_IS_ARRAY ||
			    f->flags & FIELD_IS_SYMBOLIC)
				fprintf(ofp, "%%s");
			else if (f->flags & FIELD_IS_SIGNED)
				fprintf(ofp, "%%d");
			else
				fprintf(ofp, "%%u");
		}

		/* Argument list matching the format string above. */
		fprintf(ofp, "\" %% \\\n\t\t(");

		not_first = 0;
		count = 0;

		for (f = event->format.fields; f; f = f->next) {
			if (not_first++)
				fprintf(ofp, ", ");

			if (++count % 5 == 0)
				fprintf(ofp, "\n\t\t");

			if (f->flags & FIELD_IS_FLAG) {
				if ((count - 1) % 5 != 0) {
					fprintf(ofp, "\n\t\t");
					count = 4;
				}
				fprintf(ofp, "flag_str(\"");
				fprintf(ofp, "%s__%s\", ", event->system,
					event->name);
				fprintf(ofp, "\"%s\", %s)", f->name,
					f->name);
			} else if (f->flags & FIELD_IS_SYMBOLIC) {
				if ((count - 1) % 5 != 0) {
					fprintf(ofp, "\n\t\t");
					count = 4;
				}
				fprintf(ofp, "symbol_str(\"");
				fprintf(ofp, "%s__%s\", ", event->system,
					event->name);
				fprintf(ofp, "\"%s\", %s)", f->name,
					f->name);
			} else
				fprintf(ofp, "%s", f->name);
		}

		fprintf(ofp, ")\n\n");

		/* Callchain dump loop for each handler. */
		fprintf(ofp, "\t\tfor node in common_callchain:");
		fprintf(ofp, "\n\t\t\tif 'sym' in node:");
		fprintf(ofp, "\n\t\t\t\tprint \"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name'])");
		fprintf(ofp, "\n\t\t\telse:");
		fprintf(ofp, "\n\t\t\t\tprint \"\t[%%x]\" %% (node['ip'])\n\n");
		fprintf(ofp, "\t\tprint \"\\n\"\n\n");

	}

	fprintf(ofp, "def trace_unhandled(event_name, context, "
		"event_fields_dict):\n");

	fprintf(ofp, "\t\tprint ' '.join(['%%s=%%s'%%(k,str(v))"
		"for k,v in sorted(event_fields_dict.items())])\n\n");

	fprintf(ofp, "def print_header("
		"event_name, cpu, secs, nsecs, pid, comm):\n"
		"\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
		"(event_name, cpu, secs, nsecs, pid, comm),\n");

	fclose(ofp);

	fprintf(stderr, "generated Python script: %s\n", fname);

	return 0;
}
/* Scripting backend registration: hooks perf script into this engine. */
struct scripting_ops python_scripting_ops = {
	.name = "Python",
	.start_script = python_start_script,
	.flush_script = python_flush_script,
	.stop_script = python_stop_script,
	.process_event = python_process_event,
	.generate_script = python_generate_script,
};
| gpl-2.0 |
Stane1983/amlogic-m3 | drivers/hwmon/max6650.c | 994 | 20901 | /*
* max6650.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring.
*
* (C) 2007 by Hans J. Koch <hjk@linutronix.de>
*
* based on code written by John Morris <john.morris@spirentcom.com>
* Copyright (c) 2003 Spirent Communications
* and Claus Gindhart <claus.gindhart@kontron.com>
*
* This module has only been tested with the MAX6650 chip. It should
* also work with the MAX6651. It does not distinguish max6650 and max6651
* chips.
*
* The datasheet was last seen at:
*
* http://pdfserv.maxim-ic.com/en/ds/MAX6650-MAX6651.pdf
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
/*
* Addresses to scan. There are four disjoint possibilities, by pin config.
*/
static const unsigned short normal_i2c[] = {0x1b, 0x1f, 0x48, 0x4b,
I2C_CLIENT_END};
/*
* Insmod parameters
*/
/* fan_voltage: 5=5V fan, 12=12V fan, 0=don't change */
static int fan_voltage;
/* prescaler: Possible values are 1, 2, 4, 8, 16 or 0 for don't change */
static int prescaler;
/* clock: The clock frequency of the chip the driver should assume */
static int clock = 254000;
module_param(fan_voltage, int, S_IRUGO);
module_param(prescaler, int, S_IRUGO);
module_param(clock, int, S_IRUGO);
/*
* MAX 6650/6651 registers
*/
#define MAX6650_REG_SPEED 0x00
#define MAX6650_REG_CONFIG 0x02
#define MAX6650_REG_GPIO_DEF 0x04
#define MAX6650_REG_DAC 0x06
#define MAX6650_REG_ALARM_EN 0x08
#define MAX6650_REG_ALARM 0x0A
#define MAX6650_REG_TACH0 0x0C
#define MAX6650_REG_TACH1 0x0E
#define MAX6650_REG_TACH2 0x10
#define MAX6650_REG_TACH3 0x12
#define MAX6650_REG_GPIO_STAT 0x14
#define MAX6650_REG_COUNT 0x16
/*
* Config register bits
*/
#define MAX6650_CFG_V12 0x08
#define MAX6650_CFG_PRESCALER_MASK 0x07
#define MAX6650_CFG_PRESCALER_2 0x01
#define MAX6650_CFG_PRESCALER_4 0x02
#define MAX6650_CFG_PRESCALER_8 0x03
#define MAX6650_CFG_PRESCALER_16 0x04
#define MAX6650_CFG_MODE_MASK 0x30
#define MAX6650_CFG_MODE_ON 0x00
#define MAX6650_CFG_MODE_OFF 0x10
#define MAX6650_CFG_MODE_CLOSED_LOOP 0x20
#define MAX6650_CFG_MODE_OPEN_LOOP 0x30
#define MAX6650_COUNT_MASK 0x03
/*
* Alarm status register bits
*/
#define MAX6650_ALRM_MAX 0x01
#define MAX6650_ALRM_MIN 0x02
#define MAX6650_ALRM_TACH 0x04
#define MAX6650_ALRM_GPIO1 0x08
#define MAX6650_ALRM_GPIO2 0x10
/* Minimum and maximum values of the FAN-RPM */
#define FAN_RPM_MIN 240
#define FAN_RPM_MAX 30000
#define DIV_FROM_REG(reg) (1 << (reg & 7))
static int max6650_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int max6650_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int max6650_init_client(struct i2c_client *client);
static int max6650_remove(struct i2c_client *client);
static struct max6650_data *max6650_update_device(struct device *dev);
/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id max6650_id[] = {
{ "max6650", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max6650_id);
static struct i2c_driver max6650_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "max6650",
},
.probe = max6650_probe,
.remove = max6650_remove,
.id_table = max6650_id,
.detect = max6650_detect,
.address_list = normal_i2c,
};
/*
* Client data (each client gets its own)
*/
struct max6650_data
{
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* register values */
u8 speed;
u8 config;
u8 tach[4];
u8 count;
u8 dac;
u8 alarm;
};
/* Show the measured speed of fan[1-4]_input in RPM. */
static ssize_t get_fan(struct device *dev, struct device_attribute *devattr,
		       char *buf)
{
	struct sensor_device_attribute *sattr = to_sensor_dev_attr(devattr);
	struct max6650_data *data = max6650_update_device(dev);
	int rpm;

	/*
	 * The tachometer counts pulses over a window selected by the
	 * "count" register (0.25, 0.5, 1 or 2 seconds).  Assuming the
	 * common two pulses per fan revolution:
	 *
	 *   rpm = pulses * 60 / (2 * window_s) = pulses * 120 / divider
	 */
	rpm = data->tach[sattr->index] * 120 / DIV_FROM_REG(data->count);

	return sprintf(buf, "%d\n", rpm);
}
/*
* Set the fan speed to the specified RPM (or read back the RPM setting).
* This works in closed loop mode only. Use pwm1 for open loop speed setting.
*
* The MAX6650/1 will automatically control fan speed when in closed loop
* mode.
*
* Assumptions:
*
* 1) The MAX6650/1 internal 254kHz clock frequency is set correctly. Use
* the clock module parameter if you need to fine tune this.
*
* 2) The prescaler (low three bits of the config register) has already
* been set to an appropriate value. Use the prescaler module parameter
* if your BIOS doesn't initialize the chip properly.
*
* The relevant equations are given on pages 21 and 22 of the datasheet.
*
* From the datasheet, the relevant equation when in regulation is:
*
* [fCLK / (128 x (KTACH + 1))] = 2 x FanSpeed / KSCALE
*
* where:
*
* fCLK is the oscillator frequency (either the 254kHz internal
* oscillator or the externally applied clock)
*
* KTACH is the value in the speed register
*
* FanSpeed is the speed of the fan in rps
*
* KSCALE is the prescaler value (1, 2, 4, 8, or 16)
*
* When reading, we need to solve for FanSpeed. When writing, we need to
* solve for KTACH.
*
* Note: this tachometer is completely separate from the tachometers
* used to measure the fan speeds. Only one fan's speed (fan1) is
* controlled.
*/
/* Show the closed-loop regulation target (fan1_target) in RPM. */
static ssize_t get_target(struct device *dev, struct device_attribute *devattr,
			  char *buf)
{
	struct max6650_data *data = max6650_update_device(dev);
	int kscale, rpm;

	/*
	 * Datasheet equation:
	 *
	 *   FanSpeed = KSCALE x fCLK / [256 x (KTACH + 1)]   (in rps)
	 *
	 * multiplied by 60 to convert to rpm.
	 */
	kscale = DIV_FROM_REG(data->config);
	rpm = 60 * kscale * clock / (256 * (data->speed + 1));

	return sprintf(buf, "%d\n", rpm);
}
/*
 * Store a new closed-loop regulation target (fan1_target) in RPM.
 * The requested speed is clamped to [FAN_RPM_MIN, FAN_RPM_MAX] and
 * converted to a KTACH register value per the datasheet.
 */
static ssize_t set_target(struct device *dev, struct device_attribute *devattr,
			  const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);
	int rpm = simple_strtoul(buf, NULL, 10);
	int kscale, ktach;

	rpm = SENSORS_LIMIT(rpm, FAN_RPM_MIN, FAN_RPM_MAX);

	mutex_lock(&data->update_lock);

	/*
	 * Divide rpm by 60 to get rps, then apply the datasheet equation:
	 *
	 *   KTACH = [(fCLK x KSCALE) / (256 x FanSpeed)] - 1
	 *
	 * and clamp the result to the 8-bit register range.
	 */
	kscale = DIV_FROM_REG(data->config);
	ktach = (clock * kscale) / (256 * rpm / 60) - 1;
	ktach = SENSORS_LIMIT(ktach, 0, 255);
	data->speed = ktach;

	i2c_smbus_write_byte_data(client, MAX6650_REG_SPEED, data->speed);

	mutex_unlock(&data->update_lock);
	return count;
}
/*
* Get/set the fan speed in open loop mode using pwm1 sysfs file.
* Speed is given as a relative value from 0 to 255, where 255 is maximum
* speed. Note that this is done by writing directly to the chip's DAC,
* it won't change the closed loop speed set by fan1_target.
* Also note that due to rounding errors it is possible that you don't read
* back exactly the value you have set.
*/
/* Show the open-loop speed setting (pwm1) as 0-255, derived from the DAC. */
static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr,
		       char *buf)
{
	struct max6650_data *data = max6650_update_device(dev);
	int dac_max, pwm;

	/*
	 * Useful DAC range is 0-180 for 12V fans and 0-76 for 5V fans;
	 * larger DAC values mean slower fans, hence the inversion.
	 */
	dac_max = (data->config & MAX6650_CFG_V12) ? 180 : 76;
	pwm = 255 - (255 * (int)data->dac) / dac_max;
	if (pwm < 0)
		pwm = 0;

	return sprintf(buf, "%d\n", pwm);
}
/*
 * Store a new open-loop speed setting (pwm1, 0-255) by programming the
 * DAC directly.  Does not affect the closed-loop target (fan1_target).
 */
static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
		       const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);
	int pwm = simple_strtoul(buf, NULL, 10);
	int dac_max;

	pwm = SENSORS_LIMIT(pwm, 0, 255);

	mutex_lock(&data->update_lock);

	/* DAC full-scale depends on the fan voltage; low DAC = fast fan */
	dac_max = (data->config & MAX6650_CFG_V12) ? 180 : 76;
	data->dac = dac_max - (dac_max * pwm) / 255;

	i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, data->dac);

	mutex_unlock(&data->update_lock);
	return count;
}
/*
* Get/Set controller mode:
* Possible values:
* 0 = Fan always on
* 1 = Open loop, Voltage is set according to speed, not regulated.
* 2 = Closed loop, RPM for all fans regulated by fan1 tachometer
*/
/* Show the controller mode (pwm1_enable): 0=on, 1=open loop, 2=closed loop. */
static ssize_t get_enable(struct device *dev, struct device_attribute *devattr,
			  char *buf)
{
	/* Maps the chip's 2-bit mode field (config bits 5:4) to sysfs values */
	static const int sysfs_modes[4] = {0, 1, 2, 1};
	struct max6650_data *data = max6650_update_device(dev);
	int mode = (data->config & MAX6650_CFG_MODE_MASK) >> 4;

	return sprintf(buf, "%d\n", sysfs_modes[mode]);
}
/* Store a new controller mode (pwm1_enable); only 0, 1 and 2 are legal. */
static ssize_t set_enable(struct device *dev, struct device_attribute *devattr,
			  const char *buf, size_t count)
{
	/* Maps sysfs values 0..2 to the chip's mode field (config bits 5:4) */
	static const int max6650_modes[3] = {0, 3, 2};
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);
	int mode = simple_strtoul(buf, NULL, 10);

	if (mode < 0 || mode > 2) {
		dev_err(&client->dev,
			"illegal value for pwm1_enable (%d)\n", mode);
		return -EINVAL;
	}

	mutex_lock(&data->update_lock);

	/* Re-read config so we only modify the mode bits */
	data->config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);
	data->config = (data->config & ~MAX6650_CFG_MODE_MASK)
		       | (max6650_modes[mode] << 4);
	i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, data->config);

	mutex_unlock(&data->update_lock);
	return count;
}
/*
* Read/write functions for fan1_div sysfs file. The MAX6650 has no such
* divider. We handle this by converting between divider and counttime:
*
* (counttime == k) <==> (divider == 2^k), k = 0, 1, 2, or 3
*
* Lower values of k allow to connect a faster fan without the risk of
* counter overflow. The price is lower resolution. You can also set counttime
* using the module parameter. Note that the module parameter "prescaler" also
* influences the behaviour. Unfortunately, there's no sysfs attribute
* defined for that. See the data sheet for details.
*/
/* Show fan1_div (1, 2, 4 or 8), derived from the count-time register. */
static ssize_t get_div(struct device *dev, struct device_attribute *devattr,
		       char *buf)
{
	struct max6650_data *data = max6650_update_device(dev);

	return sprintf(buf, "%d\n", DIV_FROM_REG(data->count));
}
/*
 * Store fan1_div.  The MAX6650 has no real divider, so the value is
 * mapped to the count-time register: divider == 2^counttime.  Only the
 * powers of two 1, 2, 4 and 8 are accepted.
 */
static ssize_t set_div(struct device *dev, struct device_attribute *devattr,
		       const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);
	int div = simple_strtoul(buf, NULL, 10);
	u8 counttime;

	mutex_lock(&data->update_lock);

	switch (div) {
	case 1:
		counttime = 0;
		break;
	case 2:
		counttime = 1;
		break;
	case 4:
		counttime = 2;
		break;
	case 8:
		counttime = 3;
		break;
	default:
		mutex_unlock(&data->update_lock);
		dev_err(&client->dev,
			"illegal value for fan divider (%d)\n", div);
		return -EINVAL;
	}

	data->count = counttime;
	i2c_smbus_write_byte_data(client, MAX6650_REG_COUNT, data->count);

	mutex_unlock(&data->update_lock);
	return count;
}
/*
* Get alarm stati:
* Possible values:
* 0 = no alarm
* 1 = alarm
*/
/*
 * Show one alarm bit (0 = no alarm, 1 = alarm).  attr->index is the
 * MAX6650_ALRM_* mask for the bit this sysfs file reports.
 */
static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr,
			 char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	struct max6650_data *data = max6650_update_device(dev);
	struct i2c_client *client = to_i2c_client(dev);
	int alarm = 0;

	if (data->alarm & attr->index) {
		mutex_lock(&data->update_lock);
		alarm = 1;
		/*
		 * Clear the reported bit from the latched copy, then
		 * re-read the alarm register (reading clears chip-side
		 * alarms whose cause has gone away; still-active ones
		 * are re-latched for the other alarm files).
		 */
		data->alarm &= ~attr->index;
		data->alarm |= i2c_smbus_read_byte_data(client,
							MAX6650_REG_ALARM);
		mutex_unlock(&data->update_lock);
	}

	return sprintf(buf, "%d\n", alarm);
}
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1);
static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2);
static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3);
static DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, set_target);
static DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div);
static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable);
static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm);
static SENSOR_DEVICE_ATTR(fan1_max_alarm, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_MAX);
static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_MIN);
static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_TACH);
static SENSOR_DEVICE_ATTR(gpio1_alarm, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_GPIO1);
static SENSOR_DEVICE_ATTR(gpio2_alarm, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_GPIO2);
/*
 * sysfs group .is_visible callback: hide the alarm attributes whose
 * corresponding alarm was not enabled by the firmware (ALARM_EN register).
 */
static mode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
				    int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct i2c_client *client = to_i2c_client(dev);
	u8 alarm_en = i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN);
	struct device_attribute *devattr =
		container_of(a, struct device_attribute, attr);

	if (devattr == &sensor_dev_attr_fan1_max_alarm.dev_attr ||
	    devattr == &sensor_dev_attr_fan1_min_alarm.dev_attr ||
	    devattr == &sensor_dev_attr_fan1_fault.dev_attr ||
	    devattr == &sensor_dev_attr_gpio1_alarm.dev_attr ||
	    devattr == &sensor_dev_attr_gpio2_alarm.dev_attr) {
		/* The attribute's index doubles as its enable-bit mask */
		if (!(alarm_en & to_sensor_dev_attr(devattr)->index))
			return 0;
	}

	return a->mode;
}
static struct attribute *max6650_attrs[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan3_input.dev_attr.attr,
&sensor_dev_attr_fan4_input.dev_attr.attr,
&dev_attr_fan1_target.attr,
&dev_attr_fan1_div.attr,
&dev_attr_pwm1_enable.attr,
&dev_attr_pwm1.attr,
&sensor_dev_attr_fan1_max_alarm.dev_attr.attr,
&sensor_dev_attr_fan1_min_alarm.dev_attr.attr,
&sensor_dev_attr_fan1_fault.dev_attr.attr,
&sensor_dev_attr_gpio1_alarm.dev_attr.attr,
&sensor_dev_attr_gpio2_alarm.dev_attr.attr,
NULL
};
static struct attribute_group max6650_attr_grp = {
.attrs = max6650_attrs,
.is_visible = max6650_attrs_visible,
};
/*
* Real code
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
/* Return 0 if detection is successful, -ENODEV otherwise */
static int max6650_detect(struct i2c_client *client,
			  struct i2c_board_info *info)
{
	struct i2c_adapter *adapter = client->adapter;
	int address = client->addr;

	dev_dbg(&adapter->dev, "max6650_detect called\n");

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_dbg(&adapter->dev, "max6650: I2C bus doesn't support "
					"byte read mode, skipping.\n");
		return -ENODEV;
	}

	/*
	 * Probe the unused (reserved) bits of several registers: they
	 * must all read back as zero on a real MAX6650/1.
	 */
	if ((i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG) & 0xC0) ||
	    (i2c_smbus_read_byte_data(client, MAX6650_REG_GPIO_STAT) & 0xE0) ||
	    (i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN) & 0xE0) ||
	    (i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM) & 0xE0) ||
	    (i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT) & 0xFC)) {
		dev_dbg(&adapter->dev,
			"max6650: detection failed at 0x%02x.\n", address);
		return -ENODEV;
	}

	dev_info(&adapter->dev, "max6650: chip found at 0x%02x.\n", address);

	strlcpy(info->type, "max6650", I2C_NAME_SIZE);

	return 0;
}
/*
 * Bind the driver to a detected chip: allocate client data, initialize
 * the hardware, create the sysfs group and register the hwmon device.
 * All acquired resources are released in reverse order on failure.
 */
static int max6650_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct max6650_data *data;
	int err;

	data = kzalloc(sizeof(struct max6650_data), GFP_KERNEL);
	if (!data) {
		dev_err(&client->dev, "out of memory.\n");
		return -ENOMEM;
	}

	i2c_set_clientdata(client, data);
	mutex_init(&data->update_lock);

	/* Initialize the max6650 chip */
	err = max6650_init_client(client);
	if (err)
		goto err_free;

	err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp);
	if (err)
		goto err_free;

	data->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(data->hwmon_dev)) {
		err = PTR_ERR(data->hwmon_dev);
		dev_err(&client->dev, "error registering hwmon device.\n");
		goto err_remove;
	}

	return 0;

err_remove:
	sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
err_free:
	kfree(data);
	return err;
}
/* Undo probe: remove sysfs files, unregister hwmon, free client data. */
static int max6650_remove(struct i2c_client *client)
{
	struct max6650_data *data = i2c_get_clientdata(client);

	sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
	hwmon_device_unregister(data->hwmon_dev);
	kfree(data);
	return 0;
}
/*
 * Apply the fan_voltage and prescaler module parameters to the config
 * register and make sure the chip is in a mode the driver can present
 * through hwmon.  Returns 0 on success, -EIO on a config read/write
 * failure.
 */
static int max6650_init_client(struct i2c_client *client)
{
	struct max6650_data *data = i2c_get_clientdata(client);
	int config;
	int err = -EIO;

	config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG);

	if (config < 0) {
		dev_err(&client->dev, "Error reading config, aborting.\n");
		return err;
	}

	switch (fan_voltage) {
	case 0:		/* 0 = keep the firmware/BIOS setting */
		break;
	case 5:
		config &= ~MAX6650_CFG_V12;
		break;
	case 12:
		config |= MAX6650_CFG_V12;
		break;
	default:
		/*
		 * NOTE(review): an illegal module parameter is only
		 * logged; init continues with the previous setting.
		 */
		dev_err(&client->dev,
			"illegal value for fan_voltage (%d)\n",
			fan_voltage);
	}

	dev_info(&client->dev, "Fan voltage is set to %dV.\n",
		(config & MAX6650_CFG_V12) ? 12 : 5);

	switch (prescaler) {
	case 0:		/* 0 = don't change */
		break;
	case 1:
		config &= ~MAX6650_CFG_PRESCALER_MASK;
		break;
	case 2:
		config = (config & ~MAX6650_CFG_PRESCALER_MASK)
			| MAX6650_CFG_PRESCALER_2;
		break;
	case 4:
		config = (config & ~MAX6650_CFG_PRESCALER_MASK)
			| MAX6650_CFG_PRESCALER_4;
		break;
	case 8:
		config = (config & ~MAX6650_CFG_PRESCALER_MASK)
			| MAX6650_CFG_PRESCALER_8;
		break;
	case 16:
		config = (config & ~MAX6650_CFG_PRESCALER_MASK)
			| MAX6650_CFG_PRESCALER_16;
		break;
	default:
		/* As above: logged but otherwise ignored */
		dev_err(&client->dev,
			"illegal value for prescaler (%d)\n",
			prescaler);
	}

	dev_info(&client->dev, "Prescaler is set to %d.\n",
		1 << (config & MAX6650_CFG_PRESCALER_MASK));

	/* If mode is set to "full off", we change it to "open loop" and
	 * set DAC to 255, which has the same effect. We do this because
	 * there's no "full off" mode defined in hwmon specifications.
	 */
	if ((config & MAX6650_CFG_MODE_MASK) == MAX6650_CFG_MODE_OFF) {
		dev_dbg(&client->dev, "Change mode to open loop, full off.\n");
		config = (config & ~MAX6650_CFG_MODE_MASK)
			| MAX6650_CFG_MODE_OPEN_LOOP;
		if (i2c_smbus_write_byte_data(client, MAX6650_REG_DAC, 255)) {
			dev_err(&client->dev, "DAC write error, aborting.\n");
			return err;
		}
	}

	if (i2c_smbus_write_byte_data(client, MAX6650_REG_CONFIG, config)) {
		dev_err(&client->dev, "Config write error, aborting.\n");
		return err;
	}

	/* Cache the final config and count-time for the sysfs readers */
	data->config = config;
	data->count = i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT);

	return 0;
}
static const u8 tach_reg[] = {
MAX6650_REG_TACH0,
MAX6650_REG_TACH1,
MAX6650_REG_TACH2,
MAX6650_REG_TACH3,
};
/*
 * Refresh the cached register values, at most once per second.  Called
 * at the top of every sysfs show function; returns the client data with
 * the cache up to date (lock is released before returning).
 */
static struct max6650_data *max6650_update_device(struct device *dev)
{
	int i;
	struct i2c_client *client = to_i2c_client(dev);
	struct max6650_data *data = i2c_get_clientdata(client);

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
		data->speed = i2c_smbus_read_byte_data(client,
						       MAX6650_REG_SPEED);
		data->config = i2c_smbus_read_byte_data(client,
							MAX6650_REG_CONFIG);
		for (i = 0; i < 4; i++) {
			data->tach[i] = i2c_smbus_read_byte_data(client,
								 tach_reg[i]);
		}
		data->count = i2c_smbus_read_byte_data(client,
						       MAX6650_REG_COUNT);
		data->dac = i2c_smbus_read_byte_data(client, MAX6650_REG_DAC);

		/* Alarms are cleared on read in case the condition that
		 * caused the alarm is removed. Keep the value latched here
		 * for providing the register through different alarm files. */
		data->alarm |= i2c_smbus_read_byte_data(client,
							MAX6650_REG_ALARM);

		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

	return data;
}
/* Module entry point: register the I2C driver. */
static int __init sensors_max6650_init(void)
{
	return i2c_add_driver(&max6650_driver);
}
/* Module exit point: unregister the I2C driver. */
static void __exit sensors_max6650_exit(void)
{
	i2c_del_driver(&max6650_driver);
}
MODULE_AUTHOR("Hans J. Koch");
MODULE_DESCRIPTION("MAX6650 sensor driver");
MODULE_LICENSE("GPL");
module_init(sensors_max6650_init);
module_exit(sensors_max6650_exit);
| gpl-2.0 |
joaquimorg/android_kernel_huawei_u8510 | drivers/hwmon/ltc4215.c | 1506 | 8530 | /*
* Driver for Linear Technology LTC4215 I2C Hot Swap Controller
*
* Copyright (C) 2009 Ira W. Snyder <iws@ovro.caltech.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* Datasheet:
* http://www.linear.com/pc/downloadDocument.do?navId=H0,C1,C1003,C1006,C1163,P17572,D12697
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
/* Here are names of the chip's registers (a.k.a. commands) */
enum ltc4215_cmd {
LTC4215_CONTROL = 0x00, /* rw */
LTC4215_ALERT = 0x01, /* rw */
LTC4215_STATUS = 0x02, /* ro */
LTC4215_FAULT = 0x03, /* rw */
LTC4215_SENSE = 0x04, /* rw */
LTC4215_SOURCE = 0x05, /* rw */
LTC4215_ADIN = 0x06, /* rw */
};
struct ltc4215_data {
struct device *hwmon_dev;
struct mutex update_lock;
bool valid;
unsigned long last_updated; /* in jiffies */
/* Registers */
u8 regs[7];
};
/*
 * Refresh the cached copy of all seven chip registers, at most every
 * HZ/10 jiffies.  A failed register read is cached as 0 (the callers
 * receive no error indication).
 */
static struct ltc4215_data *ltc4215_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct ltc4215_data *data = i2c_get_clientdata(client);
	s32 val;
	int i;

	mutex_lock(&data->update_lock);

	/* The chip's A/D updates 10 times per second */
	if (time_after(jiffies, data->last_updated + HZ / 10) || !data->valid) {

		dev_dbg(&client->dev, "Starting ltc4215 update\n");

		/* Read all registers */
		for (i = 0; i < ARRAY_SIZE(data->regs); i++) {
			val = i2c_smbus_read_byte_data(client, i);
			if (unlikely(val < 0))
				data->regs[i] = 0;	/* read error: cache 0 */
			else
				data->regs[i] = val;
		}

		data->last_updated = jiffies;
		data->valid = 1;
	}

	mutex_unlock(&data->update_lock);

	return data;
}
/* Return the voltage from the given register in millivolts */
/* Return the voltage from the given register in millivolts */
static int ltc4215_get_voltage(struct device *dev, u8 reg)
{
	struct ltc4215_data *data = ltc4215_update_device(dev);
	const u8 regval = data->regs[reg];
	u32 voltage = 0;

	switch (reg) {
	case LTC4215_SENSE:
		voltage = regval * 151 / 1000;	/* 151 uV per increment */
		break;
	case LTC4215_SOURCE:
		voltage = regval * 605 / 10;	/* 60.5 mV per increment */
		break;
	case LTC4215_ADIN:
		/*
		 * ADIN has 4.82 mV per increment and sits behind a
		 * 1:12.5 divider, hence the extra multiply.
		 */
		voltage = regval * 482 * 125 / 1000;
		break;
	default:
		/* If we get here, the developer messed up */
		WARN_ON_ONCE(1);
		break;
	}

	return voltage;
}
/* Return the current from the sense resistor in mA */
/* Return the current from the sense resistor in mA */
static unsigned int ltc4215_get_current(struct device *dev)
{
	struct ltc4215_data *data = ltc4215_update_device(dev);

	/*
	 * Fixed-point math (no floating point in the kernel):
	 * the sense LSB is 151 uV and the shunt is 4 mOhm, so
	 *
	 *   I[mA] = (regval * 151 uV) / 4 mOhm
	 *
	 * (X uV / Y mOhm == Z mA by Ohm's law).
	 */
	return (data->regs[LTC4215_SENSE] * 151) / 4;
}
/* sysfs show: voltage in mV for the register named by the attr index. */
static ssize_t ltc4215_show_voltage(struct device *dev,
				    struct device_attribute *da,
				    char *buf)
{
	struct sensor_device_attribute *sattr = to_sensor_dev_attr(da);
	const int voltage = ltc4215_get_voltage(dev, sattr->index);

	return snprintf(buf, PAGE_SIZE, "%d\n", voltage);
}
/* sysfs show: sense-resistor current in mA. */
static ssize_t ltc4215_show_current(struct device *dev,
				    struct device_attribute *da,
				    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", ltc4215_get_current(dev));
}
/*
 * sysfs show: virtual power reading in uW, computed from the measured
 * current and the ADIN output voltage.
 */
static ssize_t ltc4215_show_power(struct device *dev,
				  struct device_attribute *da,
				  char *buf)
{
	const unsigned int curr = ltc4215_get_current(dev);
	const int output_voltage = ltc4215_get_voltage(dev, LTC4215_ADIN);

	/* current in mA * voltage in mV == power in uW */
	/* NOTE(review): the product is int * unsigned, so abs() acts on
	 * an unsigned value here — presumably intended as a no-op guard;
	 * confirm against the original author's intent. */
	const unsigned int power = abs(output_voltage * curr);

	return snprintf(buf, PAGE_SIZE, "%u\n", power);
}
/*
 * sysfs show: one alarm bit (0/1).  The attribute's nr field carries
 * the bit mask and its index the register to test.
 */
static ssize_t ltc4215_show_alarm(struct device *dev,
				  struct device_attribute *da,
				  char *buf)
{
	struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(da);
	struct ltc4215_data *data = ltc4215_update_device(dev);
	const u8 reg = data->regs[attr2->index];
	const u32 mask = attr2->nr;

	return snprintf(buf, PAGE_SIZE, "%u\n", (reg & mask) ? 1 : 0);
}
/* These macros are used below in constructing device attribute objects
* for use with sysfs_create_group() to make a sysfs device file
* for each register.
*/
#define LTC4215_VOLTAGE(name, ltc4215_cmd_idx) \
static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
ltc4215_show_voltage, NULL, ltc4215_cmd_idx)
#define LTC4215_CURRENT(name) \
static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
ltc4215_show_current, NULL, 0);
#define LTC4215_POWER(name) \
static SENSOR_DEVICE_ATTR(name, S_IRUGO, \
ltc4215_show_power, NULL, 0);
#define LTC4215_ALARM(name, mask, reg) \
static SENSOR_DEVICE_ATTR_2(name, S_IRUGO, \
ltc4215_show_alarm, NULL, (mask), reg)
/* Construct a sensor_device_attribute structure for each register */
/* Current */
LTC4215_CURRENT(curr1_input);
LTC4215_ALARM(curr1_max_alarm, (1 << 2), LTC4215_STATUS);
/* Power (virtual) */
LTC4215_POWER(power1_input);
LTC4215_ALARM(power1_alarm, (1 << 3), LTC4215_STATUS);
/* Input Voltage */
LTC4215_VOLTAGE(in1_input, LTC4215_ADIN);
LTC4215_ALARM(in1_max_alarm, (1 << 0), LTC4215_STATUS);
LTC4215_ALARM(in1_min_alarm, (1 << 1), LTC4215_STATUS);
/* Output Voltage */
LTC4215_VOLTAGE(in2_input, LTC4215_SOURCE);
/* Finally, construct an array of pointers to members of the above objects,
* as required for sysfs_create_group()
*/
static struct attribute *ltc4215_attributes[] = {
&sensor_dev_attr_curr1_input.dev_attr.attr,
&sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
&sensor_dev_attr_power1_input.dev_attr.attr,
&sensor_dev_attr_power1_alarm.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_max_alarm.dev_attr.attr,
&sensor_dev_attr_in1_min_alarm.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
NULL,
};
static const struct attribute_group ltc4215_group = {
.attrs = ltc4215_attributes,
};
static int ltc4215_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = client->adapter;
struct ltc4215_data *data;
int ret;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
ret = -ENOMEM;
goto out_kzalloc;
}
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/* Initialize the LTC4215 chip */
i2c_smbus_write_byte_data(client, LTC4215_FAULT, 0x00);
/* Register sysfs hooks */
ret = sysfs_create_group(&client->dev.kobj, <c4215_group);
if (ret)
goto out_sysfs_create_group;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
ret = PTR_ERR(data->hwmon_dev);
goto out_hwmon_device_register;
}
return 0;
out_hwmon_device_register:
sysfs_remove_group(&client->dev.kobj, <c4215_group);
out_sysfs_create_group:
kfree(data);
out_kzalloc:
return ret;
}
static int ltc4215_remove(struct i2c_client *client)
{
struct ltc4215_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, <c4215_group);
kfree(data);
return 0;
}
static const struct i2c_device_id ltc4215_id[] = {
{ "ltc4215", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ltc4215_id);
/* This is the driver that will be inserted */
static struct i2c_driver ltc4215_driver = {
.driver = {
.name = "ltc4215",
},
.probe = ltc4215_probe,
.remove = ltc4215_remove,
.id_table = ltc4215_id,
};
/* Module entry point: register the I2C driver. */
static int __init ltc4215_init(void)
{
	return i2c_add_driver(&ltc4215_driver);
}
/* Module exit point: unregister the I2C driver. */
static void __exit ltc4215_exit(void)
{
	i2c_del_driver(&ltc4215_driver);
}
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("LTC4215 driver");
MODULE_LICENSE("GPL");
module_init(ltc4215_init);
module_exit(ltc4215_exit);
| gpl-2.0 |
HandyMenny/android_kernel_samsung_msm | drivers/net/ixgbe/ixgbe_common.c | 1506 | 90561 | /*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2011 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
u16 offset);
/**
 *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
 *  @hw: pointer to hardware structure
 *
 *  Starts the hardware by filling the bus info structure and media type, clears
 *  all on chip counters, initializes receive address registers, multicast
 *  table, VLAN filter table, calls routine to set up link and flow control
 *  settings, and leaves transmit and receive units disabled and uninitialized
 *
 *  Always returns 0.
 **/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
	u32 ctrl_ext;

	/* Set the media type */
	hw->phy.media_type = hw->mac.ops.get_media_type(hw);

	/* Identify the PHY */
	hw->phy.ops.identify(hw);

	/* Clear the VLAN filter table */
	hw->mac.ops.clear_vfta(hw);

	/* Clear statistics registers (counters are clear-on-read) */
	hw->mac.ops.clear_hw_cntrs(hw);

	/* Set No Snoop Disable */
	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
	IXGBE_WRITE_FLUSH(hw);

	/* Setup flow control for packet buffer 0 */
	ixgbe_setup_fc(hw, 0);

	/* Clear adapter stopped flag so other code may touch the hardware */
	hw->adapter_stopped = false;

	return 0;
}
/**
 *  ixgbe_start_hw_gen2 - Init sequence for common device family
 *  @hw: pointer to hw structure
 *
 *  Performs the init sequence common to the second generation
 *  of 10 GbE devices.
 *  Devices in the second generation:
 *      82599
 *      X540
 *
 *  Always returns 0.
 **/
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{
	u32 i;
	u32 regval;

	/* Clear the per-queue Tx rate limiters (RTTBCNRC selected via
	 * the RTTDQSEL queue index register) */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	/* Disable relaxed ordering for Tx descriptor write-back */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	/* Disable relaxed ordering for Rx descriptor and header writes */
	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}

	return 0;
}
/**
 *  ixgbe_init_hw_generic - Generic hardware initialization
 *  @hw: pointer to hardware structure
 *
 *  Initialize the hardware by resetting the hardware, filling the bus info
 *  structure and media type, clears all on chip counters, initializes receive
 *  address registers, multicast table, VLAN filter table, calls routine to set
 *  up link and flow control settings, and leaves transmit and receive units
 *  disabled and uninitialized
 *
 *  Returns 0 on success, or the error code from reset_hw/start_hw.
 **/
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
{
	s32 status;

	/* Reset the hardware; do not start it if the reset failed */
	status = hw->mac.ops.reset_hw(hw);
	if (status != 0)
		return status;

	/* Start the HW */
	return hw->mac.ops.start_hw(hw);
}
/**
 *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
 *  @hw: pointer to hardware structure
 *
 *  Clears all hardware statistics counters by reading them from the hardware
 *  Statistics counters are clear on read.
 *
 *  Always returns 0.
 **/
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
{
	u16 i = 0;

	/* CRC, illegal byte, error byte and MAC short packet discard */
	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
	IXGBE_READ_REG(hw, IXGBE_ERRBC);
	IXGBE_READ_REG(hw, IXGBE_MSPDC);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_MPC(i));

	IXGBE_READ_REG(hw, IXGBE_MLFC);
	IXGBE_READ_REG(hw, IXGBE_MRFC);
	IXGBE_READ_REG(hw, IXGBE_RLEC);

	/* Link XON/XOFF pause frame counters; 82599 and later use the
	 * *CNT variants of the Rx registers */
	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
	if (hw->mac.type >= ixgbe_mac_82599EB) {
		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
	} else {
		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
	}

	/* Per-priority XON/XOFF counters */
	for (i = 0; i < 8; i++) {
		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
		}
	}
	if (hw->mac.type >= ixgbe_mac_82599EB)
		for (i = 0; i < 8; i++)
			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));

	/* Rx/Tx packet size histogram and good/broadcast/multicast totals */
	IXGBE_READ_REG(hw, IXGBE_PRC64);
	IXGBE_READ_REG(hw, IXGBE_PRC127);
	IXGBE_READ_REG(hw, IXGBE_PRC255);
	IXGBE_READ_REG(hw, IXGBE_PRC511);
	IXGBE_READ_REG(hw, IXGBE_PRC1023);
	IXGBE_READ_REG(hw, IXGBE_PRC1522);
	IXGBE_READ_REG(hw, IXGBE_GPRC);
	IXGBE_READ_REG(hw, IXGBE_BPRC);
	IXGBE_READ_REG(hw, IXGBE_MPRC);
	IXGBE_READ_REG(hw, IXGBE_GPTC);
	IXGBE_READ_REG(hw, IXGBE_GORCL);
	IXGBE_READ_REG(hw, IXGBE_GORCH);
	IXGBE_READ_REG(hw, IXGBE_GOTCL);
	IXGBE_READ_REG(hw, IXGBE_GOTCH);
	for (i = 0; i < 8; i++)
		IXGBE_READ_REG(hw, IXGBE_RNBC(i));
	IXGBE_READ_REG(hw, IXGBE_RUC);
	IXGBE_READ_REG(hw, IXGBE_RFC);
	IXGBE_READ_REG(hw, IXGBE_ROC);
	IXGBE_READ_REG(hw, IXGBE_RJC);
	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
	IXGBE_READ_REG(hw, IXGBE_TORL);
	IXGBE_READ_REG(hw, IXGBE_TORH);
	IXGBE_READ_REG(hw, IXGBE_TPR);
	IXGBE_READ_REG(hw, IXGBE_TPT);
	IXGBE_READ_REG(hw, IXGBE_PTC64);
	IXGBE_READ_REG(hw, IXGBE_PTC127);
	IXGBE_READ_REG(hw, IXGBE_PTC255);
	IXGBE_READ_REG(hw, IXGBE_PTC511);
	IXGBE_READ_REG(hw, IXGBE_PTC1023);
	IXGBE_READ_REG(hw, IXGBE_PTC1522);
	IXGBE_READ_REG(hw, IXGBE_MPTC);
	IXGBE_READ_REG(hw, IXGBE_BPTC);

	/* Per-queue counters; 82599 and later split byte counts into
	 * low/high halves and add a per-queue drop counter */
	for (i = 0; i < 16; i++) {
		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
		if (hw->mac.type >= ixgbe_mac_82599EB) {
			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
		} else {
			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
		}
	}

	/* X540 keeps additional error counters in the PHY; reuse 'i' as
	 * a scratch read target */
	if (hw->mac.type == ixgbe_mac_X540) {
		if (hw->phy.id == 0)
			hw->phy.ops.identify(hw);
		hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
		hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
		hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
		hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
	}

	return 0;
}
/**
 *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
 *  @hw: pointer to hardware structure
 *  @pba_num: stores the part number string from the EEPROM
 *  @pba_num_size: part number string buffer length
 *
 *  Reads the part number string from the EEPROM.  Supports both the
 *  pointer-guarded string format and the legacy two-word hex format.
 *
 *  Returns 0 on success, IXGBE_ERR_INVALID_ARGUMENT for a NULL buffer,
 *  IXGBE_ERR_NO_SPACE when the buffer is too small, IXGBE_ERR_PBA_SECTION
 *  for a corrupt section, or the EEPROM read error code.
 **/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
				  u32 pba_num_size)
{
	s32 ret_val;
	u16 data;
	u16 pba_ptr;
	u16 offset;
	u16 length;

	if (pba_num == NULL) {
		hw_dbg(hw, "PBA string buffer was null\n");
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	/*
	 * if data is not ptr guard the PBA must be in legacy format which
	 * means pba_ptr is actually our second data word for the PBA number
	 * and we can decode it into an ascii string
	 */
	if (data != IXGBE_PBANUM_PTR_GUARD) {
		hw_dbg(hw, "NVM PBA number is not stored as string\n");

		/* we will need 11 characters to store the PBA */
		if (pba_num_size < 11) {
			hw_dbg(hw, "PBA string buffer too small\n");
			return IXGBE_ERR_NO_SPACE;
		}

		/* extract hex string from data and pba_ptr; the legacy
		 * layout is "XXXXXX-0XX" built from the two words' nibbles */
		pba_num[0] = (data >> 12) & 0xF;
		pba_num[1] = (data >> 8) & 0xF;
		pba_num[2] = (data >> 4) & 0xF;
		pba_num[3] = data & 0xF;
		pba_num[4] = (pba_ptr >> 12) & 0xF;
		pba_num[5] = (pba_ptr >> 8) & 0xF;
		pba_num[6] = '-';
		pba_num[7] = 0;		/* becomes '0' in the hex pass below */
		pba_num[8] = (pba_ptr >> 4) & 0xF;
		pba_num[9] = pba_ptr & 0xF;

		/* put a null character on the end of our string */
		pba_num[10] = '\0';

		/* switch all the data but the '-' to hex char;
		 * '-' (0x2D) is >= 0x10 so both branches skip it */
		for (offset = 0; offset < 10; offset++) {
			if (pba_num[offset] < 0xA)
				pba_num[offset] += '0';
			else if (pba_num[offset] < 0x10)
				pba_num[offset] += 'A' - 0xA;
		}

		return 0;
	}

	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}

	if (length == 0xFFFF || length == 0) {
		hw_dbg(hw, "NVM PBA number section invalid length\n");
		return IXGBE_ERR_PBA_SECTION;
	}

	/* check if pba_num buffer is big enough:
	 * each EEPROM word yields two chars, minus one word for the
	 * length field, plus one char for the terminator */
	if (pba_num_size < (((u32)length * 2) - 1)) {
		hw_dbg(hw, "PBA string buffer too small\n");
		return IXGBE_ERR_NO_SPACE;
	}

	/* trim pba length from start of string */
	pba_ptr++;
	length--;

	for (offset = 0; offset < length; offset++) {
		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		/* big-endian word -> two ascii bytes */
		pba_num[offset * 2] = (u8)(data >> 8);
		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
	}
	pba_num[offset * 2] = '\0';

	return 0;
}
/**
 *  ixgbe_get_mac_addr_generic - Generic get MAC address
 *  @hw: pointer to hardware structure
 *  @mac_addr: Adapter MAC address (6 bytes)
 *
 *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
 *  A reset of the adapter must be performed prior to calling this function
 *  in order for the MAC address to have been loaded from the EEPROM into RAR0
 *
 *  Always returns 0.
 **/
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 idx;

	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
	rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));

	/* RAL(0) holds bytes 0-3 of the address, RAH(0) holds bytes 4-5 */
	for (idx = 0; idx < 6; idx++) {
		if (idx < 4)
			mac_addr[idx] = (u8)(rar_low >> (idx * 8));
		else
			mac_addr[idx] = (u8)(rar_high >> ((idx - 4) * 8));
	}

	return 0;
}
/**
 *  ixgbe_get_bus_info_generic - Generic set PCI bus info
 *  @hw: pointer to hardware structure
 *
 *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
 *  by decoding the PCIe link status word from PCI config space.
 *
 *  Always returns 0.
 **/
s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	struct ixgbe_mac_info *mac = &hw->mac;
	u16 link_status;

	hw->bus.type = ixgbe_bus_type_pci_express;

	/* Get the negotiated link width and speed from PCI config space */
	pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS,
			     &link_status);

	/* Decode negotiated link width (x1/x2/x4/x8) */
	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
	case IXGBE_PCI_LINK_WIDTH_1:
		hw->bus.width = ixgbe_bus_width_pcie_x1;
		break;
	case IXGBE_PCI_LINK_WIDTH_2:
		hw->bus.width = ixgbe_bus_width_pcie_x2;
		break;
	case IXGBE_PCI_LINK_WIDTH_4:
		hw->bus.width = ixgbe_bus_width_pcie_x4;
		break;
	case IXGBE_PCI_LINK_WIDTH_8:
		hw->bus.width = ixgbe_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = ixgbe_bus_width_unknown;
		break;
	}

	/* Decode negotiated link speed (2.5GT/s or 5GT/s) */
	switch (link_status & IXGBE_PCI_LINK_SPEED) {
	case IXGBE_PCI_LINK_SPEED_2500:
		hw->bus.speed = ixgbe_bus_speed_2500;
		break;
	case IXGBE_PCI_LINK_SPEED_5000:
		hw->bus.speed = ixgbe_bus_speed_5000;
		break;
	default:
		hw->bus.speed = ixgbe_bus_speed_unknown;
		break;
	}

	mac->ops.set_lan_id(hw);

	return 0;
}
/**
 *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 *  @hw: pointer to the HW structure
 *
 *  Determines the LAN function id by reading memory-mapped registers
 *  and swaps the port value if requested.  Sets hw->bus.func and
 *  hw->bus.lan_id.
 **/
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
	struct ixgbe_bus_info *bus = &hw->bus;
	u32 reg;

	/* LAN function id comes from the STATUS register */
	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
	bus->lan_id = bus->func;

	/* check for a port swap (LAN Function Select in FACTPS);
	 * note only bus->func is flipped, lan_id keeps the raw value */
	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
	if (reg & IXGBE_FACTPS_LFS)
		bus->func ^= 0x1;
}
/**
 *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 *
 *  Always returns 0.
 **/
s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/*
	 * Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	reg_val &= ~(IXGBE_RXCTRL_RXEN);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
	IXGBE_WRITE_FLUSH(hw);
	/* allow in-flight receives to drain */
	usleep_range(2000, 4000);

	/* Clear interrupt mask to stop from interrupts being generated */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts (EICR is clear-on-read) */
	IXGBE_READ_REG(hw, IXGBE_EICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), reg_val);
		}
	}

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	ixgbe_disable_pcie_master(hw);

	return 0;
}
/**
 *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
 *  @hw: pointer to hardware structure
 *  @index: led number to turn on
 *
 *  Always returns 0.
 **/
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 ledctl;

	/* Clear this LED's mode field, then program the ON mode into it */
	ledctl = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	ledctl &= ~IXGBE_LED_MODE_MASK(index);
	ledctl |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
/**
 *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
 *  @hw: pointer to hardware structure
 *  @index: led number to turn off
 *
 *  Always returns 0.
 **/
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 ledctl;

	/* Clear this LED's mode field, then program the OFF mode into it */
	ledctl = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	ledctl &= ~IXGBE_LED_MODE_MASK(index);
	ledctl |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
/**
 *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
 *  @hw: pointer to hardware structure
 *
 *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
 *  ixgbe_hw struct in order to set up EEPROM access.  Runs at most once:
 *  subsequent calls are no-ops because eeprom->type is no longer
 *  ixgbe_eeprom_uninitialized.
 *
 *  Always returns 0.
 **/
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	u32 eec;
	u16 eeprom_size;

	if (eeprom->type == ixgbe_eeprom_uninitialized) {
		eeprom->type = ixgbe_eeprom_none;
		/* Set default semaphore delay to 10ms which is a well
		 * tested value */
		eeprom->semaphore_delay = 10;
		/* Clear EEPROM page size, it will be initialized as needed */
		eeprom->word_page_size = 0;

		/*
		 * Check for EEPROM present first.
		 * If not present leave as none
		 */
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
		if (eec & IXGBE_EEC_PRES) {
			eeprom->type = ixgbe_eeprom_spi;

			/*
			 * SPI EEPROM is assumed here.  This code would need to
			 * change if a future EEPROM is not SPI.
			 */
			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
					    IXGBE_EEC_SIZE_SHIFT);
			eeprom->word_size = 1 << (eeprom_size +
						  IXGBE_EEPROM_WORD_SIZE_SHIFT);
		}

		/* address width is decoded even when no EEPROM is present */
		if (eec & IXGBE_EEC_ADDR_SIZE)
			eeprom->address_bits = 16;
		else
			eeprom->address_bits = 8;
		hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
			  "%d\n", eeprom->type, eeprom->word_size,
			  eeprom->address_bits);
	}

	return 0;
}
/**
 *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to write
 *  @words: number of words
 *  @data: 16 bit word(s) to write to EEPROM
 *
 *  Writes 16 bit word(s) to EEPROM through bit-bang method.
 *
 *  Returns 0 on success, IXGBE_ERR_INVALID_ARGUMENT for zero words,
 *  IXGBE_ERR_EEPROM for an out-of-range write, or the burst-write error.
 **/
s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					       u16 words, u16 *data)
{
	s32 status = 0;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * The EEPROM page size cannot be queried from the chip. We do lazy
	 * initialization. It is worth to do that when we write large buffer.
	 */
	if ((hw->eeprom.word_page_size == 0) &&
	    (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
		ixgbe_detect_eeprom_page_size_generic(hw, offset);

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 * NOTE(review): the RD (read) buffer count constant is reused here
	 * for the write burst size — presumably intentional; confirm against
	 * the register spec before changing.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
							    count, &data[i]);

		if (status != 0)
			break;
	}

out:
	return status;
}
/**
 *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be written to
 *  @words: number of word(s)
 *  @data: 16 bit word(s) to be written to the EEPROM
 *
 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.
 *
 *  Returns 0 on success, IXGBE_ERR_SWFW_SYNC/IXGBE_ERR_EEPROM if the
 *  device could not be acquired or is not ready.
 **/
static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status;
	u16 word;
	u16 page_size;
	u16 i;
	u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;

	/* Prepare the EEPROM for writing */
	status = ixgbe_acquire_eeprom(hw);

	if (status == 0) {
		if (ixgbe_ready_eeprom(hw) != 0) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == 0) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);

			/*  Send the WRITE ENABLE command (8 bit opcode )  */
			ixgbe_shift_out_eeprom_bits(hw,
						  IXGBE_EEPROM_WREN_OPCODE_SPI,
						  IXGBE_EEPROM_OPCODE_BITS);

			ixgbe_standby_eeprom(hw);

			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the Write command (8-bit opcode + addr);
			 * the word offset is doubled into a byte address */
			ixgbe_shift_out_eeprom_bits(hw, write_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			page_size = hw->eeprom.word_page_size;

			/* Send the data in burst via SPI; each word is
			 * byte-swapped to the EEPROM's big-endian order.
			 * Note the inner loop advances 'i' so the outer loop
			 * resumes after the burst. */
			do {
				word = data[i];
				word = (word >> 8) | (word << 8);
				ixgbe_shift_out_eeprom_bits(hw, word, 16);

				if (page_size == 0)
					break;

				/* do not wrap around page */
				if (((offset + i) & (page_size - 1)) ==
				    (page_size - 1))
					break;
			} while (++i < words);

			ixgbe_standby_eeprom(hw);
			/* wait for the internal write cycle to complete */
			usleep_range(10000, 20000);
		}
		/* Done with writing - release the EEPROM */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
/**
 *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be written to
 *  @data: 16 bit word to be written to the EEPROM
 *
 *  If ixgbe_eeprom_update_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.
 *
 *  Returns 0 on success, IXGBE_ERR_EEPROM for an out-of-range offset,
 *  or the bit-bang write error code.
 **/
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	hw->eeprom.ops.init_params(hw);

	/* Reject offsets beyond the end of the EEPROM */
	if (offset >= hw->eeprom.word_size)
		return IXGBE_ERR_EEPROM;

	/* Single-word write is a one-element buffer write */
	return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
}
/**
 *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @words: number of word(s)
 *  @data: read 16 bit words(s) from EEPROM
 *
 *  Reads 16 bit word(s) from EEPROM through bit-bang method
 *
 *  Returns 0 on success, IXGBE_ERR_INVALID_ARGUMENT for zero words,
 *  IXGBE_ERR_EEPROM for an out-of-range read, or the burst-read error.
 **/
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
					      u16 words, u16 *data)
{
	s32 status = 0;
	u16 i, count;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset + words > hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	/*
	 * We cannot hold synchronization semaphores for too long
	 * to avoid other entity starvation. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
		count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
			IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
		status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
							   count, &data[i]);

		if (status != 0)
			break;
	}

out:
	return status;
}
/**
 *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @words: number of word(s)
 *  @data: read 16 bit word(s) from EEPROM
 *
 *  Reads 16 bit word(s) from EEPROM through bit-bang method
 *
 *  Returns 0 on success, IXGBE_ERR_SWFW_SYNC/IXGBE_ERR_EEPROM if the
 *  device could not be acquired or is not ready.
 **/
static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
					     u16 words, u16 *data)
{
	s32 status;
	u16 word_in;
	u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
	u16 i;

	/* Prepare the EEPROM for reading  */
	status = ixgbe_acquire_eeprom(hw);

	if (status == 0) {
		if (ixgbe_ready_eeprom(hw) != 0) {
			ixgbe_release_eeprom(hw);
			status = IXGBE_ERR_EEPROM;
		}
	}

	if (status == 0) {
		for (i = 0; i < words; i++) {
			ixgbe_standby_eeprom(hw);
			/*
			 * Some SPI eeproms use the 8th address bit embedded
			 * in the opcode
			 */
			if ((hw->eeprom.address_bits == 8) &&
			    ((offset + i) >= 128))
				read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;

			/* Send the READ command (opcode + addr);
			 * the word offset is doubled into a byte address */
			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
						    IXGBE_EEPROM_OPCODE_BITS);
			ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
						    hw->eeprom.address_bits);

			/* Read the data and byte-swap from the EEPROM's
			 * big-endian word order */
			word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
			data[i] = (word_in >> 8) | (word_in << 8);
		}

		/* End this read operation */
		ixgbe_release_eeprom(hw);
	}

	return status;
}
/**
 *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be read
 *  @data: read 16 bit value from EEPROM
 *
 *  Reads 16 bit value from EEPROM through bit-bang method
 *
 *  Returns 0 on success, IXGBE_ERR_EEPROM for an out-of-range offset,
 *  or the bit-bang read error code.
 **/
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
				       u16 *data)
{
	hw->eeprom.ops.init_params(hw);

	/* Reject offsets beyond the end of the EEPROM */
	if (offset >= hw->eeprom.word_size)
		return IXGBE_ERR_EEPROM;

	/* Single-word read is a one-element buffer read */
	return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
}
/**
 *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of word(s)
 *  @data: 16 bit word(s) from the EEPROM
 *
 *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
 *
 *  Returns 0 on success, IXGBE_ERR_INVALID_ARGUMENT for zero words,
 *  IXGBE_ERR_EEPROM for an out-of-range offset or poll timeout.
 **/
s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				   u16 words, u16 *data)
{
	u32 eerd;
	s32 status = 0;
	u32 i;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* compose address + start bit, then kick off the read */
		eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
		       IXGBE_EEPROM_RW_REG_START;

		IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);

		if (status == 0) {
			/* data word lives in the upper bits of EERD */
			data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
				   IXGBE_EEPROM_RW_REG_DATA);
		} else {
			hw_dbg(hw, "Eeprom read timed out\n");
			goto out;
		}
	}
out:
	return status;
}
/**
 *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
 *  @hw: pointer to hardware structure
 *  @offset: offset within the EEPROM to be used as a scratch pad
 *
 *  Discover EEPROM page size by writing marching data at given offset.
 *  This function is called only when we are writing a new large buffer
 *  at given offset so the data would be overwritten anyway.
 *
 *  On success, hw->eeprom.word_page_size holds the detected size.
 *  Returns 0 on success or a bit-bang read/write error code.
 **/
static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
						 u16 offset)
{
	u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
	s32 status = 0;
	u16 i;

	/* marching pattern 0,1,2,... so the wrapped-around value at
	 * word 0 reveals where the page boundary is */
	for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
		data[i] = i;

	/* temporarily assume the max page size for the probe write,
	 * then reset to "unknown" until detection completes */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
	status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
						    IXGBE_EEPROM_PAGE_SIZE_MAX,
						    data);
	hw->eeprom.word_page_size = 0;
	if (status != 0)
		goto out;

	status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
	if (status != 0)
		goto out;

	/*
	 * When writing in burst more than the actual page size
	 * EEPROM address wraps around current page.
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

	hw_dbg(hw, "Detected EEPROM page size = %d words.",
	       hw->eeprom.word_page_size);
out:
	return status;
}
/**
 *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the EERD register.
 *
 *  Returns the buffer-read status code.
 **/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	/* A single-word EERD read is just a one-element buffer read */
	s32 ret = ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);

	return ret;
}
/**
 *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to write
 *  @words: number of words
 *  @data: word(s) write to the EEPROM
 *
 *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
 *
 *  Returns 0 on success, IXGBE_ERR_INVALID_ARGUMENT for zero words,
 *  IXGBE_ERR_EEPROM for an out-of-range offset or poll timeout.
 **/
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
				    u16 words, u16 *data)
{
	u32 eewr;
	s32 status = 0;
	u16 i;

	hw->eeprom.ops.init_params(hw);

	if (words == 0) {
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto out;
	}

	if (offset >= hw->eeprom.word_size) {
		status = IXGBE_ERR_EEPROM;
		goto out;
	}

	for (i = 0; i < words; i++) {
		/* compose address + data + start bit for EEWR */
		eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
		       (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
		       IXGBE_EEPROM_RW_REG_START;

		/* wait for any previous operation to finish before
		 * starting, then wait for this write to complete */
		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != 0) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			goto out;
		}

		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);

		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
		if (status != 0) {
			hw_dbg(hw, "Eeprom write EEWR timed out\n");
			goto out;
		}
	}

out:
	return status;
}
/**
 *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
 *  @hw: pointer to hardware structure
 *  @offset: offset of word in the EEPROM to write
 *  @data: word write to the EEPROM
 *
 *  Write a 16 bit word to the EEPROM using the EEWR register.
 *
 *  Returns the buffer-write status code.
 **/
s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
{
	/* A single-word EEWR write is just a one-element buffer write */
	s32 ret = ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);

	return ret;
}
/**
 *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
 *  @hw: pointer to hardware structure
 *  @ee_reg: EEPROM flag for polling (IXGBE_NVM_POLL_READ or write)
 *
 *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
 *  read or write is done respectively.
 *
 *  Returns 0 when the done bit is observed, IXGBE_ERR_EEPROM on timeout.
 **/
static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
	u32 attempt;
	u32 reg;

	for (attempt = 0; attempt < IXGBE_EERD_EEWR_ATTEMPTS; attempt++) {
		/* select which register to poll based on the flag */
		if (ee_reg == IXGBE_NVM_POLL_READ)
			reg = IXGBE_READ_REG(hw, IXGBE_EERD);
		else
			reg = IXGBE_READ_REG(hw, IXGBE_EEWR);

		if (reg & IXGBE_EEPROM_RW_REG_DONE)
			return 0;
		udelay(5);
	}

	return IXGBE_ERR_EEPROM;
}
/**
 *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
 *  @hw: pointer to hardware structure
 *
 *  Prepares EEPROM for access using bit-bang method. This function should
 *  be called before issuing a command to the EEPROM.  Takes the SW/FW
 *  sync lock, requests the EEC grant, and leaves CS/SK low ready for
 *  bit-banging.
 *
 *  Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the sync lock could not
 *  be taken, or IXGBE_ERR_EEPROM if the EEC grant was never given.
 **/
static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u32 eec;
	u32 i;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
		status = IXGBE_ERR_SWFW_SYNC;

	if (status == 0) {
		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		/* Request EEPROM Access */
		eec |= IXGBE_EEC_REQ;
		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);

		/* Poll for the hardware to grant access */
		for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
			eec = IXGBE_READ_REG(hw, IXGBE_EEC);
			if (eec & IXGBE_EEC_GNT)
				break;
			udelay(5);
		}

		/* Release if grant not acquired: drop the request bit,
		 * give back the sync lock, and report the failure */
		if (!(eec & IXGBE_EEC_GNT)) {
			eec &= ~IXGBE_EEC_REQ;
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			hw_dbg(hw, "Could not acquire EEPROM grant\n");

			hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
			status = IXGBE_ERR_EEPROM;
		}

		/* Setup EEPROM for Read/Write */
		if (status == 0) {
			/* Clear CS and SK */
			eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
			IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
			IXGBE_WRITE_FLUSH(hw);
			udelay(1);
		}
	}
	return status;
}
/**
 *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
 *  @hw: pointer to hardware structure
 *
 *  Sets the hardware semaphores so EEPROM access can occur for bit-bang
 *  method.  Acquires in two stages: first the SMBI bit (driver-to-driver
 *  semaphore), then the SWESMBI bit (software-to-firmware semaphore).
 *
 *  Returns 0 when both semaphores are held, IXGBE_ERR_EEPROM otherwise.
 **/
static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_EEPROM;
	u32 timeout = 2000;
	u32 i;
	u32 swsm;

	/* Get SMBI software semaphore between device drivers first */
	for (i = 0; i < timeout; i++) {
		/*
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI)) {
			status = 0;
			break;
		}
		udelay(50);
	}

	if (i == timeout) {
		hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
		       "not granted.\n");
		/*
		 * this release is particularly important because our attempts
		 * above to get the semaphore may have succeeded, and if there
		 * was a timeout, we should unconditionally clear the semaphore
		 * bits to free the driver to make progress
		 */
		ixgbe_release_eeprom_semaphore(hw);

		udelay(50);
		/*
		 * one last try
		 * If the SMBI bit is 0 when we read it, then the bit will be
		 * set and we have the semaphore
		 */
		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
		if (!(swsm & IXGBE_SWSM_SMBI))
			status = 0;
	}

	/* Now get the semaphore between SW/FW through the SWESMBI bit */
	if (status == 0) {
		for (i = 0; i < timeout; i++) {
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);

			/* Set the SW EEPROM semaphore bit to request access */
			swsm |= IXGBE_SWSM_SWESMBI;
			IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);

			/*
			 * If we set the bit successfully then we got the
			 * semaphore.  (Firmware clears the bit if it owns
			 * the semaphore, so read it back to confirm.)
			 */
			swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
			if (swsm & IXGBE_SWSM_SWESMBI)
				break;

			udelay(50);
		}

		/*
		 * Release semaphores and return error if SW EEPROM semaphore
		 * was not granted because we don't have access to the EEPROM
		 */
		if (i >= timeout) {
			hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
			       "not granted.\n");
			ixgbe_release_eeprom_semaphore(hw);
			status = IXGBE_ERR_EEPROM;
		}
	} else {
		hw_dbg(hw, "Software semaphore SMBI between device drivers "
		       "not granted.\n");
	}

	return status;
}
/**
 *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
 *  @hw: pointer to hardware structure
 *
 *  This function clears hardware semaphore bits (both SWESMBI and SMBI).
 **/
static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
{
	u32 sem = IXGBE_READ_REG(hw, IXGBE_SWSM);

	/* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
	sem &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
	IXGBE_WRITE_REG(hw, IXGBE_SWSM, sem);
	IXGBE_WRITE_FLUSH(hw);
}
/**
 *  ixgbe_ready_eeprom - Polls for EEPROM ready
 *  @hw: pointer to hardware structure
 *
 *  Repeatedly issues the SPI RDSR (read status register) command and
 *  checks the busy bit until the EEPROM reports ready.
 *
 *  Returns 0 when ready, IXGBE_ERR_EEPROM on timeout.
 **/
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
{
	s32 status = 0;
	u16 i;
	u8 spi_stat_reg;

	/*
	 * Read "Status Register" repeatedly until the LSB is cleared.  The
	 * EEPROM will signal that the command has been completed by clearing
	 * bit 0 of the internal status register.  If it's not cleared within
	 * 5 milliseconds, then error out.
	 */
	for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
		ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
					    IXGBE_EEPROM_OPCODE_BITS);
		spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
		if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
			break;

		udelay(5);
		ixgbe_standby_eeprom(hw);
	}

	/*
	 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
	 * devices (and only 0-5mSec on 5V devices)
	 */
	if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
		hw_dbg(hw, "SPI EEPROM Status error\n");
		status = IXGBE_ERR_EEPROM;
	}

	return status;
}
/**
 *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
 *  @hw: pointer to hardware structure
 *
 *  Pulses the chip-select line high then low to flush any in-progress
 *  SPI command and return the EEPROM to its idle state.
 **/
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
{
	u32 eec;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* Toggle CS to flush commands */
	eec |= IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
	eec &= ~IXGBE_EEC_CS;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}
/**
 *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
 *  @hw: pointer to hardware structure
 *  @data: data to send to the EEPROM
 *  @count: number of bits to shift out (MSB first)
 **/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
					u16 count)
{
	u32 eec;
	u32 mask;
	u32 i;

	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	/*
	 * Mask is used to shift "count" bits of "data" out to the EEPROM
	 * one bit at a time.  Determine the starting bit based on count
	 */
	mask = 0x01 << (count - 1);

	for (i = 0; i < count; i++) {
		/*
		 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
		 * "1", and then raising and then lowering the clock (the SK
		 * bit controls the clock input to the EEPROM).  A "0" is
		 * shifted out to the EEPROM by setting "DI" to "0" and then
		 * raising and then lowering the clock.
		 */
		if (data & mask)
			eec |= IXGBE_EEC_DI;
		else
			eec &= ~IXGBE_EEC_DI;

		IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
		IXGBE_WRITE_FLUSH(hw);

		udelay(1);

		ixgbe_raise_eeprom_clk(hw, &eec);
		ixgbe_lower_eeprom_clk(hw, &eec);

		/*
		 * Shift mask to signify next bit of data to shift in to the
		 * EEPROM
		 */
		mask = mask >> 1;
	}

	/* We leave the "DI" bit set to "0" when we leave this routine. */
	eec &= ~IXGBE_EEC_DI;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
	IXGBE_WRITE_FLUSH(hw);
}
/**
 *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to hardware structure
 *  @count: number of bits to shift in (at most 16; MSB arrives first)
 *
 *  Returns the bits read, right-aligned in a 16 bit word.
 **/
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
{
	u32 eec;
	u32 i;
	u16 data = 0;

	/*
	 * In order to read a register from the EEPROM, we need to shift
	 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
	 * the clock input to the EEPROM (setting the SK bit), and then reading
	 * the value of the "DO" bit.  During this "shifting in" process the
	 * "DI" bit should always be clear.
	 */
	eec = IXGBE_READ_REG(hw, IXGBE_EEC);

	eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);

	for (i = 0; i < count; i++) {
		data = data << 1;
		ixgbe_raise_eeprom_clk(hw, &eec);

		eec = IXGBE_READ_REG(hw, IXGBE_EEC);

		eec &= ~(IXGBE_EEC_DI);
		if (eec & IXGBE_EEC_DO)
			data |= 1;

		ixgbe_lower_eeprom_clk(hw, &eec);
	}

	return data;
}
/**
 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/* Drive SK high and give the part time to see the rising edge */
	*eec |= IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}
/**
 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
 * @hw: pointer to hardware structure
 * @eec: EEC register's current value
 **/
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
{
	/*
	 * Lower the clock input to the EEPROM (clearing the SK bit), then
	 * delay
	 */
	*eec = *eec & ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);
}
/**
 * ixgbe_release_eeprom - Release EEPROM, release semaphores
 * @hw: pointer to hardware structure
 *
 * Deselects the EEPROM, drops the hardware access request, and releases
 * the software/firmware semaphore guarding EEPROM access.
 **/
static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
{
	u32 eec_reg;

	eec_reg = IXGBE_READ_REG(hw, IXGBE_EEC);

	/* Deselect the device: CS high, clock low */
	eec_reg |= IXGBE_EEC_CS;
	eec_reg &= ~IXGBE_EEC_SK;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec_reg);
	IXGBE_WRITE_FLUSH(hw);
	udelay(1);

	/* Stop requesting EEPROM access */
	eec_reg &= ~IXGBE_EEC_REQ;
	IXGBE_WRITE_REG(hw, IXGBE_EEC, eec_reg);

	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

	/*
	 * Delay before attempt to obtain semaphore again to allow FW
	 * access. semaphore_delay is in ms we need us for usleep_range
	 */
	usleep_range(hw->eeprom.semaphore_delay * 1000,
		     hw->eeprom.semaphore_delay * 2000);
}
/**
 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 *
 * Sums words 0x0-0x3F plus every word reachable through the pointer
 * section (excluding the FW pointer), then returns the value that makes
 * the total come out to IXGBE_EEPROM_SUM.
 **/
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	u16 checksum = 0;
	u16 data = 0;
	u16 ptr = 0;
	u16 section_len = 0;
	u16 i, j;

	/* Include 0x0-0x3F in the checksum */
	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
		if (hw->eeprom.ops.read(hw, i, &data) != 0) {
			hw_dbg(hw, "EEPROM read failed\n");
			break;
		}
		checksum += data;
	}

	/* Include all data from pointers except for the fw pointer */
	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
		/* NOTE(review): these reads are unchecked — on failure the
		 * previous buffer contents are summed; verify against caller
		 * expectations. */
		hw->eeprom.ops.read(hw, i, &ptr);

		/* Skip pointer slots that are unprogrammed (0 or 0xFFFF) */
		if (ptr == 0xFFFF || ptr == 0)
			continue;

		hw->eeprom.ops.read(hw, ptr, &section_len);
		if (section_len == 0xFFFF || section_len == 0)
			continue;

		for (j = ptr + 1; j <= ptr + section_len; j++) {
			hw->eeprom.ops.read(hw, j, &data);
			checksum += data;
		}
	}

	checksum = (u16)IXGBE_EEPROM_SUM - checksum;

	return checksum;
}
/**
 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum_val: calculated checksum
 *
 * Performs checksum calculation and validates the EEPROM checksum. If the
 * caller does not need checksum_val, the value can be NULL.
 **/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
					   u16 *checksum_val)
{
	s32 status;
	u16 checksum;
	u16 read_checksum = 0;

	/*
	 * Probe word 0 first: if this read times out or fails, bail out
	 * now rather than stalling while every subsequent read fails too.
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status != 0) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	checksum = hw->eeprom.ops.calc_checksum(hw);

	hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);

	/* The checksum stored in the EEPROM must match what we computed */
	if (read_checksum != checksum)
		status = IXGBE_ERR_EEPROM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum_val)
		*checksum_val = checksum;

	return status;
}
/**
 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
 * @hw: pointer to hardware structure
 *
 * Recomputes the checksum and writes it to the EEPROM checksum word.
 **/
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
{
	s32 status;
	u16 checksum;

	/*
	 * Probe word 0 first: if this read times out or fails, bail out
	 * now rather than stalling while every subsequent read fails too.
	 */
	status = hw->eeprom.ops.read(hw, 0, &checksum);
	if (status != 0) {
		hw_dbg(hw, "EEPROM read failed\n");
		return status;
	}

	checksum = hw->eeprom.ops.calc_checksum(hw);
	return hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
}
/**
 * ixgbe_validate_mac_addr - Validate MAC address
 * @mac_addr: pointer to MAC address.
 *
 * Tests a MAC address to ensure it is a valid Individual Address
 **/
s32 ixgbe_validate_mac_addr(u8 *mac_addr)
{
	/* Multicast addresses are not valid individual addresses */
	if (IXGBE_IS_MULTICAST(mac_addr))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	/* Neither is the broadcast address */
	if (IXGBE_IS_BROADCAST(mac_addr))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	/* Nor the all-zero address */
	if (!(mac_addr[0] | mac_addr[1] | mac_addr[2] |
	      mac_addr[3] | mac_addr[4] | mac_addr[5]))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	return 0;
}
/**
 * ixgbe_set_rar_generic - Set Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @vmdq: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 **/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_entries = hw->mac.num_rar_entries;
	u32 rar_low, rar_high;

	/* Reject indices beyond what this MAC supports */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* setup VMDq pool selection before this RAR gets enabled */
	hw->mac.ops.set_vmdq(hw, index, vmdq);

	/*
	 * HW expects the address little endian, i.e. reversed from network
	 * (big endian) order: bytes 0-3 go into RAL, bytes 4-5 into RAH.
	 */
	rar_low = (u32)addr[0];
	rar_low |= (u32)addr[1] << 8;
	rar_low |= (u32)addr[2] << 16;
	rar_low |= (u32)addr[3] << 24;

	/*
	 * Some parts put the VMDq setting in the extra RAH bits, so preserve
	 * everything except the low 16 address bits and the valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
	rar_high |= (u32)addr[4] | ((u32)addr[5] << 8);

	if (enable_addr != 0)
		rar_high |= IXGBE_RAH_AV;

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	return 0;
}
/**
 * ixgbe_clear_rar_generic - Remove Rx address register
 * @hw: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 **/
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 rar_entries = hw->mac.num_rar_entries;
	u32 rar_high;

	/* Reject indices beyond what this MAC supports */
	if (index >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", index);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/*
	 * Some parts put the VMDq setting in the extra RAH bits, so preserve
	 * everything except the low 16 address bits and the valid bit.
	 */
	rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
	rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);

	IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);

	/* clear VMDq pool/queue selection for this RAR */
	hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);

	return 0;
}
/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
{
	u32 rar_entries = hw->mac.num_rar_entries;
	u32 i;

	/*
	 * A valid hw->mac.addr is treated as a software override of the
	 * permanent address; otherwise keep whatever RAR0 already holds.
	 */
	if (ixgbe_validate_mac_addr(hw->mac.addr) !=
	    IXGBE_ERR_INVALID_MAC_ADDR) {
		/* Setup the receive address. */
		hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
		hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);

		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for RAR 0 */
		hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
	} else {
		/* Get the MAC address from the RAR0 for later reference */
		hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
		hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
	}
	hw->addr_ctrl.overflow_promisc = 0;
	hw->addr_ctrl.rar_used_count = 1;

	/* Zero out the other receive addresses. */
	hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
	}

	/* Clear the MTA */
	hw->addr_ctrl.mta_in_use = 0;
	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	hw_dbg(hw, " Clearing MTA\n");
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

	if (hw->mac.ops.init_uta_tables)
		hw->mac.ops.init_uta_tables(hw);

	return 0;
}
/**
 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vec;

	switch (hw->mac.mc_filter_type) {
	case 0:		/* use bits [47:36] of the address */
		vec = (mc_addr[4] >> 4) | ((u16)mc_addr[5] << 4);
		break;
	case 1:		/* use bits [46:35] of the address */
		vec = (mc_addr[4] >> 3) | ((u16)mc_addr[5] << 5);
		break;
	case 2:		/* use bits [45:34] of the address */
		vec = (mc_addr[4] >> 2) | ((u16)mc_addr[5] << 6);
		break;
	case 3:		/* use bits [43:32] of the address */
		vec = mc_addr[4] | ((u16)mc_addr[5] << 8);
		break;
	default:	/* Invalid mc_filter_type */
		hw_dbg(hw, "MC filter type param set incorrectly\n");
		vec = 0;
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	return vec & 0xFFF;
}
/**
 * ixgbe_set_mta - Set bit-vector in multicast table
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address to hash into the shadow table
 *
 * Sets the bit-vector in the multicast table shadow (written to hardware
 * by the caller) and bumps the in-use count.
 **/
static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	hw_dbg(hw, " bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register are determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @netdev: pointer to net device structure
 *
 * The given list replaces any existing list. Clears the MC addrs from receive
 * address registers and the multicast table. Uses unused receive address
 * registers for the first multicast addresses, and hashes the rest into the
 * multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
				      struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 i;

	/* Record how many multicast addresses we were handed */
	hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
	hw->addr_ctrl.mta_in_use = 0;

	/* Start from an empty shadow table */
	hw_dbg(hw, " Clearing MTA\n");
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* Hash every address on the list into the shadow table */
	netdev_for_each_mc_addr(ha, netdev) {
		hw_dbg(hw, " Adding the multicast addresses:\n");
		ixgbe_set_mta(hw, ha->addr);
	}

	/* Push the shadow table out to the hardware MTA registers */
	for (i = 0; i < hw->mac.mcft_size; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
				      hw->mac.mta_shadow[i]);

	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
	return 0;
}
/**
 * ixgbe_enable_mc_generic - Enable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Enables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
{
	/* Only re-enable the filter when hash entries are actually in use */
	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

	return 0;
}
/**
 * ixgbe_disable_mc_generic - Disable multicast address in RAR
 * @hw: pointer to hardware structure
 *
 * Disables multicast address in RAR and the use of the multicast hash table.
 **/
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
{
	/* Clear MFE (leaving mc_filter_type) only if the table was in use */
	if (hw->addr_ctrl.mta_in_use > 0)
		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

	return 0;
}
/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 * @packetbuf_num: packet buffer number (0-7)
 *
 * Enable flow control according to the current settings: negotiates the
 * mode, programs MFLCN/FCCFG accordingly, then sets the Rx high/low
 * watermarks and pause timers for the given packet buffer.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
{
	s32 ret_val = 0;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 rx_pba_size;
	u32 fcrtl, fcrth;

#ifdef CONFIG_DCB
	/* Priority flow control is programmed by the DCB path, not here */
	if (hw->fc.requested_mode == ixgbe_fc_pfc)
		goto out;

#endif /* CONFIG_DCB */
	/* Negotiate the fc mode to use */
	ret_val = ixgbe_fc_autoneg(hw);
	if (ret_val == IXGBE_ERR_FLOW_CONTROL)
		goto out;

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * #ifdef CONFIG_DCB
	 * 4: Priority Flow Control is enabled.
	 * #endif
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
#ifdef CONFIG_DCB
	case ixgbe_fc_pfc:
		goto out;
		break;
#endif /* CONFIG_DCB */
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	/* Set 802.3x based flow control settings. */
	/* DPF: discard pause frames rather than passing them up the stack */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

	/*
	 * Watermarks are computed down from the packet buffer size; the
	 * << 10 converts the KB-unit values into the register byte units.
	 */
	rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
	rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;

	fcrth = (rx_pba_size - hw->fc.high_water) << 10;
	fcrtl = (rx_pba_size - hw->fc.low_water) << 10;

	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
		fcrth |= IXGBE_FCRTH_FCEN;
		if (hw->fc.send_xon)
			fcrtl |= IXGBE_FCRTL_XONE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
	IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);

	/* Configure pause time (2 TCs per register) */
	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
	if ((packetbuf_num & 1) == 0)
		reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
	else
		reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
	IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);

	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));

out:
	return ret_val;
}
/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 *
 * On any failure (autoneg disabled, link down, negotiation incomplete),
 * hw->fc.current_mode falls back to hw->fc.requested_mode.
 **/
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
	ixgbe_link_speed speed;
	bool link_up;

	/* Respect an explicit request to skip flow control autoneg */
	if (hw->fc.disable_fc_autoneg)
		goto out;

	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 *
	 * Since we're being called from an LSC, link is already known to be up.
	 * So use link_up_wait_to_complete=false.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, false);

	if (!link_up) {
		ret_val = IXGBE_ERR_FLOW_CONTROL;
		goto out;
	}

	/* Each media type resolves FC from a different register set */
	switch (hw->phy.media_type) {
	/* Autoneg flow control on fiber adapters */
	case ixgbe_media_type_fiber:
		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
			ret_val = ixgbe_fc_autoneg_fiber(hw);
		break;

	/* Autoneg flow control on backplane adapters */
	case ixgbe_media_type_backplane:
		ret_val = ixgbe_fc_autoneg_backplane(hw);
		break;

	/* Autoneg flow control on copper adapters */
	case ixgbe_media_type_copper:
		if (ixgbe_device_supports_autoneg_fc(hw) == 0)
			ret_val = ixgbe_fc_autoneg_copper(hw);
		break;

	default:
		break;
	}

out:
	if (ret_val == 0) {
		hw->fc.fc_was_autonegged = true;
	} else {
		/* Negotiation failed: honor the user-requested mode */
		hw->fc.fc_was_autonegged = false;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
	return ret_val;
}
/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Resolves the flow control mode from the 1G PCS autonegotiation
 * registers.
 *
 * Returns 0 on success, IXGBE_ERR_FC_NOT_NEGOTIATED when autoneg did not
 * complete or timed out, otherwise the result of ixgbe_negotiate_fc().
 **/
static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
{
	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
	s32 ret_val;

	/*
	 * On multispeed fiber at 1g, bail out if
	 * - link is up but AN did not complete, or if
	 * - link is up and AN completed but timed out
	 *
	 * Fix: the timed-out check previously compared the masked flag
	 * against 1, which can only be true when the flag occupies bit 0;
	 * test the bit for non-zero instead.
	 */
	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	if (((linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
	    ((linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) != 0)) {
		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
		goto out;
	}

	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

	ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
				     pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
				     IXGBE_PCS1GANA_ASM_PAUSE,
				     IXGBE_PCS1GANA_SYM_PAUSE,
				     IXGBE_PCS1GANA_ASM_PAUSE);

out:
	return ret_val;
}
/**
 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37: resolves the mode from
 * the 10G KX autoneg (AUTOC) and link-partner ability (ANLP1) registers.
 **/
static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
{
	u32 links2, anlp1_reg, autoc_reg, links;
	s32 ret_val;

	/*
	 * On backplane, bail out if
	 * - backplane autoneg was not completed, or if
	 * - we are 82599 and link partner is not AN enabled
	 */
	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
		/* Autoneg incomplete: fall back to the requested mode */
		hw->fc.fc_was_autonegged = false;
		hw->fc.current_mode = hw->fc.requested_mode;
		ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
		goto out;
	}

	if (hw->mac.type == ixgbe_mac_82599EB) {
		/* 82599 exposes partner-AN support in LINKS2 */
		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
			hw->fc.fc_was_autonegged = false;
			hw->fc.current_mode = hw->fc.requested_mode;
			ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
			goto out;
		}
	}
	/*
	 * Read the 10g AN autoc and LP ability registers and resolve
	 * local flow control settings accordingly
	 */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);

out:
	return ret_val;
}
/**
* ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
* @hw: pointer to hardware structure
*
* Enable flow control according to IEEE clause 37.
**/
static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
{
u16 technology_ability_reg = 0;
u16 lp_technology_ability_reg = 0;
hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
MDIO_MMD_AN,
&technology_ability_reg);
hw->phy.ops.read_reg(hw, MDIO_AN_LPA,
MDIO_MMD_AN,
&lp_technology_ability_reg);
return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
(u32)lp_technology_ability_reg,
IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
}
/**
 * ixgbe_negotiate_fc - Negotiate flow control
 * @hw: pointer to hardware structure
 * @adv_reg: flow control advertised settings
 * @lp_reg: link partner's flow control settings
 * @adv_sym: symmetric pause bit in advertisement
 * @adv_asm: asymmetric pause bit in advertisement
 * @lp_sym: symmetric pause bit in link partner advertisement
 * @lp_asm: asymmetric pause bit in link partner advertisement
 *
 * Find the intersection between advertised settings and link partner's
 * advertised settings
 **/
static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
			      u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
	if (!adv_reg || !lp_reg)
		return IXGBE_ERR_FC_NOT_NEGOTIATED;

	/* Both sides symmetric-capable: full, unless we only wanted Rx */
	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
		/*
		 * Now we need to check if the user selected Rx ONLY
		 * of pause frames.  In this case, we had to advertise
		 * FULL flow control because we could not advertise RX
		 * ONLY. Hence, we must now check to see if we need to
		 * turn OFF the TRANSMISSION of PAUSE frames.
		 */
		if (hw->fc.requested_mode == ixgbe_fc_full) {
			hw->fc.current_mode = ixgbe_fc_full;
			hw_dbg(hw, "Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = ixgbe_fc_rx_pause;
			hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
		}
		return 0;
	}

	/* We are asymmetric-only, partner supports both: Tx pause only */
	if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
	    (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_tx_pause;
		hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
		return 0;
	}

	/* We support both, partner is asymmetric-only: Rx pause only */
	if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
	    !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
		hw->fc.current_mode = ixgbe_fc_rx_pause;
		hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
		return 0;
	}

	/* No usable intersection */
	hw->fc.current_mode = ixgbe_fc_none;
	hw_dbg(hw, "Flow Control = NONE.\n");
	return 0;
}
/**
 * ixgbe_setup_fc - Set up flow control
 * @hw: pointer to hardware structure
 * @packetbuf_num: packet buffer number (0-7)
 *
 * Called at init time to set up flow control: validates the requested
 * configuration and programs the 1G (PCS1GANA), 10G backplane (AUTOC) and
 * copper (MDIO) flow control advertisement registers so autonegotiation
 * can resolve a mode once the cable is plugged in.
 *
 * Fix: the copper read at the media-type switch used a mis-encoded
 * "&reg_cu" (it had degraded into the HTML entity "(R)"); restored the
 * address-of expression so the file compiles.
 **/
static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
{
	s32 ret_val = 0;
	u32 reg = 0, reg_bp = 0;
	u16 reg_cu = 0;

#ifdef CONFIG_DCB
	/* Priority flow control is configured by the DCB path */
	if (hw->fc.requested_mode == ixgbe_fc_pfc) {
		hw->fc.current_mode = hw->fc.requested_mode;
		goto out;
	}

#endif /* CONFIG_DCB */
	/* Validate the packetbuf configuration */
	if (packetbuf_num < 0 || packetbuf_num > 7) {
		hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
		       "is 0-7\n", packetbuf_num);
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * Validate the water mark configuration.  Zero water marks are invalid
	 * because it causes the controller to just blast out fc packets.
	 */
	if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) {
		hw_dbg(hw, "Invalid water mark configuration\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * Validate the requested mode.  Strict IEEE mode does not allow
	 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
	 */
	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
		hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
		       "IEEE mode\n");
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/*
	 * 10gig parts do not have a word in the EEPROM to determine the
	 * default flow control setting, so we explicitly set it to full.
	 */
	if (hw->fc.requested_mode == ixgbe_fc_default)
		hw->fc.requested_mode = ixgbe_fc_full;

	/*
	 * Set up the 1G and 10G flow control advertisement registers so the
	 * HW will be able to do fc autoneg once the cable is plugged in.  If
	 * we link at 10G, the 1G advertisement is harmless and vice versa.
	 */
	switch (hw->phy.media_type) {
	case ixgbe_media_type_fiber:
	case ixgbe_media_type_backplane:
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
		reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		break;

	case ixgbe_media_type_copper:
		hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
				     MDIO_MMD_AN, &reg_cu);
		break;

	default:
		break;
	}

	/*
	 * The possible values of fc.requested_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * #ifdef CONFIG_DCB
	 * 4: Priority Flow Control is enabled.
	 * #endif
	 * other: Invalid.
	 */
	switch (hw->fc.requested_mode) {
	case ixgbe_fc_none:
		/* Flow control completely disabled by software override. */
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
				    IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE.  Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
				   IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane) {
			reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
			reg_cu |= (IXGBE_TAF_ASM_PAUSE);
			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
		}
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
		if (hw->phy.media_type == ixgbe_media_type_backplane)
			reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
				   IXGBE_AUTOC_ASM_PAUSE);
		else if (hw->phy.media_type == ixgbe_media_type_copper)
			reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
		break;
#ifdef CONFIG_DCB
	case ixgbe_fc_pfc:
		goto out;
		break;
#endif /* CONFIG_DCB */
	default:
		hw_dbg(hw, "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
		break;
	}

	if (hw->mac.type != ixgbe_mac_X540) {
		/*
		 * Enable auto-negotiation between the MAC & PHY;
		 * the MAC will advertise clause 37 flow control.
		 */
		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);

		/* Disable AN timeout */
		if (hw->fc.strict_ieee)
			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;

		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
		hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
	}

	/*
	 * AUTOC restart handles negotiation of 1G and 10G on backplane
	 * and copper. There is no need to set the PCS1GCTL register.
	 *
	 */
	if (hw->phy.media_type == ixgbe_media_type_backplane) {
		reg_bp |= IXGBE_AUTOC_AN_RESTART;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
		   (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
		hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
				      MDIO_MMD_AN, reg_cu);
	}

	hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
out:
	return ret_val;
}
/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else 0
 * is returned signifying master requests disabled.
 *
 * Also stops every Rx queue first so no new DMA is started while the
 * disable takes effect.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	struct ixgbe_adapter *adapter = hw->back;
	u32 i;
	u32 reg_val;
	u32 number_of_queues;
	s32 status = 0;
	u16 dev_status = 0;

	/* Just jump out if bus mastering is already disabled */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
		goto out;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
		}
	}

	/* Request that the device stop issuing master requests */
	reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL);
	reg_val |= IXGBE_CTRL_GIO_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val);

	/* Poll for the GIO bit to clear, which signals masters are idle */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto check_device_status;
		udelay(100);
	}

	hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
check_device_status:
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
							 &dev_status);
		if (!(dev_status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			break;
		udelay(100);
	}

	if (i == IXGBE_PCI_MASTER_DISABLE_TIMEOUT)
		hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
	else
		goto out;

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
	 * of this need.  The first reset prevents new master requests from
	 * being issued by our device.  We then must wait 1usec for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

out:
	return status;
}
/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr;
	u32 swmask = mask;
	u32 fwmask = mask << 5;
	s32 timeout;

	for (timeout = 200; timeout; timeout--) {
		/*
		 * The SW EEPROM semaphore guards access to all
		 * SW_FW_SYNC/GSSR bits (not just EEPROM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask)))
			break;

		/*
		 * Resource busy: held by firmware (fwmask) or another
		 * software thread (swmask) - drop the lock and retry
		 */
		ixgbe_release_eeprom_semaphore(hw);
		usleep_range(5000, 10000);
	}

	if (!timeout) {
		hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
		return IXGBE_ERR_SWFW_SYNC;
	}

	/* Claim the resource; we still hold the EEPROM semaphore here */
	gssr |= swmask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
	return 0;
}
/**
 * ixgbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
	u32 gssr;

	/* GSSR updates are themselves guarded by the EEPROM semaphore */
	ixgbe_get_eeprom_semaphore(hw);

	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
	gssr &= ~(u32)mask;
	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

	ixgbe_release_eeprom_semaphore(hw);
}
/**
 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit.  The caller supplies the complete RXCTRL
 * image; this routine only programs it.
 **/
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

	return 0;
}
/**
 * ixgbe_blink_led_start_generic - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = false;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/*
	 * The hardware only auto-blinks the LEDs while link is up, so
	 * force link-up (FLU) if link is currently down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, false);
	if (!link_up) {
		autoc_reg |= IXGBE_AUTOC_AN_RESTART | IXGBE_AUTOC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
		usleep_range(10000, 20000);
	}

	/* Switch the selected LED into blink mode. */
	led_reg = (led_reg & ~IXGBE_LED_MODE_MASK(index)) |
		  IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
/**
 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking
 **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);

	/* Undo any forced link-up and restart autonegotiation. */
	autoc_reg &= ~IXGBE_AUTOC_FLU;
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Return the LED to link/activity mode. */
	led_reg &= ~(IXGBE_LED_MODE_MASK(index) | IXGBE_LED_BLINK(index));
	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

	return 0;
}
/**
 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_offset: SAN MAC address offset
 *
 * Reads the EEPROM pointer word that locates the SAN MAC address block
 * and returns it through @san_mac_offset.  A value of 0 or 0xFFFF means
 * no SAN MAC block is present.  Shared by the get and set mac_addr paths.
 **/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
					 u16 *san_mac_offset)
{
	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);

	return 0;
}
/**
 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_addr: SAN MAC address
 *
 * Reads the SAN MAC address from the EEPROM, if it's available.  This is
 * per-port, so set_lan_id() must be called before reading the addresses.
 * set_lan_id() is called by identify_sfp(), but this cannot be relied
 * upon for non-SFP connections, so we must call it here.
 **/
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
	u16 san_mac_data, san_mac_offset;
	u8 i;

	/*
	 * Read the EEPROM pointer first; if no SAN MAC block exists there
	 * is no point in calling set_lan_id().
	 */
	ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
	if (san_mac_offset == 0 || san_mac_offset == 0xFFFF) {
		/*
		 * No SAN address in this EEPROM.  Not an error: report an
		 * all-ones address and claim success.
		 */
		for (i = 0; i < 6; i++)
			san_mac_addr[i] = 0xFF;
		return 0;
	}

	/* make sure we know which port we need to program */
	hw->mac.ops.set_lan_id(hw);

	/* apply the per-port offset to the block offset */
	if (hw->bus.func)
		san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET;
	else
		san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET;

	/* each EEPROM word holds two address bytes, low byte first */
	for (i = 0; i < 3; i++) {
		hw->eeprom.ops.read(hw, san_mac_offset + i, &san_mac_data);
		san_mac_addr[i * 2] = (u8)san_mac_data;
		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
	}

	return 0;
}
/**
* ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
* @hw: pointer to hardware structure
*
* Read PCIe configuration space, and get the MSI-X vector count from
* the capabilities table.
**/
u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
struct ixgbe_adapter *adapter = hw->back;
u16 msix_count;
pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
&msix_count);
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
/* MSI-X count is zero-based in HW, so increment to give proper value */
msix_count++;
return msix_count;
}
/**
 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar (or IXGBE_CLEAR_VMDQ_ALL)
 **/
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 lo, hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Reject RAR indices beyond what this MAC implements. */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
	hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

	/* No pool is mapped to this RAR: nothing to clear. */
	if (!lo && !hi)
		return 0;

	if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
		/* Drop every pool association at once. */
		if (lo) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
			lo = 0;
		}
		if (hi) {
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
			hi = 0;
		}
	} else if (vmdq < 32) {
		/* Pools 0-31 live in the low register. */
		lo &= ~(1 << vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), lo);
	} else {
		/* Pools 32-63 live in the high register. */
		hi &= ~(1 << (vmdq - 32));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), hi);
	}

	/* Retire the RAR itself once the last pool stops using it. */
	if (lo == 0 && hi == 0 && rar != 0)
		hw->mac.ops.clear_rar(hw, rar);

	return 0;
}
/**
 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq pool index
 **/
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 bits;
	u32 rar_entries = hw->mac.num_rar_entries;

	/* Reject RAR indices beyond what this MAC implements. */
	if (rar >= rar_entries) {
		hw_dbg(hw, "RAR index %d is out of range.\n", rar);
		return IXGBE_ERR_INVALID_ARGUMENT;
	}

	/* Pools 0-31 are enabled in MPSAR_LO, 32-63 in MPSAR_HI. */
	if (vmdq < 32) {
		bits = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)) | (1 << vmdq);
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), bits);
	} else {
		bits = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)) |
		       (1 << (vmdq - 32));
		IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), bits);
	}

	return 0;
}
/**
 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
 * @hw: pointer to hardware structure
 *
 * Zeroes all 128 32-bit UTA registers.
 **/
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
{
	int reg;

	for (reg = 0; reg < 128; reg++)
		IXGBE_WRITE_REG(hw, IXGBE_UTA(reg), 0);

	return 0;
}
/**
 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 *
 * Returns the VLVF index holding this VLAN id, the first empty slot if
 * the id is not present, or IXGBE_ERR_NO_SPACE when the table is full.
 **/
static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
{
	u32 first_empty_slot = 0;
	s32 regindex;

	/* VLAN 0 always occupies slot 0. */
	if (vlan == 0)
		return 0;

	/*
	 * Walk the VLVF array looking for this VLAN id, remembering the
	 * first unused slot encountered along the way.
	 */
	for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
		u32 entry = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));

		/* A zero entry cannot match since vlan != 0 here. */
		if ((entry & 0x0FFF) == vlan)
			return regindex;
		if (!entry && !first_empty_slot)
			first_empty_slot = regindex;
	}

	/* Not found: fall back to the first free slot, if any. */
	if (first_empty_slot)
		return first_empty_slot;

	hw_dbg(hw, "No space in VLVF.\n");
	return IXGBE_ERR_NO_SPACE;
}
/**
 * ixgbe_set_vfta_generic - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on)
{
	s32 regindex;
	u32 bitindex;
	u32 vfta;
	u32 bits;
	u32 vt;
	u32 targetbit;
	bool vfta_changed = false;

	/* VLAN ids are only 12 bits wide */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));

	/* Only mark the VFTA as changed if the bit actually flips. */
	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = true;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = true;
		}
	}

	/* Part 2
	 * If VT Mode is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
	if (vt & IXGBE_VT_CTL_VT_ENABLE) {
		s32 vlvf_index;

		/* May return IXGBE_ERR_NO_SPACE; propagate it. */
		vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
		if (vlvf_index < 0)
			return vlvf_index;

		if (vlan_on) {
			/* set the pool bit (pools 0-31 in the even VLVFB
			 * register, pools 32-63 in the odd one) */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index*2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index*2),
						bits);
			} else {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1));
				bits |= (1 << (vind-32));
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1),
						bits);
			}
		} else {
			/* clear the pool bit, then OR in the other half so
			 * "bits" reflects every pool still using this VLAN */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index*2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB(vlvf_index*2),
						bits);
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1));
			} else {
				bits = IXGBE_READ_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1));
				bits &= ~(1 << (vind-32));
				IXGBE_WRITE_REG(hw,
						IXGBE_VLVFB((vlvf_index*2)+1),
						bits);
				bits |= IXGBE_READ_REG(hw,
						IXGBE_VLVFB(vlvf_index*2));
			}
		}

		/*
		 * If there are still bits set in the VLVFB registers
		 * for the VLAN ID indicated we need to see if the
		 * caller is requesting that we clear the VFTA entry bit.
		 * If the caller has requested that we clear the VFTA
		 * entry bit but there are still pools/VFs using this VLAN
		 * ID entry then ignore the request.  We're not worried
		 * about the case where we're turning the VFTA VLAN ID
		 * entry bit on, only when requested to turn it off as
		 * there may be multiple pools and/or VFs using the
		 * VLAN ID entry.  In that case we cannot clear the
		 * VFTA bit until all pools/VFs using that VLAN ID have also
		 * been cleared.  This will be indicated by "bits" being
		 * zero.
		 */
		if (bits) {
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
					(IXGBE_VLVF_VIEN | vlan));
			if (!vlan_on) {
				/* someone wants to clear the vfta entry
				 * but some pools/VFs are still using it.
				 * Ignore it. */
				vfta_changed = false;
			}
		}
		else
			/* last user gone: invalidate the VLVF entry */
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
	}

	/* Commit the VFTA change only after the VLVF part succeeded. */
	if (vfta_changed)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);

	return 0;
}
/**
 * ixgbe_clear_vfta_generic - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filter table, and the VMDq index associated with the
 * filter.
 **/
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
{
	u32 offset;

	/* Wipe the VFTA bitmap first... */
	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	/* ...then every VLVF entry along with its pool-enable pair. */
	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
	}

	return 0;
}
/**
 * ixgbe_check_mac_link_generic - Determine link and speed status
 * @hw: pointer to hardware structure
 * @speed: pointer to link speed
 * @link_up: true when link is up
 * @link_up_wait_to_complete: bool used to wait for link up or not
 *
 * Reads the links register to determine if link is up and the current speed
 **/
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				 bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg, links_orig;
	u32 i;

	/*
	 * Read LINKS twice to clear the old state; log any change between
	 * the two reads for debugging.
	 */
	links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (links_orig != links_reg)
		hw_dbg(hw, "LINKS changed from %08X to %08X\n",
		       links_orig, links_reg);

	if (link_up_wait_to_complete) {
		/* Poll for link, 100ms per attempt. */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			*link_up = !!(links_reg & IXGBE_LINKS_UP);
			if (*link_up)
				break;
			msleep(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		*link_up = !!(links_reg & IXGBE_LINKS_UP);
	}

	/* Decode the speed field of the LINKS register. */
	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}

	/* if link is down, zero out the current flow control mode */
	if (!*link_up) {
		hw->fc.current_mode = ixgbe_fc_none;
		hw->fc.fc_was_autonegged = false;
	}

	return 0;
}
/**
 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
 * the EEPROM
 * @hw: pointer to hardware structure
 * @wwnn_prefix: the alternative WWNN prefix
 * @wwpn_prefix: the alternative WWPN prefix
 *
 * This function will read the EEPROM from the alternative SAN MAC address
 * block to check the support for the alternative WWNN/WWPN prefix support.
 **/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
				 u16 *wwpn_prefix)
{
	u16 offset, caps;
	u16 alt_san_mac_blk_offset;

	/* Default both outputs to "no prefix available". */
	*wwnn_prefix = 0xFFFF;
	*wwpn_prefix = 0xFFFF;

	/* Locate the alternative SAN MAC address block, if present. */
	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
			    &alt_san_mac_blk_offset);
	if (alt_san_mac_blk_offset == 0 || alt_san_mac_blk_offset == 0xFFFF)
		return 0;

	/* The block must advertise alternative WWN support. */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
	hw->eeprom.ops.read(hw, offset, &caps);
	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
		return 0;

	/* Fetch the WWNN and WWPN prefixes. */
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
	hw->eeprom.ops.read(hw, offset, wwnn_prefix);
	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
	hw->eeprom.ops.read(hw, offset, wwpn_prefix);

	return 0;
}
/**
 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
 * control
 * @hw: pointer to hardware structure
 *
 * Several PHYs do not support autoneg flow control.  Returns 0 when the
 * device id is one that does, IXGBE_ERR_FC_NOT_SUPPORTED otherwise.
 **/
static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
	switch (hw->device_id) {
	case IXGBE_DEV_ID_X540T:
	case IXGBE_DEV_ID_82599_T3_LOM:
		return 0;
	default:
		return IXGBE_ERR_FC_NOT_SUPPORTED;
	}
}
/**
 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for anti-spoofing
 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
 **/
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
{
	int j;
	int pf_target_reg = pf >> 3;
	int pf_target_shift = pf % 8;
	u32 pfvfspoof = 0;

	/* 82598 has no anti-spoofing support. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	if (enable)
		pfvfspoof = IXGBE_SPOOF_MACAS_MASK;

	/*
	 * PFVFSPOOF register array is size 8 with 8 bits assigned to
	 * MAC anti-spoof enables in each register array element.
	 */
	for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);

	/* If not enabling anti-spoofing then done */
	if (!enable)
		return;

	/*
	 * The PF should be allowed to spoof so that it can support
	 * emulation mode NICs.  Clear the bit assigned to the PF.
	 * (Previously this used XOR, which toggles rather than clears;
	 * it only behaved correctly because the bit was just written as
	 * set above.  An explicit clear matches the stated intent and
	 * stays correct regardless of the register's prior contents.)
	 */
	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
	pfvfspoof &= ~(1 << pf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
}
/**
 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for VLAN anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
 **/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	u32 reg;
	u32 bit = 1 << (vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT);

	/* 82598 has no anti-spoofing support. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* Read-modify-write only this VF's VLAN anti-spoof enable bit. */
	reg = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf >> 3));
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf >> 3), reg);
}
/**
 * ixgbe_get_device_caps_generic - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * Reads the device-capabilities word from its fixed EEPROM offset and
 * returns it through @device_caps.
 **/
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return 0;
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.